CNN Image Classification with Overfitting Reduction
import os
import tensorflow as tf
from tensorflow import keras
from keras import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, BatchNormalization, Dropout
import matplotlib.pyplot as plt
# Generators
# dataset_path should point to the dataset root that contains 'train' and 'test' subfolders.
train_ds = keras.utils.image_dataset_from_directory(
    directory=os.path.join(dataset_path, 'train'),
    labels='inferred',
    label_mode='int',
    batch_size=32,
    image_size=(256, 256)
)
validation_ds = keras.utils.image_dataset_from_directory(
    directory=os.path.join(dataset_path, 'test'),
    labels='inferred',
    label_mode='int',
    batch_size=32,
    image_size=(256, 256)
)

# Normalize pixel values to [0, 1]
def process(image, label):
    image = tf.cast(image / 255, tf.float32)
    return image, label

train_ds = train_ds.map(process)
validation_ds = validation_ds.map(process)
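A quick sanity check on the pipeline (a minimal sketch, not part of the original notebook): pull one batch and confirm the shapes and the normalized value range.
# Sketch: inspect one batch to verify the generator output
for images, labels in train_ds.take(1):
    print(images.shape)   # expected (32, 256, 256, 3)
    print(labels.shape)   # expected (32,)
    print(float(tf.reduce_min(images)), float(tf.reduce_max(images)))  # roughly 0.0 and 1.0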
# Baseline CNN (first Conv2D block reconstructed from the summary below)
model = Sequential()
model.add(Conv2D(32,kernel_size=(3,3),padding='valid',activation='relu',input_shape=(256,256,3)))
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(64,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(128,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dense(64,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.summary()
Model: "sequential_5"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ conv2d_15 (Conv2D) │ (None, 254, 254, 32) │ 896 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_15 (MaxPooling2D) │ (None, 127, 127, 32) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_16 (Conv2D) │ (None, 125, 125, 64) │ 18,496 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_16 (MaxPooling2D) │ (None, 62, 62, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_17 (Conv2D) │ (None, 60, 60, 128) │ 73,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_17 (MaxPooling2D) │ (None, 30, 30, 128) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ flatten_5 (Flatten) │ (None, 115200) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_15 (Dense) │ (None, 128) │ 14,745,728 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_16 (Dense) │ (None, 64) │ 8,256 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_17 (Dense) │ (None, 1) │ 65 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
Total params: 14,847,297 (56.64 MB)
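Nearly all of those parameters sit in the first Dense layer. A quick check of the arithmetic (a sketch, not part of the original output):
# Flattened 30x30x128 feature map feeding a 128-unit Dense layer
print(30 * 30 * 128)         # 115200 inputs after Flatten
print(115200 * 128 + 128)    # 14,745,728 parameters in dense_15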
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
history=model.fit(train_ds,epochs=10, validation_data=validation_ds)
Epoch 1/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 46s 68ms/step - accuracy: 0.5960 - loss: 0.6952 - val_accuracy: 0.7538 - val_loss: 0.5068
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 84s 73ms/step - accuracy: 0.7541 - loss: 0.5022 - val_accuracy: 0.7840 - val_loss: 0.4614
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 41s 66ms/step - accuracy: 0.8239 - loss: 0.3918 - val_accuracy: 0.8086 - val_loss: 0.4682
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 46s 74ms/step - accuracy: 0.8897 - loss: 0.2656 - val_accuracy: 0.8070 - val_loss: 0.6143
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 46s 73ms/step - accuracy: 0.9443 - loss: 0.1415 - val_accuracy: 0.7946 - val_loss: 0.7792
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 78s 66ms/step - accuracy: 0.9669 - loss: 0.0876 - val_accuracy: 0.7874 - val_loss: 0.8839
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 42s 67ms/step - accuracy: 0.9804 - loss: 0.0524 - val_accuracy: 0.7804 - val_loss: 0.9358
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 41s 66ms/step - accuracy: 0.9837 - loss: 0.0521 - val_accuracy: 0.7916 - val_loss: 1.1160
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 41s 66ms/step - accuracy: 0.9862 - loss: 0.0418 - val_accuracy: 0.7988 - val_loss: 1.1355
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 82s 67ms/step - accuracy: 0.9895 - loss: 0.0285 - val_accuracy: 0.7774 - val_loss: 1.2932
# Loss curves: training loss keeps falling while validation loss rises, a clear sign of overfitting
plt.plot(history.history['loss'],color='red',label='train')
plt.plot(history.history['val_loss'],color='blue',label='validation')
plt.legend()
plt.show()
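The same comparison for accuracy (a sketch mirroring the loss plot above, not in the original notebook) shows training accuracy climbing toward 0.99 while validation accuracy stalls near 0.78:
# Sketch: train vs. validation accuracy for the baseline run
plt.plot(history.history['accuracy'],color='red',label='train')
plt.plot(history.history['val_accuracy'],color='blue',label='validation')
plt.legend()
plt.show()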
# CNN with BatchNormalization and Dropout to reduce overfitting
# (first Conv2D block reconstructed from the summary below)
model = Sequential()
model.add(Conv2D(32,kernel_size=(3,3),padding='valid',activation='relu',input_shape=(256,256,3)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(64,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(128,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1,activation='sigmoid'))
model.summary()
Model: "sequential_4"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ conv2d_12 (Conv2D) │ (None, 254, 254, 32) │ 896 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_6 │ (None, 254, 254, 32) │ 128 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_12 (MaxPooling2D) │ (None, 127, 127, 32) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_13 (Conv2D) │ (None, 125, 125, 64) │ 18,496 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_7 │ (None, 125, 125, 64) │ 256 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_13 (MaxPooling2D) │ (None, 62, 62, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_14 (Conv2D) │ (None, 60, 60, 128) │ 73,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ batch_normalization_8 │ (None, 60, 60, 128) │ 512 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_14 (MaxPooling2D) │ (None, 30, 30, 128) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ flatten_4 (Flatten) │ (None, 115200) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_12 (Dense) │ (None, 128) │ 14,745,728 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_4 (Dropout) │ (None, 128) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_13 (Dense) │ (None, 64) │ 8,256 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_5 (Dropout) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_14 (Dense) │ (None, 1) │ 65 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
history=model.fit(train_ds,epochs=10, validation_data=validation_ds)
Epoch 1/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 58s 83ms/step - accuracy: 0.5695 - loss: 2.5598 - val_accuracy: 0.7146 - val_loss: 0.5701
Epoch 2/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 75s 76ms/step - accuracy: 0.7121 - loss: 0.5595 - val_accuracy: 0.6924 - val_loss: 0.6028
Epoch 3/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 84s 79ms/step - accuracy: 0.7722 - loss: 0.4792 - val_accuracy: 0.6680 - val_loss: 0.6597
Epoch 4/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 79s 75ms/step - accuracy: 0.7988 - loss: 0.4349 - val_accuracy: 0.7668 - val_loss: 0.5321
Epoch 5/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 51s 82ms/step - accuracy: 0.8392 - loss: 0.3564 - val_accuracy: 0.6392 - val_loss: 1.2680
Epoch 6/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 82s 82ms/step - accuracy: 0.8736 - loss: 0.2948 - val_accuracy: 0.8098 - val_loss: 0.5164
Epoch 7/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 47s 75ms/step - accuracy: 0.9166 - loss: 0.2084 - val_accuracy: 0.7810 - val_loss: 0.5446
Epoch 8/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 51s 81ms/step - accuracy: 0.9482 - loss: 0.1402 - val_accuracy: 0.7934 - val_loss: 0.7180
Epoch 9/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 47s 75ms/step - accuracy: 0.9619 - loss: 0.0995 - val_accuracy: 0.7454 - val_loss: 1.4395
Epoch 10/10
625/625 ━━━━━━━━━━━━━━━━━━━━ 47s 76ms/step - accuracy: 0.9726 - loss: 0.0781 - val_accuracy: 0.8042 - val_loss: 0.7737
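BatchNormalization and Dropout alone do not close the gap: by epoch 10 training accuracy is about 0.97 against roughly 0.80 on validation. A small sketch (not in the original) that quantifies the gap per epoch from the history object:
# Sketch: per-epoch gap between training and validation accuracy
for epoch, (acc, val_acc) in enumerate(zip(history.history['accuracy'],
                                           history.history['val_accuracy']), start=1):
    print(f"epoch {epoch}: train {acc:.3f}  val {val_acc:.3f}  gap {acc - val_acc:.3f}")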
import cv2

# OpenCV loads images in BGR channel order; convert to RGB so display and
# prediction match the RGB images the model was trained on.
test_img1 = cv2.cvtColor(cv2.imread('/content/cat.jpeg'), cv2.COLOR_BGR2RGB)
test_img2 = cv2.cvtColor(cv2.imread('/content/dog.jpeg'), cv2.COLOR_BGR2RGB)
plt.imshow(test_img1)
<matplotlib.image.AxesImage at 0x7cbac6daa850>
plt.imshow(test_img2)
<matplotlib.image.AxesImage at 0x7cbac6c233d0>
test_img1.shape
(224, 225, 3)
# Resize to the model's input size and rescale to [0, 1] to match the training preprocessing
test_img1 = cv2.resize(test_img1, (256, 256))
test_img2 = cv2.resize(test_img2, (256, 256))
test_input1 = test_img1.reshape((1, 256, 256, 3)) / 255.0
test_input2 = test_img2.reshape((1, 256, 256, 3)) / 255.0
model.predict(test_input1)
model.predict(test_input2)
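model.predict returns the sigmoid probability of class 1. A small sketch for turning that into a label; the cat=0 / dog=1 mapping is an assumption based on the alphabetical folder ordering that image_dataset_from_directory uses, so check the class names for your dataset:
# Sketch: map the sigmoid output to a class label with a 0.5 threshold
prob = float(model.predict(test_input1)[0][0])
label = 'dog' if prob >= 0.5 else 'cat'   # assumes alphabetical class order: cat -> 0, dog -> 1
print(prob, label)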
Data Augmentation
from keras.preprocessing.image import ImageDataGenerator  # legacy augmentation API; location may vary across Keras versions

batch_size=16
train_datagen = ImageDataGenerator(rescale=1./255,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   shear_range=0.2)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    directory=os.path.join(dataset_path, 'train'),
    target_size=(256,256),
    batch_size=batch_size,
    class_mode='binary'
)
# Validation data is only rescaled, never augmented
validation_generator = test_datagen.flow_from_directory(
    directory=os.path.join(dataset_path, 'test'),
    target_size=(256,256),
    batch_size=batch_size,
    class_mode='binary'
)
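To see what the augmentation actually produces, a short sketch (not in the original notebook) that displays a few images from one augmented batch:
# Sketch: show a few augmented training images from one batch
images, labels = next(train_generator)   # images are already rescaled to [0, 1]
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax, img, lbl in zip(axes, images, labels):
    ax.imshow(img)
    ax.set_title(int(lbl))
    ax.axis('off')
plt.show()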
# Same CNN architecture as above, now trained on the augmented data
# (first Conv2D block reconstructed from the summary below)
model = Sequential()
model.add(Conv2D(32,kernel_size=(3,3),padding='valid',activation='relu',input_shape=(256,256,3)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(64,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Conv2D(128,kernel_size=(3,3),padding='valid',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2),strides=2,padding='valid'))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(64,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1,activation='sigmoid'))
model.summary()
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ conv2d_6 (Conv2D) │ (None, 254, 254, 32) │ 896 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
│ batch_normalization_3 │ (None, 254, 254, 32) │ 128 │
│ (BatchNormalization) │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
history=model.fit(train_generator,epochs=10,
│ max_pooling2d_6 (MaxPooling2D) │validation_data=validation_generator)
(None, 127, 127, 32) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_7 (Conv2D) │ (None, 125, 125, 64) │ 18,496 │
/usr/local/lib/python3.11/dist-packages/keras/src/trainers/data_adapters/py_dataset_adapter.py:121: UserWarning: Your `PyDataset` cl
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
self._warn_if_super_not_called()
│ batch_normalization_4
Epoch 1/10 │ (None, 125, 125, 64) │ 256 │
│ (BatchNormalization)
1250/1250 │
━━━━━━━━━━━━━━━━━━━━ 329s 257ms/step │
- accuracy: 0.5677 - loss: │
2.3260 - val_accuracy: 0.6568 - val_loss: 0.6428
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
Epoch 2/10
│ max_pooling2d_7
1250/1250 (MaxPooling2D)
━━━━━━━━━━━━━━━━━━━━ │ (None,-62,
316s 252ms/step 62, 64)0.6637 - loss:
accuracy: │ 0 │
0.6173 - val_accuracy: 0.7246 - val_loss: 0.5433
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
Epoch 3/10
│ conv2d_8 (Conv2D) │ (None, 60, 60, 128) │ 73,856 │
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 308s 246ms/step - accuracy: 0.7212 - loss: 0.5459 - val_accuracy: 0.7492 - val_loss: 0.5112
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
Epoch 4/10
│ batch_normalization_5
1250/1250 │ (None,-60,
━━━━━━━━━━━━━━━━━━━━ 307s 245ms/step 60, 128)
accuracy: │
0.7715 - loss: 512 │
0.4871 - val_accuracy: 0.7184 - val_loss: 0.7620
│ (BatchNormalization)
Epoch 5/10 │ │ │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 303s 242ms/step - accuracy: 0.7902 - loss: 0.4547 - val_accuracy: 0.7306 - val_loss: 0.5855
│ max_pooling2d_8
Epoch 6/10 (MaxPooling2D) │ (None, 30, 30, 128) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 325s 245ms/step - accuracy: 0.8191 - loss: 0.4052 - val_accuracy: 0.8130 - val_loss: 0.4174
│ flatten_2
Epoch 7/10 (Flatten) │ (None, 115200) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 305s 244ms/step - accuracy: 0.8418 - loss: 0.3716 - val_accuracy: 0.7252 - val_loss: 0.6009
│ dense_6 (Dense) │ (None, 128) │ 14,745,728 │
Epoch 8/10
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 306s 245ms/step - accuracy: 0.8509 - loss: 0.3427 - val_accuracy: 0.8388 - val_loss: 0.3717
│ dropout_2 (Dropout) │ (None, 128) │ 0 │
Epoch 9/10
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 305s 244ms/step - accuracy: 0.8755 - loss: 0.3045 - val_accuracy: 0.8790 - val_loss: 0.2999
│ dense_7 (Dense) │ (None, 64) │ 8,256 │
Epoch 10/10
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
1250/1250 ━━━━━━━━━━━━━━━━━━━━ 305s 244ms/step - accuracy: 0.8824 - loss: 0.2822 - val_accuracy: 0.8236 - val_loss: 0.4515
│ dropout_3 (Dropout) │ (None, 64) │ 0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_8 (Dense) │ (None, 1) │ 65 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'],color='red',label='train')
plt.plot(history.history['val_accuracy'],color='blue',label='validation')
plt.legend()
plt.show()
plt.plot(history.history['loss'],color='red',label='train')