I only have 141 pictures, 71 of each class (medical images), and I want to classify them. I know this is very little data, so I want to use augmentation.
My problem is that I can't get past 0.5 accuracy, even on the training data, when using augmentation!
When I train only on the 141 images themselves I can get to 80%, so it must mean I'm using the augmentation wrong?
I'd love it if anyone here can spot what I'm doing wrong.
My model:
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import optimizers
from keras import backend as K

K.clear_session()

model = Sequential()
# three conv/pool blocks on 256x256 single-channel (grayscale) input
model.add(Conv2D(32, (3, 3), input_shape=(256, 256, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# classifier head: two-class softmax
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2, activation='softmax'))

sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
# normalize: zero mean, unit std per pixel across the whole dataset
import numpy as np

meanImg = np.mean(X, axis=0)
stdImg = np.std(X, axis=0)
X_norm = (X - meanImg) / (stdImg + 0.0001)
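A quick sanity check I can run on this (a sketch; it assumes X is a float array of shape (N, 256, 256, 1)):

# per-pixel mean should come out near 0 and std near 1 after normalization
print(X_norm.mean(axis=0).mean())  # expect ~0
print(X_norm.std(axis=0).mean())   # expect ~1 (0 for constant pixels)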
# split into train/test (note: this passes the already-normalized X_norm,
# even though the DataGenerators below will normalize again)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.2)
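(Side note: with this few images I would probably also want a stratified split so both classes stay balanced in train and test; stratify is a standard train_test_split parameter. Sketch:)

# sketch: stratified split keeps the 50/50 class balance in both sets
X_train, X_test, y_train, y_test = train_test_split(
    X_norm, y, test_size=0.2, stratify=y)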
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    zoom_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True)

# fit() computes the dataset mean/std that featurewise_center/_std will apply
train_datagen.fit(X_train)
test_datagen.fit(X_test)

train_generator = train_datagen.flow(X_train, y_train, batch_size=16,
                                     save_to_dir='train', save_prefix='aug')
validation_generator = test_datagen.flow(X_test, y_test, batch_size=16,
                                         save_to_dir='test', save_prefix='aug')
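To see what the model actually receives, I can pull one augmented batch from the generator and print its statistics (debugging sketch):

# debugging sketch: inspect one augmented batch
x_batch, y_batch = next(train_generator)
print(x_batch.shape, x_batch.dtype)   # expect (16, 256, 256, 1), float
print(x_batch.min(), x_batch.max(),
      x_batch.mean(), x_batch.std())  # value range after the generator's normalization
print(y_batch[:4])                    # labels should still line up with the images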
This is the run that gets stuck at 0.5 accuracy:
batch_size = 16
model.fit_generator(
    train_generator,
    steps_per_epoch=2000 // batch_size,
    epochs=10,
    validation_data=validation_generator,
    validation_steps=400 // batch_size)
model.save_weights('first_try.h5')  # save the weights after (or during) training
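For comparison, a variant with the steps matched to my actual dataset size (untested sketch; with ~113 training images and batch size 16 that is about 7 steps, so it needs more epochs to show the model the same number of batches):

# sketch: one epoch = roughly one pass over the real images,
# with the augmentation still varying every batch
model.fit_generator(
    train_generator,
    steps_per_epoch=len(X_train) // batch_size,          # ~7 steps for ~113 images
    epochs=100,                                          # compensate for fewer steps per epoch
    validation_data=validation_generator,
    validation_steps=max(1, len(X_test) // batch_size))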
This gives good results:
history = model.fit(X_train, y_train, batch_size=16,
                    epochs=20, verbose=1, validation_split=0.2)