I want to get reproducible results for a CNN that I train with Keras on Google Colab with GPU support.
In addition to following the usual recommendations to insert certain code snippets (which are supposed to ensure reproducibility), I also added seeds to the layers.
###### This is the first code snippet to run #####
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
# This only needs to be done once per notebook.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
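To check that the authenticated client actually works before downloading anything, a quick listing of the Drive root can be used (a minimal sketch; it uses the same ListFile query pattern as the download helper in the complete code below):
# Sanity check (optional): list a few files in the Drive root to confirm
# the authenticated PyDrive client works before downloading any data.
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
for f in file_list[:5]:
    print(f['title'], f['id'])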
###### This is the second code snippet to run #####
from __future__ import print_function
import numpy as np
import tensorflow as tf
print(tf.test.gpu_device_name())
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(1)
rn.seed(1)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
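One caveat I am aware of: PYTHONHASHSEED only influences Python's hash randomization if it is set before the interpreter starts, so setting it inside an already-running Colab kernel has little effect. Bundling the remaining seed calls into a helper (a sketch, not part of the original recommendations) makes it easy to re-seed before each run:
# Helper (sketch): re-seed all generators before each run. Note that
# PYTHONHASHSEED must be set before the Python process starts to affect
# hash randomization, so inside a running kernel it is essentially a no-op.
def reset_seeds(seed=1):
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    rn.seed(seed)
    tf.set_random_seed(seed)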
###### This is the third code snippet to run #####
from keras import backend as K
tf.set_random_seed(1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
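For reference, the equivalent setup under TensorFlow 2.x would look roughly like this (a sketch; tf.ConfigProto and tf.Session no longer exist there, so it does not apply to the TF 1.x code above):
# TF 2.x equivalent (sketch): global seed plus single-threaded ops,
# no explicit Session needed.
tf.random.set_seed(1)
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(1)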
###### This is the fourth code snippet to run #####
# Imports needed by this snippet (they also appear in the complete code below)
from keras import initializers
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split

def model_cnn():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3,3), kernel_initializer=initializers.glorot_uniform(seed=1), input_shape=(28,28,1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, kernel_size=(3,3), kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25, seed=1))
    model.add(Flatten())
    model.add(Dense(512, kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1))
    model.add(Dense(10, kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(Activation('softmax'))
    model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model

def split_data(X, y):
    X_train_val, X_val, y_train_val, y_val = train_test_split(X, y, random_state=42, test_size=1/5, stratify=y)
    return X_train_val, X_val, y_train_val, y_val

def train_model_with_EarlyStopping(model, X, y):
    # make train and validation data
    X_tr, X_val, y_tr, y_val = split_data(X, y)
    es = EarlyStopping(monitor='val_loss', patience=20, mode='min', restore_best_weights=True)
    history = model.fit(X_tr, y_tr,
                        batch_size=64,
                        epochs=200,
                        verbose=1,
                        validation_data=(X_val, y_val),
                        callbacks=[es])
    return history
###### This is the fifth code snippet to run #####
train_model_with_EarlyStopping(model_cnn(), X, y)
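To narrow down where the nondeterminism enters, one diagnostic (a sketch, not part of my snippets) is to build the model twice with the same seeds and compare the initial weights; if they already differ before any training, the seeding itself is incomplete, otherwise the variation comes from the training step:
# Diagnostic sketch: with seeded initializers, two fresh builds should start
# from identical weights; any difference points at incomplete seeding rather
# than at nondeterministic training kernels.
w_a = model_cnn().get_weights()
w_b = model_cnn().get_weights()
print(all(np.array_equal(a, b) for a, b in zip(w_a, w_b)))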
Every time I run the above code I get different results. Does the reason lie in the code, or is it simply not possible to obtain reproducible results in Google Colab with GPU support?
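One test to isolate the GPU as the source (a sketch): hide the GPU so TensorFlow falls back to the CPU; if the results then become reproducible, the remaining variation comes from nondeterministic GPU kernels such as cuDNN, which the seeding above cannot control:
# Must run before TensorFlow initializes the GPU (ideally at the very top
# of the notebook): an invalid device list forces TensorFlow onto the CPU.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'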
The complete code (it contains unnecessary parts, such as imports of libraries that are not used):
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
from __future__ import print_function # NEW
import numpy as np
import tensorflow as tf
import random as rn
import os
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(1)
rn.seed(1)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
local_root_path = os.path.expanduser("~/data/data")
print(local_root_path)
try:
    os.makedirs(local_root_path, exist_ok=True)
except:
    pass
def ListFolder(google_drive_id, destination):
    file_list = drive.ListFile({'q': "'%s' in parents and trashed=false" % google_drive_id}).GetList()
    counter = 0
    for f in file_list:
        # If it is a directory, create it locally and recurse into it
        if f['mimeType'] == 'application/vnd.google-apps.folder':
            folder_path = os.path.join(destination, f['title'])
            os.makedirs(folder_path, exist_ok=True)
            print('creating directory {}'.format(folder_path))
            ListFolder(f['id'], folder_path)
        else:
            fname = os.path.join(destination, f['title'])
            f_ = drive.CreateFile({'id': f['id']})
            f_.GetContentFile(fname)  # download the file from Drive
            counter += 1
    print('{} files were downloaded to {}'.format(counter, destination))
ListFolder("1DyM_D2ZJ5UHIXmXq4uHzKqXSkLTH-lSo", local_root_path)
import glob
import h5py
from time import time
from keras import initializers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, merge
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.optimizers import SGD, Adam, RMSprop, Adagrad, Adadelta, Adamax, Nadam
from keras.utils import np_utils
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from keras.regularizers import l2
from keras.layers.advanced_activations import LeakyReLU, ELU
from keras import backend as K
import numpy as np
import pickle as pkl
from matplotlib import pyplot as plt
%matplotlib inline
import gzip
import numpy as np
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras.datasets import fashion_mnist
from numpy import mean, std
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, StratifiedKFold
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten
from keras.optimizers import SGD, Adam
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import auc, average_precision_score, f1_score
import time
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from google.colab import files
from PIL import Image
def model_cnn():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3,3), kernel_initializer=initializers.glorot_uniform(seed=1), input_shape=(28,28,1)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(32, kernel_size=(3,3), kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25, seed=1))
    model.add(Flatten())
    model.add(Dense(512, kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5, seed=1))
    model.add(Dense(10, kernel_initializer=initializers.glorot_uniform(seed=2)))
    model.add(Activation('softmax'))
    model.compile(loss="categorical_crossentropy", optimizer=Adam(lr=0.001), metrics=['accuracy'])
    return model
def split_data(X, y):
    X_train_val, X_val, y_train_val, y_val = train_test_split(X, y, random_state=42, test_size=1/5, stratify=y)
    return X_train_val, X_val, y_train_val, y_val

def train_model_with_EarlyStopping(model, X, y):
    # make train and validation data
    X_tr, X_val, y_tr, y_val = split_data(X, y)
    es = EarlyStopping(monitor='val_loss', patience=20, mode='min', restore_best_weights=True)
    history = model.fit(X_tr, y_tr,
                        batch_size=64,
                        epochs=200,
                        verbose=1,
                        validation_data=(X_val, y_val),
                        callbacks=[es])
    evaluate_model(model, history, X_tr, y_tr)  # helper defined elsewhere, not shown in this post
    return history