I am getting the following error message when trying to run this AlexNet Python code.
Traceback (most recent call last):
File "C:\Users\PycharmProjects\Local-Binary-Patterns\pyimagesearch\AlexCM.py", line 6, in <module>
from keras.layers.normalization import BatchNormalization
ImportError: cannot import name 'BatchNormalization' from 'keras.layers.normalization' (C:\Users\PycharmProjects\Local-Binary-Patterns\venv\lib\site-packages\keras\layers\normalization\__init__.py)
I then saw a post suggesting I change the import to:
from tensorflow.keras.layers import BatchNormalization
but then I get the following error message:
C:\Users\PycharmProjects\Local-Binary-Patterns\venv\Scripts\python.exe C:/Users//PycharmProjects/Local-Binary-Patterns/pyimagesearch/AlexCM.py
2022-04-15 15:57:39.219873: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2022-04-15 15:57:39.220029: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
Traceback (most recent call last):
File "C:\Users\PycharmProjects\Local-Binary-Patterns\pyimagesearch\AlexCM.py", line 11, in <module>
from image_dataset_loader import load
ModuleNotFoundError: No module named 'image_dataset_loader'
Process finished with exit code 1
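From the log itself, the cudart64_110.dll line appears to be only a GPU-runtime warning (the next line says it can be ignored on a machine without a GPU set up). The ModuleNotFoundError, as far as I can tell, just means the package is not installed in the venv; assuming image_dataset_loader is the PyPI package of the same name (I have not confirmed this), installing it should fix the import:

pip install image-dataset-loader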
Below is the full Python code for reference against the error messages I am getting:
#Importing libraries
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' #must be set before importing tensorflow to take effect
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D
#from keras.layers.normalization import BatchNormalization
from tensorflow.keras.layers import BatchNormalization
import numpy as np
from keras.utils.np_utils import to_categorical
from PIL import Image
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
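#NOTE: these imports mix the standalone `keras` package with `tensorflow.keras`;
#with TF 2.6+ the two resolve to the same code, but on older setups mixing them
#can raise "must be an instance of class Layer" errors, so importing all layers
#from tensorflow.keras is probably the safer choice.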
def fix_gpu():
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)

fix_gpu()
np.random.seed(1000)
#Instantiation
AlexNet = Sequential()
#1st Convolutional Layer
AlexNet.add(Conv2D(filters=96, input_shape=(227,227,3), kernel_size=(11,11), strides=(4,4), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#2nd Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(5, 5), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#3rd Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#4th Convolutional Layer
AlexNet.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#5th Convolutional Layer
AlexNet.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='same'))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
AlexNet.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'))
#Passing it to a Fully Connected layer
AlexNet.add(Flatten())
# 1st Fully Connected Layer
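#NOTE: the input_shape argument on the Dense layer below is ignored, since this
#is not the first layer of the model; the input shape was already fixed by the
#first Conv2D layer.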
AlexNet.add(Dense(4096, input_shape=(32,32,3,)))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
# Add Dropout to prevent overfitting
AlexNet.add(Dropout(0.4))
#2nd Fully Connected Layer
AlexNet.add(Dense(4096))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#Add Dropout
AlexNet.add(Dropout(0.4))
#3rd Fully Connected Layer
AlexNet.add(Dense(1000))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('relu'))
#Add Dropout
AlexNet.add(Dropout(0.4))
#Output Layer
AlexNet.add(Dense(24))
AlexNet.add(BatchNormalization())
AlexNet.add(Activation('softmax'))
#Model Summary
AlexNet.summary()
# Compiling the model
AlexNet.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
from image_dataset_loader import load
#Keras library for CIFAR dataset
from keras.datasets import cifar10
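#NOTE: path, imgPath and testPath are used below (and again for CLASS_NAMES near
#the end) but are never defined in this listing; they have to be set to the
#dataset root and the train/test folder names before load() is called.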
(x_train, y_train),(x_test, y_test)=load(path, [imgPath, testPath])
#(x_train, y_train),(x_test, y_test)=cifar10.load_data()
temp = []
for label in y_train:
    temp.append([label])
y_train = np.array(temp)
print('-------------------------4')
print(y_train)
temp = []
for label in y_test:
    temp.append([label])
y_test = np.array(temp)
print('-------------------------5')
print(y_test)
# print('-----train images-----1')
# print(x_train)
# print('-----train labels-----2')
# print(y_train)
# print('-----test images-----3')
# print(x_test)
# print('-----test labels-----4')
# print(y_test)
#Train-validation-test split
from sklearn.model_selection import train_test_split
# SPLIT IS CRITICAL
# 22 IMAGES, 0.045; 11 IMAGES, 0.09; 8 IMAGES, 0.12
x_train,x_val,y_train,y_val=train_test_split(x_train,y_train,test_size=.12)
#Dimensions of the train/validation/test splits
print((x_train.shape,y_train.shape))
print((x_val.shape,y_val.shape))
print((x_test.shape,y_test.shape))
#Onehot Encoding the labels.
from sklearn.utils.multiclass import unique_labels
#After one-hot encoding, shape[1] of y_train, y_val and y_test changes from 1 to the number of classes (24 here)
y_train=to_categorical(y_train)
y_val=to_categorical(y_val)
y_test=to_categorical(y_test)
#Verifying the dimension after one hot encoding
print((x_train.shape,y_train.shape))
print((x_val.shape,y_val.shape))
print((x_test.shape,y_test.shape))
#Image Data Augmentation
from keras.preprocessing.image import ImageDataGenerator
train_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True,zoom_range=.1 )
val_generator = ImageDataGenerator(rotation_range=2, horizontal_flip=True,zoom_range=.1)
test_generator = ImageDataGenerator(rotation_range=2, horizontal_flip= True,zoom_range=.1)
#Fitting the augmentation defined above to the data
train_generator.fit(x_train)
val_generator.fit(x_val)
test_generator.fit(x_test)
#Learning Rate Annealer
from keras.callbacks import ReduceLROnPlateau
lrr = ReduceLROnPlateau(monitor='val_accuracy', factor=.01, patience=3, min_lr=1e-5) #TF 2.x logs the metric as 'val_accuracy'; monitoring 'val_acc' never triggers
#Defining the parameters
batch_size= 10
#CHANGE THE EPOCH NUMBERS
epochs=5
learn_rate=.001
#Training the model
AlexNet.fit(train_generator.flow(x_train, y_train, batch_size=batch_size),
            epochs=epochs, steps_per_epoch=x_train.shape[0]//batch_size,
            validation_data=val_generator.flow(x_val, y_val, batch_size=batch_size),
            validation_steps=2, callbacks=[lrr], verbose=1)
#After successful training, we will visualize its performance.
import matplotlib.pyplot as plt
#Plotting the training and validation loss
f,ax=plt.subplots(1,1) #Creates a single subplot (1 row, 1 column)
#Assigning the first subplot to graph training loss and validation loss
ax.plot(AlexNet.history.history['loss'],color='b',label='Training Loss')
ax.plot(AlexNet.history.history['val_loss'],color='r',label='Validation Loss')
plt.legend()
plt.show()
f,ax=plt.subplots(1,1) #Creates a single subplot (1 row, 1 column)
#Plotting the training accuracy and validation accuracy
ax.plot(AlexNet.history.history['accuracy'],color='b',label='Training Accuracy')
ax.plot(AlexNet.history.history['val_accuracy'],color='r',label='Validation Accuracy')
plt.legend()
plt.show()
#Defining function for confusion matrix plot
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    #Plot the confusion matrix
    fig, ax = plt.subplots(figsize=(4,4))
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center",
                    color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    return ax
np.set_printoptions(precision=2)
#Making predictions: take the argmax of the softmax output to get class indices.
#Thresholding at 0.5, i.e. (AlexNet.predict(x_test) > 0.5).astype("int32"),
#yields a 2D multilabel array that sklearn's confusion_matrix() rejects, which
#stops the script before any of the prints below run.
y_pred=np.argmax(AlexNet.predict(x_test), axis=1)
y_true=np.argmax(y_test,axis=1)
#Plotting the confusion matrix
from sklearn.metrics import confusion_matrix
confusion_mtx=confusion_matrix(y_true,y_pred)
#class_names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
CLASS_NAMES = [f.name for f in os.scandir(imgPath) if f.is_dir()]
class_names=CLASS_NAMES
print(class_names)
print("ypred\n", y_pred)
print("ytrue", y_true)
# Plotting non-normalized confusion matrix
plot_confusion_matrix(y_true, y_pred, classes = class_names,title = 'Confusion matrix, without normalization')
plt.show()
# Plotting normalized confusion matrix
# plot_confusion_matrix(y_true, y_pred, classes=class_names, normalize=True, title='Normalized confusion matrix')
# plt.show()
#Classification Metrics
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, RocCurveDisplay
acc_score = accuracy_score(y_true, y_pred)
print('\n\n\t\t Accuracy Score: ', str(round((100*acc_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average = 'macro')
print(' Precision Score Macro: ', str(round((100*prec_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average = 'micro')
print(' Precision Score Micro: ', str(round((100*prec_score), 2)), '%')
prec_score = precision_score(y_true, y_pred, average = 'weighted')
print('Precision Score Weighted: ', str(round((100*prec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average = 'macro')
print('\t\t\tRecall Macro: ', str(round((100*rec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average = 'micro')
print('\t\t\tRecall Micro: ', str(round((100*rec_score), 2)), '%')
rec_score = recall_score(y_true, y_pred, average = 'weighted')
print('\t\t Recall Weighted: ', str(round((100*rec_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average = 'macro')
print('\t\t F1 Score Macro: ', str(round((100*f_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average = 'micro')
print('\t\t F1 Score Micro: ', str(round((100*f_score), 2)), '%')
f_score = f1_score(y_true, y_pred, average = 'weighted')
print('\t F1 Score Weighted: ', str(round((100*f_score), 2)), '%')
print("Evaluation")
AlexNet.evaluate(x_test, y_test, batch_size=batch_size,verbose=1)
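For an explicit printout of the final test numbers, evaluate() returns the loss followed by the compiled metrics, so the call above could instead capture its return value (a minimal sketch, assuming metrics=['accuracy'] as compiled earlier):

score = AlexNet.evaluate(x_test, y_test, batch_size=batch_size, verbose=1)
print('Test loss:', score[0])
print('Test accuracy:', score[1])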
If any other information is needed, let me know. I am also unable to print the output listed at the bottom of the code, so please let me know if I am missing anything with the print functions. Thank you again for the help!