I'm attempting to write a function that generates a saliency map for an image. This is the code:
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

def do_salience(image, model, label, prefix):
    # load the image and preprocess it to match the model's input
    img = cv2.imread(image)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (300, 300)) / 255.0
    img = np.expand_dims(img, axis=0)

    # one-hot encode the expected label for the batch
    num_classes = 2
    expected_output = tf.one_hot([label] * img.shape[0], num_classes)

    # record the gradient of the loss with respect to the input pixels
    with tf.GradientTape() as tape:
        inputs = tf.cast(img, tf.float32)
        tape.watch(inputs)
        predictions = model(inputs)
        loss = tf.keras.losses.categorical_crossentropy(
            expected_output, predictions
        )
    print(predictions)

    # collapse the per-channel gradients into a single grayscale map
    gradients = tape.gradient(loss, inputs)
    grayscale_tensor = tf.reduce_sum(tf.abs(gradients), axis=-1)

    # min-max normalize to the 0-255 range and drop the batch dimension
    normalized_tensor = tf.cast(
        255
        * (grayscale_tensor - tf.reduce_min(grayscale_tensor))
        / (tf.reduce_max(grayscale_tensor) - tf.reduce_min(grayscale_tensor)),
        tf.uint8,
    )
    normalized_tensor = tf.squeeze(normalized_tensor)

    # display the saliency map
    plt.figure(figsize=(8, 8))
    plt.axis('off')
    plt.imshow(normalized_tensor, cmap='gray')
    plt.show()
This part, still inside the function, superimposes the saliency map on the original image:
    # colorize the saliency map and blend it with the input image
    gradient_color = cv2.applyColorMap(normalized_tensor.numpy(),
                                       cv2.COLORMAP_HOT)
    gradient_color = gradient_color / 255.0
    super_imposed = cv2.addWeighted(img, 0.5, gradient_color, 0.5, 0.0)

    # save the grayscale saliency map as a JPEG
    salient_image_name = prefix + image
    normalized_tensor = tf.expand_dims(normalized_tensor, -1)
    normalized_tensor = tf.io.encode_jpeg(normalized_tensor,
                                          quality=100, format='grayscale')
    tf.io.write_file(salient_image_name, normalized_tensor)
Next, I proceed to the 'generating saliency maps with an untrained model' stage:
model.load_weights('0_epochs.h5')
do_salience('cat1.jpg', model, 0, 'epoch0_salient')
do_salience('cat2.jpg', model, 0, 'epoch0_salient')
do_salience('catanddog.jpg', model, 0, 'epoch0_salient')
do_salience('dog1.jpg', model, 1, 'epoch0_salient')
do_salience('dog2.jpg', model, 1, 'epoch0_salient')
When I run this, an error is thrown. I believe the mistake is in the part that superimposes the saliency map on the original image. How can I rectify it?
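My guess is that the arrays passed to cv2.addWeighted don't match: img still carries the batch dimension added by np.expand_dims, so it has shape (1, 300, 300, 3), while gradient_color has shape (300, 300, 3). A minimal sketch of the fix I have in mind, assuming the shape mismatch is really the cause:

    # drop the batch dimension so both arrays are (300, 300, 3) float64
    # before blending them
    super_imposed = cv2.addWeighted(img[0], 0.5, gradient_color, 0.5, 0.0)

Is removing the batch dimension like this the right approach, or is something else wrong with the superimposing step?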