0
def build(width, height, depth, classes, weightsPath=None):
    """Build a sequential CNN classifier for channels-first input.

    Architecture: six CONV => RELU => POOL stages (filters 100,100,100,50,50,50,
    all 5x5 same-padded convolutions with 2x2/2 max pooling), followed by
    Flatten => Dense(classes) => softmax.

    Args:
        width:   input image width.
        height:  input image height.
        depth:   number of input channels (channels-first layout).
        classes: number of output classes for the softmax head.
        weightsPath: kept for interface compatibility; not used by this
            function (presumably intended for loading pretrained weights —
            TODO confirm with caller).

    Returns:
        An uncompiled keras Sequential model.
    """
    model = Sequential()

    # Stage 1 carries the input_shape; only the first layer needs it.
    model.add(Conv2D(100, (5, 5), padding="same",
                     input_shape=(depth, height, width),
                     data_format="channels_first"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                           data_format="channels_first"))

    # Stages 2-6 differ only in filter count; note data_format is set on the
    # pooling layers but not on these conv layers (matches original network).
    for n_filters in (100, 100, 50, 50, 50):
        model.add(Conv2D(n_filters, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                               data_format="channels_first"))

    # Classifier head: flatten straight into the softmax layer.
    model.add(Flatten())
    model.add(Dense(classes))
    model.add(Activation("softmax"))

    return model
# Build and train the network on 200x200 single-channel images, 100 classes.
test_model = build(width=200, height=200, depth=1, classes=100)
epochs = 50
batch_size = 128
# BUG FIX: the original called `cnn_model.compile(...)`, but `cnn_model` is
# never defined in this script — the model built above is `test_model`, so
# compiling (and the subsequent fit) would raise NameError.
# NOTE(review): 'mse' loss with a 100-way softmax head is unusual — consider
# 'categorical_crossentropy' (or 'sparse_categorical_crossentropy' for integer
# labels); left unchanged to preserve the author's intent.
test_model.compile(optimizer='Adam', loss='mse')
history = test_model.fit(X_train, y_train,
                         validation_data=[X_valid, y_valid],
                         epochs=epochs, batch_size=batch_size,
                         verbose=1)

I want to extract the output of the intermediate layer shown below as a NumPy array and save it to a text file.

The output of the layer I want to extract is

# 6 set of CONV => RELU => POOL

model.add(Conv2D(50, (5, 5), padding="same"))

I tried the solutions linked here: Keras, How to get the output of each layer?

However, I am unable to incorporate the solution provided in that link into my problem. I hope experts can help me overcome this problem.

pro
  • 113
  • 8

2 Answers

1

You can do it in this way:

from tensorflow.keras.models import Model
cnn_model = build(...) # build your model by invoking your function
     ...
 # train your model
layer_idx = 6
# Indices are based on order of horizontal graph traversal (bottom-up).
layer_to_interpret = cnn_model.get_layer(index=layer_idx)
# You can also use the name of layer to get it.
# layer_to_interpret = cnn_model.get_layer(layer_name)
# Create multi-output model
multiout_model = Model(inputs=cnn_model.inputs, outputs=[layer_to_interpret.output, cnn_model.output])

conv_outs, predictions = multiout_model(images)
# save conv_outs to a file 
conv_outs.numpy().save("conv_output.npy")
# np.savetxt("foo.csv", conv_outs, delimiter=",")
MSS
  • 3,306
  • 1
  • 19
  • 50
  • 1
    Can you please explain why using `with tf.GradientTape()` for calling _multiout_model_? If _cnn_model_ is already trained there is no need for `tf.GradientTape`. In the other scenario that _cnn_model_ is not trained, and the outputs need to be saved while the network is being trained, there are other steps to be included in the code for training the network. – learner Jan 10 '23 at 12:32
  • Yes, you are correct. If the model is already trained, we don't need to use `tf.GradientTape` – MSS Jan 10 '23 at 18:00
0

Note that Keras layers accept a name argument, so to make things easy and unambiguous, give the layer whose output you want to extract an explicit name:

# 6th set of CONV => RELU => POOL — give the target layer an explicit name
# so it can be retrieved unambiguously later.

model.add(Conv2D(50, (5, 5), padding="same", name="my_intermediate_layer"))

# To extract the symbolic output tensor of the named layer:
outputs=model.get_layer(name="my_intermediate_layer").output
Timbus Calin
  • 13,809
  • 5
  • 41
  • 59
Yassine
  • 1
  • 2