I'm trying to implement Progressive Neural Networks. In the paper, the authors apply transfer learning to exploit previously learned knowledge when training new reinforcement learning agents. Two questions:
- How can I lock certain layers so that their weights and biases can't be updated?
- And how can I train only specific layers?
Here is my code:
import tensorflow as tf
import tensorflow.contrib.slim as slim

def __create_network(self):
    with tf.variable_scope('inputs'):
        # None (rather than -1) marks the variable batch dimension in a placeholder.
        self.inputs = tf.placeholder(shape=[None, 80, 80, 4], dtype=tf.float32, name='input_data')
    with tf.variable_scope('networks'):
        with tf.variable_scope('conv_1'):
            self.conv_1 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.inputs, num_outputs=32,
                                      kernel_size=[8, 8], stride=4, padding='SAME')
        with tf.variable_scope('conv_2'):
            self.conv_2 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_1, num_outputs=64,
                                      kernel_size=[4, 4], stride=2, padding='SAME')
        with tf.variable_scope('conv_3'):
            self.conv_3 = slim.conv2d(activation_fn=tf.nn.relu, inputs=self.conv_2, num_outputs=64,
                                      kernel_size=[3, 3], stride=1, padding='SAME')
        with tf.variable_scope('fc'):
            self.fc = slim.fully_connected(slim.flatten(self.conv_3), 512, activation_fn=tf.nn.elu)
I want to lock conv_1, conv_2, and conv_3 and only train fc after restoring checkpoint data.
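
To make the question concrete, here is the kind of thing I was considering: collect only the variables under the networks/fc scope and pass them as var_list to the optimizer, then restore the checkpoint with a plain Saver. This is only a sketch of my idea, not working training code: the single conv layer, the squared-activation loss, AdamOptimizer, and the checkpoint path are all stand-ins for my real setup.

    import tensorflow as tf
    import tensorflow.contrib.slim as slim

    inputs = tf.placeholder(shape=[None, 80, 80, 4], dtype=tf.float32)
    with tf.variable_scope('networks'):
        with tf.variable_scope('conv_1'):
            conv_1 = slim.conv2d(inputs, 32, [8, 8], stride=4, padding='SAME')
        with tf.variable_scope('fc'):
            fc = slim.fully_connected(slim.flatten(conv_1), 512)

    # Gather only the variables created under the 'networks/fc' scope.
    fc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='networks/fc')

    loss = tf.reduce_mean(tf.square(fc))  # stand-in loss, not my real RL objective

    # Passing var_list means gradients are computed and applied only for fc;
    # the conv variables are restored but never updated.
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss, var_list=fc_vars)

    saver = tf.train.Saver()  # by default covers all variables, frozen ones included
    with tf.Session() as sess:
        saver.restore(sess, 'path/to/checkpoint')  # hypothetical checkpoint path
        # sess.run(train_op, feed_dict={inputs: batch})  # only fc weights change

If the checkpoint only contains the conv weights, I assume I would instead build the Saver over just those variables so restore doesn't complain about the missing fc ones. An alternative I have seen is passing trainable=False when building the conv layers, which keeps their variables out of tf.GraphKeys.TRAINABLE_VARIABLES entirely; I'm not sure which approach is preferable here.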