
In TensorFlow, how can I get and output the change in the trainable variables (the difference before and after one optimization step) during training?

Thanks.

Adam

2 Answers


Something like this?

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print('before: {}'.format(sess.run(w)))
    sess.run(train_op)
    print('after: {}'.format(sess.run(w)))
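
Here w is assumed to be your trainable variable and train_op your optimizer step. If you want the delta itself rather than two printouts, you can subtract the two snapshots in Python. A minimal self-contained sketch, with a made-up quadratic loss standing in for your model:

import tensorflow as tf

# toy setup, for illustration only: minimize (w - 3)^2
w = tf.Variable(0.0)
loss = tf.square(w - 3.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    before = sess.run(w)   # snapshot before the step
    sess.run(train_op)     # one optimization step
    after = sess.run(w)    # snapshot after the step
    print('change in w: {}'.format(after - before))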
xxi

I think what you are looking for is a way to see the difference at every step. This is fairly straightforward.

Here is a working example that learns the XOR function in TensorFlow, and while it trains you can get the change in the m1 matrix.

### imports
import tensorflow as tf

### constant data
x  = [[0.,0.],[1.,1.],[1.,0.],[0.,1.]]
y_ = [[1.,0.],[1.,0.],[0.,1.],[0.,1.]]

### induction
# 1x2 input -> 2x3 hidden sigmoid -> 3x2 softmax output

# Layer 0 = the 1x2 inputs
x0 = tf.constant( x  , dtype=tf.float32 )
y0 = tf.constant( y_ , dtype=tf.float32 )



# Layer 1 = the 2x3 hidden sigmoid
m1 = tf.Variable( tf.random_uniform( [2,3] , minval=0.1 , maxval=0.9 , dtype=tf.float32  ))
b1 = tf.Variable( tf.random_uniform( [3]   , minval=0.1 , maxval=0.9 , dtype=tf.float32  ))
h1 = tf.sigmoid( tf.matmul( x0,m1 ) + b1 )

# Layer 2 = the 3x2 softmax output
m2 = tf.Variable( tf.random_uniform( [3,2] , minval=0.1 , maxval=0.9 , dtype=tf.float32  ))
b2 = tf.Variable( tf.random_uniform( [2]   , minval=0.1 , maxval=0.9 , dtype=tf.float32  ))
y_out = tf.nn.softmax( tf.matmul( h1,m2 ) + b2 )


### loss

# loss : sum of the squares of y0 - y_out
loss = tf.reduce_sum( tf.square( y0 - y_out ) )

# a variable to hold the previous value of m1
previous_m1 = tf.Variable( tf.zeros( [2,3] , dtype=tf.float32 ))

# an operation to save the current m1 into previous_m1
update_previous = previous_m1.assign(m1)

# a dependency before training to make sure previous_m1 gets saved first
with tf.control_dependencies([update_previous]):
  train = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

# and finally, an op to compute the difference; fetching it also runs train
with tf.control_dependencies([train]):
  diff_m1 = tf.subtract(m1, previous_m1)

### training
# run 500 steps using all of the X and Y data
# every 100 steps, print out the change in m1
with tf.Session() as sess:
  sess.run( tf.global_variables_initializer() )
  for step in range(500):
    if (step + 1) % 100 == 0:
      # fetching diff_m1 also runs update_previous and train through the
      # control dependencies, so this is still exactly one training step
      print("The change in matrix m1 is")
      print(sess.run(diff_m1))
    else:
      sess.run(train)

  print("")
  results = sess.run([m1, b1, m2, b2, y_out, loss])
  labels  = "m1,b1,m2,b2,y_out,loss".split(",")
  for label, result in zip(labels, results):
    print("")
    print(label)
    print(result)

print("")

The important lines are

# a variable to hold the previous value of m1
previous_m1 = tf.Variable( tf.zeros( [2,3] , dtype=tf.float32 ))

# an operation to save the current m1 into previous_m1
update_previous = previous_m1.assign(m1)

# a dependency before training to make sure previous_m1 gets saved first
with tf.control_dependencies([update_previous]):
  train = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

# and finally, an op to compute the difference; fetching it also runs train
with tf.control_dependencies([train]):
  diff_m1 = tf.subtract(m1, previous_m1)
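
For what it's worth, on a newer TensorFlow (2.x, eager execution) the same idea needs no control-dependency plumbing at all: snapshot the variable, apply one step, and subtract. A rough sketch, with a made-up toy loss standing in for your model:

import tensorflow as tf  # TF 2.x

w = tf.Variable(tf.random.uniform([2, 3]))
opt = tf.keras.optimizers.SGD(learning_rate=1.0)

previous_w = tf.identity(w)              # snapshot before the step
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(tf.square(w))   # toy loss, illustration only
grads = tape.gradient(loss, [w])
opt.apply_gradients(zip(grads, [w]))     # one optimization step
print("change in w:", (w - previous_w).numpy())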
Anton Codes
  • The code **without** displaying the difference in m1 can be found here: https://github.com/panchishin/learn-to-tensorflow/blob/master/solutions/04-xor-2d.py – Anton Codes May 09 '17 at 14:38