I am trying to do the following:
state[0,:] = state[0,:].assign( 0.9*prev_state + 0.1*( tf.matmul(inputs, weights) + biases ) )
for i in xrange(1, BATCH_SIZE):
    state[i,:] = state[i,:].assign( 0.9*state[i-1,:] + 0.1*( tf.matmul(inputs, weights) + biases ) )
prev_state = prev_state.assign( state[BATCH_SIZE-1,:] )
with
state = tf.Variable(tf.zeros([BATCH_SIZE, HIDDEN_1]), name='inner_state')
prev_state = tf.Variable(tf.zeros([HIDDEN_1]), name='previous_inner_state')
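To make the intended computation concrete, here is the same update written as a plain NumPy reference (a sketch only; np_inputs, np_weights, np_biases, np_state and np_prev are stand-ins for the tensors above, and I index the inputs per row, which is what the full code further down does):

import numpy as np

def reference_step(np_inputs, np_weights, np_biases, np_state, np_prev):
    # Row 0 mixes the state carried over from the previous batch with the
    # projection of the first input row.
    np_state[0, :] = 0.9 * np_prev + 0.1 * (np_inputs[0, :].dot(np_weights) + np_biases)
    # Every later row mixes the row just above it with its own input projection.
    for i in range(1, np_state.shape[0]):
        np_state[i, :] = 0.9 * np_state[i - 1, :] + 0.1 * (np_inputs[i, :].dot(np_weights) + np_biases)
    # The last row is carried over to the next batch.
    np_prev = np_state[-1, :].copy()
    return np_state, np_prev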
As a follow-up to this question: I get an error saying that a Tensor does not have an assign method. What is the correct way to call the assign method on a slice of a Variable tensor?
Full current code:
import tensorflow as tf
import math
import numpy as np
INPUTS = 10
HIDDEN_1 = 20
BATCH_SIZE = 3

def create_graph(inputs, state, prev_state):
    with tf.name_scope('h1'):
        weights = tf.Variable(
            tf.truncated_normal([INPUTS, HIDDEN_1],
                                stddev=1.0 / math.sqrt(float(INPUTS))),
            name='weights')
        biases = tf.Variable(tf.zeros([HIDDEN_1]), name='biases')

        # Row 0 mixes the carried-over state with the projection of the first input row.
        # inputs[0:1, :] keeps the slice 2-D so tf.matmul accepts it.
        updated_state = tf.scatter_update(
            state, [0],
            0.9 * prev_state + 0.1 * (tf.matmul(inputs[0:1, :], weights) + biases))
        # Every later row mixes the row computed just above it with its own projection.
        for i in xrange(1, BATCH_SIZE):
            updated_state = tf.scatter_update(
                updated_state, [i],
                0.9 * updated_state[i-1, :] + 0.1 * (tf.matmul(inputs[i:i+1, :], weights) + biases))
        # Carry the last row over to the next batch.
        prev_state = prev_state.assign(updated_state[BATCH_SIZE-1, :])
        output = tf.nn.relu(updated_state)
    return output

def data_iter():
    while True:
        idxs = np.random.rand(BATCH_SIZE, INPUTS)
        yield idxs

with tf.Graph().as_default():
    inputs = tf.placeholder(tf.float32, shape=(BATCH_SIZE, INPUTS))
    state = tf.Variable(tf.zeros([BATCH_SIZE, HIDDEN_1]), name='inner_state')
    prev_state = tf.Variable(tf.zeros([HIDDEN_1]), name='previous_inner_state')

    output = create_graph(inputs, state, prev_state)

    sess = tf.Session()
    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    iter_ = data_iter()
    for i in xrange(0, 2):
        print("iteration: ", i)
        input_data = iter_.next()
        out = sess.run(output, feed_dict={inputs: input_data})
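The only alternative I can think of that avoids assigning into slices altogether is to build the rows in a Python loop and concatenate them, roughly like this (a sketch only, assuming the 0.x tf.concat(dim, values) signature and reusing weights, biases and inputs from create_graph); I don't know whether that is the idiomatic way, hence the question about assign on a slice:

rows = []
h = prev_state
for i in xrange(BATCH_SIZE):
    # Each row depends on the previous one, so the rows are built sequentially.
    h = 0.9 * h + 0.1 * (tf.matmul(inputs[i:i+1, :], weights) + biases)
    rows.append(h)
new_state = tf.concat(0, rows)  # shape [BATCH_SIZE, HIDDEN_1]
# Carry the last row over to the next batch.
carry = prev_state.assign(tf.reshape(rows[-1], [HIDDEN_1]))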