
I'm trying to get into TensorFlow by making some changes to a beginner example.

I am trying to combine Implementing a Neural Network from Scratch with Deep MNIST for Experts.

I get the data using X, y = sklearn.datasets.make_moons(50, noise=0.20). Basically, this line gives a 2-D X of shape (50, 2) and a two-class y of labels 0/1.
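Just to sanity-check the shapes (a quick sketch, assuming scikit-learn's make_moons):

import numpy as np
import sklearn.datasets

X, y = sklearn.datasets.make_moons(50, noise=0.20)
print(X.shape)   # (50, 2) -- two features per sample
print(y.shape)   # (50,)   -- integer class labels 0/1, not one-hot yet

The placeholders are set up as: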

x = tf.placeholder(tf.float32, shape=[50,2])
y_ = tf.placeholder(tf.float32, shape=[50,2])

The structure of the network is the same as in Deep MNIST for Experts. The difference is the session run call:

sess.run(train_step, feed_dict={x:X, y_:y})

But that gives an error:

ValueError: setting an array element with a sequence.

Can anyone give me some hints on this issue? Here is the code.

import numpy as np
import matplotlib
import tensorflow as tf
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model
sess = tf.InteractiveSession()
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
np.random.seed(0)
X, y = sklearn.datasets.make_moons(50, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
batch_xs = np.vstack([np.expand_dims(k,0) for k in X])
x = tf.placeholder(tf.float32, shape=[50,2])
y_ = tf.placeholder(tf.float32, shape=[50,2])
W = tf.Variable(tf.zeros([2,2]))
b = tf.Variable(tf.zeros([2]))
a = np.arange(100).reshape((50, 2))
y = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
sess.run(tf.initialize_all_variables())
for i in range(20000):
    sess.run(train_step, feed_dict={x:X, y_:y})

Here is the working code I ended up with after struggling with TensorFlow:

# Package imports
import numpy as np
import matplotlib
import tensorflow as tf
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import sklearn.linear_model

rng = np.random

input_dim = 2
output_dim = 2
hidden_dim = 3

np.random.seed(0)
Train_X, Train_Y = sklearn.datasets.make_moons(200, noise=0.20)
Train_X = np.reshape(Train_X, (-1,2))
Train_YY = []  
for i in Train_Y:       #making Train_Y a 2-D list
    if i == 1:
        Train_YY.append([1,0])
    else:
        Train_YY.append([0,1])
print Train_YY
X = tf.placeholder("float",shape=[None,input_dim])
Y = tf.placeholder("float")
W1 = tf.Variable(tf.random_normal([input_dim, hidden_dim], stddev=0.35),
                      name="weights")
b1 = tf.Variable(tf.zeros([1,hidden_dim]), name="bias1")
a1 = tf.tanh(tf.add(tf.matmul(X,W1),b1))
W2 = tf.Variable(tf.random_normal([hidden_dim,output_dim]), name="weight2")
b2 = tf.Variable(tf.zeros([1,output_dim]), name="bias2")
a2 = tf.add(tf.matmul(a1, W2), b2)
output=tf.nn.softmax(a2)
correct_prediction = tf.equal(tf.argmax(output,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cross_entropy = -tf.reduce_sum(Y*tf.log(output))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for i in range(20000):
        # for (a,d) in zip(Train_X, Train_Y):
        training_cost = sess.run(optimizer, feed_dict={X:Train_X, Y:Train_YY})
        if i%1000 == 0:
            # print "Training cost=", training_cost, "W1=", W1.eval(), "b1=", b1.eval(),"W2=", W2.eval(), "b2=", b2.eval()
            # print output.eval({X:Train_X, Y:Train_YY})
            # print cross_entropy.eval({X:Train_X, Y:Train_YY})
            print "Accuracy = ", accuracy.eval({X:Train_X, Y:Train_YY}) 
Hanyu Guo

1 Answer


The problem arises because you redefine y on the following line:

y = tf.nn.softmax(tf.matmul(x,W) + b)

TensorFlow then gives an error because feeding y_: y in the feed_dict would be feeding a tensor with another tensor, which isn't possible (and—even if it were—this particular feed would create a circular dependency!).
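In other words, every value passed in feed_dict has to be convertible to a NumPy array, and a tf.Tensor is not; the same underlying NumPy error can be reproduced on its own (an illustrative sketch, not part of your code):

import numpy as np

# NumPy raises this error when it cannot coerce the input into a
# rectangular array of the requested dtype (a ragged list stands in
# for the Tensor here):
np.array([[1.0, 2.0], [3.0]], dtype=np.float32)
# ValueError: setting an array element with a sequence.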

The solution is to rewrite your softmax and cross-entropy ops:

y_softmax = tf.nn.softmax(tf.matmul(x,W) + b)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_softmax))
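With that change, the placeholder y_ is fed with the NumPy labels rather than with a tensor. Note that the labels also have to match the [50, 2] shape of y_ (the follow-up error mentioned in the comment below), so the training step might end up looking roughly like this (a sketch; the np.eye one-hot encoding is just one way to do it):

train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

# One-hot encode the 0/1 labels from make_moons so they match the
# [50, 2] placeholder (illustrative; any consistent mapping works).
y_onehot = np.eye(2)[y]

sess.run(tf.initialize_all_variables())
for i in range(20000):
    sess.run(train_step, feed_dict={x: X, y_: y_onehot})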
mrry
  • Oh, what a stupid mistake. But another issue was raised: `ValueError: Cannot feed value of shape (50,) for Tensor u'Placeholder:0', which has shape (Dimension(50), Dimension(2))`. That one was fixed by looking at your other answer: http://stackoverflow.com/questions/33974231/tensorflow-error-using-my-own-data. Thanks a lot – Hanyu Guo Feb 19 '16 at 16:29