
I am stuck with a multi-GPU MNIST classifier in TensorFlow. The code runs without errors, but the accuracy is very poor (30%). I am new to TensorFlow, so I do not know where the problem is. GPUs: 2x GTX 1080 Ti.

I have found several multi-GPU tutorials, but the code is hard to follow. For this reason I am trying to develop an MNIST CNN classifier from scratch.

from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import datetime

def average_gradients(tower_grads):
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # Note that each grad_and_vars looks like the following:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    for g, _ in grad_and_vars:
      # Add 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)

      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)

    # Average over the 'tower' dimension.
    grad = tf.concat(axis=0, values=grads)
    grad = tf.reduce_mean(grad, 0)

    # Keep in mind that the Variables are redundant because they are shared
    # across towers. So .. we will just return the first tower's pointer to
    # the Variable.
    v = grad_and_vars[0][1]
    grad_and_var = (grad, v)
    average_grads.append(grad_and_var)
    return average_grads

with tf.device('/cpu:0'):
    x  = tf.placeholder(tf.float32, [None, 784], name='x')
    x_img=tf.reshape(x, [-1, 28, 28, 1])
    x_dict={}
    x_dict['x0'],x_dict['x1'] = tf.split(x_img,2)

    y_dict={}
    y = tf.placeholder(tf.float32, [None, 10],  name='y')
    y_dict['y0'],y_dict['y1'] = tf.split(y,2)

    opt=tf.train.GradientDescentOptimizer(0.01)


    keep_prob = tf.placeholder(tf.float32)

    w0=tf.get_variable('w0',initializer=tf.truncated_normal([5, 5,1,32], stddev=0.1))
    b0=tf.get_variable('b0',initializer=tf.zeros([32]))

    w1=tf.get_variable('w1',initializer=tf.truncated_normal([5,5,32,64], stddev=0.1))
    b1=tf.get_variable('b1',initializer=tf.zeros([64]))

    w2=tf.get_variable('w2',initializer=tf.truncated_normal([7*7*64,1024], stddev=0.1))
    b2=tf.get_variable('b2',initializer=tf.zeros([1024]))

    w3=tf.get_variable('w3',initializer=tf.truncated_normal([1024,10], stddev=0.1))
    b3=tf.get_variable('b3',initializer=tf.zeros([10]))


    grads=[]



def conv2d(xx, W):
    return tf.nn.conv2d(xx, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(xx):
    return tf.nn.max_pool(xx, ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1], padding='SAME')


def model_forward(xx):
    h_conv1=tf.nn.relu(conv2d(xx,w0)+b0);
    h_pool1=max_pool_2x2(h_conv1)

    h_conv2=tf.nn.relu(conv2d(h_pool1,w1)+b1);
    h_pool2=max_pool_2x2(h_conv2)

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])

    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,w2)+b2)

    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    y = tf.nn.sigmoid(tf.matmul(h_fc1_drop,w3)+b3)
    return y


for i in range(0,2):
    with tf.device(('/gpu:{0}').format(i)):
        with tf.variable_scope(('scope_gpu_{0}').format(i)):
            yy=model_forward(x_dict[('x{0}').format(i)])
            cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_dict[('y{0}').format(i)] * tf.log(yy), reduction_indices=[1]))
            grads.append(opt.compute_gradients(cross_entropy,tf.trainable_variables()))


with tf.device('/cpu:0'):
    grad = average_gradients(grads)
    train_step = opt.apply_gradients(grad)
    yy=model_forward(x_dict['x0'])
    correct_prediction = tf.equal(tf.argmax(yy, 1), tf.argmax(y_dict['y0'], 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')


def main():

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:

        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('C:\\tmp\\test\\', graph=tf.get_default_graph())

        t1_1 = datetime.datetime.now()
        for step in range(0,10000):
            batch_x, batch_y = mnist.train.next_batch(100)
            sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})

            if (step % 200) == 0:
                print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1}))
        t2_1 = datetime.datetime.now()

    print("Computation time: " + str(t2_1-t1_1))



if __name__ == "__main__":
    main()

3 Answers


The problems that I noticed:

  • Your cross-entropy loss is wrong (see this question for details; in short, you're computing binary cross-entropy).
  • I dropped manual gradient computation in favor of tf.train.AdamOptimizer.
  • I dropped the splitting of the input x (it's not the right way to do distributed computation in TensorFlow).

The resulting model easily reaches 99% accuracy, even on a single GPU.

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
import datetime

x = tf.placeholder(tf.float32, [None, 784], name='x')
x_img = tf.reshape(x, [-1, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, 10], name='y')
keep_prob = tf.placeholder(tf.float32)

stddev = 0.1
w0 = tf.get_variable('w0', initializer=tf.truncated_normal([5, 5, 1, 32], stddev=stddev))
b0 = tf.get_variable('b0', initializer=tf.zeros([32]))

w1 = tf.get_variable('w1', initializer=tf.truncated_normal([5, 5, 32, 64], stddev=stddev))
b1 = tf.get_variable('b1', initializer=tf.zeros([64]))

w2 = tf.get_variable('w2', initializer=tf.truncated_normal([7 * 7 * 64, 1024], stddev=stddev))
b2 = tf.get_variable('b2', initializer=tf.zeros([1024]))

w3 = tf.get_variable('w3', initializer=tf.truncated_normal([1024, 10], stddev=stddev))
b3 = tf.get_variable('b3', initializer=tf.zeros([10]))

def conv2d(xx, W):
  return tf.nn.conv2d(xx, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(xx):
  return tf.nn.max_pool(xx, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def model_forward(xx):
  h_conv1 = tf.nn.relu(conv2d(xx, w0) + b0)
  h_pool1 = max_pool_2x2(h_conv1)

  h_conv2 = tf.nn.relu(conv2d(h_pool1, w1) + b1)
  h_pool2 = max_pool_2x2(h_conv2)

  h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
  h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w2) + b2)
  h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
  y = tf.matmul(h_fc1_drop, w3) + b3
  return y

yy = model_forward(x_img)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yy, labels=y))  # categorical (softmax) cross-entropy on logits
train_step = tf.train.AdamOptimizer().minimize(loss)
correct_prediction = tf.equal(tf.argmax(yy, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')

def main():
  mnist = input_data.read_data_sets("/home/maxim/p/data/mnist-tf", one_hot=True)
  with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    sess.run(tf.global_variables_initializer())

    t1_1 = datetime.datetime.now()
    for step in range(0, 10000):
      batch_x, batch_y = mnist.train.next_batch(100)
      sess.run(train_step, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})

      if (step % 200) == 0:
        print(step, sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1}))

    t2_1 = datetime.datetime.now()
    print("Computation time: " + str(t2_1 - t1_1))

if __name__ == "__main__":
  main()

Now, if you really want to, you can use data or model parallelism to utilize your GPU power (there is a great post about it, though it sometimes doesn't render correctly due to hosting problems).
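
For completeness, a rough sketch of what the data-parallel variant could look like (an illustration, not the linked post's code): every GPU builds a tower over its own shard of the batch using the same shared weights, and the averaged gradients are applied once. It assumes the corrected model_forward above (which returns logits) and an average_gradients helper like the one in the question, with its return moved outside the loop.

opt = tf.train.AdamOptimizer()
tower_grads = []
x_shards = tf.split(x_img, 2)  # one batch shard per GPU; a per-tower input pipeline also works
y_shards = tf.split(y, 2)

for i in range(2):
    with tf.device('/gpu:{}'.format(i)):
        tower_logits = model_forward(x_shards[i])  # same weights, different data shard
        tower_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=tower_logits, labels=y_shards[i]))
        tower_grads.append(opt.compute_gradients(tower_loss))

with tf.device('/cpu:0'):
    # Average the per-tower gradients and apply them once to the shared variables.
    train_step = opt.apply_gradients(average_gradients(tower_grads))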

  • Hello, thank you for your comments. The problem is the multi-GPU computation, where you manually compute gradients, average them, and update the weights on the CPU, as in this tutorial: https://github.com/normanheckscher/mnist-multi-gpu I would like to use the softmax function, but it could not be used on multi-GPU due to an error, so I used sigmoid instead. – Petr Nejedlý Dec 15 '17 at 14:04
  • I would be more interested to see the multi-GPU version. I am not sure how to handle weight and bias sharing between GPUs, etc. – Petr Nejedlý Dec 15 '17 at 14:37

Along with the points mentioned in the first two answers, take a look at the return average_grads statement in the average_gradients function: it returns during the first iteration of the outer for loop, so the averaged gradients only cover the first variable (probably w0). Hence only w0 gets updated, and you get very low accuracy because the remaining variables stay at their initial values (random or zeros).
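
A minimal fix is to move the return out of both loops so it runs only after every variable has been averaged, for example:

def average_gradients(tower_grads):
  average_grads = []
  for grad_and_vars in zip(*tower_grads):
    # Stack this variable's per-tower gradients and average them.
    grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
    grad = tf.reduce_mean(tf.concat(grads, axis=0), 0)
    # Variables are shared across towers, so keep the first tower's handle.
    average_grads.append((grad, grad_and_vars[0][1]))
  return average_grads  # return only after every variable has been processed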


This is because the model is not using the same weights and biases for inference on the CPU as on the other GPU devices.

For example:

for i in range(0,2):
    with tf.device(('/gpu:{0}').format(i)):
        with tf.variable_scope(('scope_gpu_{0}').format(i)) as infer_scope:
            yy=model_forward(x_dict[('x{0}').format(i)])
            infer_scope.reuse_variables()
            cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_dict[('y{0}').format(i)] * tf.log(yy), reduction_indices=[1]))
            grads.append(opt.compute_gradients(cross_entropy,tf.trainable_variables()))

The reason you are getting low accuracy is that, without specifying reuse_variables(), each call to the model inference builds the graph with a fresh set of randomly initialized weights and biases, which is not what you want.
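
To make that concrete, here is a sketch of one common way to get a single shared set of weights across towers. It is an illustration under the assumption that model_forward creates its variables with tf.get_variable inside the scope (rather than closing over module-level globals), and it reuses opt, grads, x_dict and y_dict from the question:

for i in range(0, 2):
    with tf.device('/gpu:{}'.format(i)):
        # One scope name for all towers; reuse after the first tower so every
        # device looks up the same variables instead of creating new ones.
        with tf.variable_scope('model', reuse=(i > 0)):
            yy = model_forward(x_dict['x{}'.format(i)])
            cross_entropy = tf.reduce_mean(
                -tf.reduce_sum(y_dict['y{}'.format(i)] * tf.log(yy), reduction_indices=[1]))
            grads.append(opt.compute_gradients(cross_entropy, tf.trainable_variables()))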