
I'm trying to implement a generative adversarial network (GAN) in TensorFlow. It trains, but I need a way to assess how well it's performing. The code is:

import numpy as np
import tensorflow as tf

writer = tf.summary.FileWriter("../Results/demo/Aug12")
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape):
    """
    Train the GAN
    """
    input_real, input_z, _ = model_inputs(data_shape[1], data_shape[2], data_shape[3], z_dim)
    d_loss, g_loss = model_loss(input_real, input_z, data_shape[3])
    d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)

    steps = 0

    #config stops the code running out of RAM instantly - https://stackoverflow.com/a/44102727/386861
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epoch_count):
            for batch_images in get_batches(batch_size):

                # values range from -0.5 to 0.5, therefore scale to range -1, 1
                batch_images = batch_images * 2
                steps += 1

                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
                #print ("batch_z", batch_z, d_opt, len(batch_images))
                _ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
                #print('running')
                _ = sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z})

                if steps % 400 == 0:
                    # Every 400 steps, get the losses and print them out
                    train_loss_d = d_loss.eval({input_z: batch_z, input_real: batch_images})
                    train_loss_g = g_loss.eval({input_z: batch_z})

                    print("Epoch {}/{}...".format(epoch_i+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))

                    _ = show_generator_output(sess, 1, input_z, data_shape[3])
                    writer.add_summary(sess, epoch_i)

The stack trace is:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
<ipython-input-10-91c25d921378> in <module>()
     11 with tf.Graph().as_default():
     12     #print(50176 /sum(shape * z_dim * IMAGE_HEIGHT * IMAGE_WIDTH * batch_size))
---> 13     train(epochs, batch_size, z_dim, learning_rate, beta1, get_batches, shape)
     14     count =+ 1
     15 

<ipython-input-9-dd546272c993> in train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape)
     38 
     39                     _ = show_generator_output(sess, 1, input_z, data_shape[3])
---> 40                     writer.add_summary(sess, epoch_i)

~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\summary\writer\writer.py in add_summary(self, summary, global_step)
    123     # to save space - we just store the metadata on the first value with a
    124     # specific tag.
--> 125     for value in summary.value:
    126       if not value.metadata:
    127         continue

AttributeError: 'Session' object has no attribute 'value'
  • The first argument of `writer.add_summary` is a [`Summary` protocol buffer](https://www.tensorflow.org/api_docs/python/tf/summary/FileWriter#add_summary); here is [an example](https://github.com/aymericdamien/TensorFlow-Examples/blob/12ed38ed50a78897d93bbc24c90369ec70adcf76/examples/4_Utils/tensorboard_basic.py#L79-L82). – BugKiller Aug 12 '18 at 17:34
  • @BugKiller Thank you for that. I'm new to TF and finding it very awkward to find good documents. – elksie5000 Aug 12 '18 at 18:57

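As the comment above points out, `add_summary` expects a `Summary` protocol buffer and a global step, not the `Session` object itself. A minimal sketch of that usage, assuming scalar summary ops are created for the two losses before the session starts (the tag names and the placement of the `FileWriter` are illustrative, not taken from the original code):

# Build summary ops next to the loss ops, before creating the session
tf.summary.scalar("d_loss", d_loss)
tf.summary.scalar("g_loss", g_loss)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("../Results/demo/Aug12", tf.get_default_graph())

# Inside the training loop, instead of writer.add_summary(sess, epoch_i):
summary_str = sess.run(merged,
                       feed_dict={input_real: batch_images, input_z: batch_z})
writer.add_summary(summary_str, steps)  # Summary proto + global step

The serialized summary returned by `sess.run(merged, ...)` is what `add_summary` serializes to the event file, and passing `steps` as the global step lets TensorBoard plot both losses over training time.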