
Import Libraries

%matplotlib inline
import tensorflow as tf
from tensorflow import keras
import numpy as np
import plot_utils
import matplotlib.pyplot as plt
from tqdm import tqdm
print('Tensorflow version:', tf.__version__)

Task 3: Create Batches of Training Data

batch_size = 32
# shuffle() fills a buffer with buffer_size elements, then randomly samples
# elements from this buffer, replacing the selected elements with new elements.
dataset = tf.data.Dataset.from_tensor_slices(x_train).shuffle(1000)
# batch() combines consecutive elements of this dataset into batches;
# prefetch() creates a Dataset that prefetches elements from this dataset.
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)


print(dataset)
output: <PrefetchDataset shapes: (32, 32, 32, 3), types: tf.float64>

Task 4: Build the Generator Network for DCGAN

num_features = 100
generator = keras.models.Sequential([
    keras.layers.Dense(256*4*4, input_shape=[num_features]),
    keras.layers.Reshape([4,4,256]),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2DTranspose(128, (4,4), (2,2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2DTranspose(128, (4,4), (2,2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2DTranspose(128, (4,4), (2,2), padding="same", activation="selu"),
    keras.layers.BatchNormalization(),
    keras.layers.Conv2DTranspose(3, (3,3), padding="same", activation="tanh"),
])

import numpy as np
import matplotlib.pyplot as plt

def show(images, n_cols=None):
    n_cols = n_cols or len(images)
    n_rows = (len(images) - 1) // n_cols + 1
    if images.shape[-1] == 1:
        images = np.squeeze(images, axis=-1)
    plt.figure(figsize=(n_cols, n_rows))
    for index, image in enumerate(images):
        plt.subplot(n_rows, n_cols, index + 1)
        plt.imshow(image, cmap="binary")
        plt.axis("off")

noise = tf.random.normal(shape=[1, num_features])
generated_images = generator(noise, training=False)
show(generated_images,1)

Task 5: Build the Discriminator Network for DCGAN

discriminator = keras.models.Sequential([
    keras.layers.Conv2D(64, (3,3), (2,2), padding="same", input_shape=[32,32,3]),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    keras.layers.Conv2D(128, (3,3), (2,2), padding="same"),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    keras.layers.Conv2D(256, (3,3), (2,2), padding="same"),
    keras.layers.LeakyReLU(0.2),
    keras.layers.Dropout(0.3),
    keras.layers.Flatten(),
    keras.layers.Dense(1, activation='sigmoid')
])

decision = discriminator(generated_images)
print(decision)
output: tf.Tensor([[0.5006197]], shape=(1, 1), dtype=float32)

Task 6: Compile the Deep Convolutional Generative Adversarial Network (DCGAN)

discriminator.compile(loss="binary_crossentropy", optimizer="rmsprop")
discriminator.trainable = False
gan = keras.models.Sequential([generator, discriminator])
gan.compile(loss="binary_crossentropy", optimizer="rmsprop")


from IPython import display
from tqdm import tqdm
seed = tf.random.normal(shape=[batch_size, 100])

Task 7: Define Training Procedure

from tqdm import tqdm
def train_dcgan(gan, dataset, batch_size, num_features, epochs=5):
    generator, discriminator = gan.layers
    for epoch in tqdm(range(epochs)):
        print("Epoch {}/{}".format(epoch + 1, epochs))
        for X_batch in dataset:
            noise = tf.random.normal(shape=[batch_size, num_features])
            generated_images = generator(noise)
            X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
            y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
            discriminator.trainable = True
            discriminator.train_on_batch(X_fake_and_real, y1)
            noise = tf.random.normal(shape=[batch_size, num_features])
            y2 = tf.constant([[1.]] * batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y2)
            # Produce images for the GIF as we go
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, seed)
        
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, seed)


## Source https://www.tensorflow.org/tutorials/generative/dcgan#create_a_gif
def generate_and_save_images(model, epoch, test_input):
  # Notice `training` is set to False.
  # This is so all layers run in inference mode (batchnorm).
  predictions = model(test_input, training=False)

  fig = plt.figure(figsize=(10,10))

  for i in range(25):
      plt.subplot(5, 5, i+1)
      plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='binary')
      plt.axis('off')

  plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
  plt.show()

Task 8: Train DCGAN

x_train_dcgan = x_train.reshape(-1, 32,32,3) * 2. - 1.

batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan)
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)

This is where the main problem occurs:

%%time
train_dcgan(gan, dataset, batch_size, num_features, epochs=10)
output:
    7             noise = tf.random.normal(shape=[batch_size, num_features])
      8             generated_images = generator(noise)
----> 9             X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
     10             y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
     11             discriminator.trainable = True
cannot compute ConcatV2 as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:ConcatV2] name: concat

This is a CIFAR-10 DCGAN. I really don't understand this error or how to fix it.
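
The message itself points at the two inputs of tf.concat having different dtypes (float32 vs float64). A minimal standalone sketch, separate from the notebook above, reproduces the same error:

import numpy as np
import tensorflow as tf

a = tf.random.normal(shape=[2, 3])       # tf.float32 (TensorFlow's default float dtype)
b = tf.constant(np.random.rand(2, 3))    # tf.float64 (NumPy's default float dtype is preserved)

tf.concat([a, b], axis=0)                # raises: expected to be a float tensor but is a double tensor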

  • What is the dtype of the elements in x_train? – Tou You Oct 10 '20 at 09:53
  • dtype('float64') is the dtype of x_train! – ANVAY ABHIRAAJ Oct 10 '20 at 13:32
  • Welcome to Stack Overflow. I have removed lots (and lots) of unnecessary formatting from your question in an attempt to make it readable. Please take the time to take the [tour], read [ask] and then familiarise yourself with the [formatting instructions](https://stackoverflow.com/editing-help). If your questions are clearer, you will get more/better answers. – David Buck Oct 12 '20 at 08:55
  • Detail is never in overabundance, but try to add a summary for the question (on top) so that the problem can be deduced more easily without having to read through some thousand-odd words. It may help your chances of getting an answer. – Matias Chara Oct 12 '20 at 13:42
  • I have similar code to yours, @ANVAYABHIRAAJ, and I tried @TouYou's suggestion: `X_batch = tf.cast(X_batch, tf.float32)`. This got my training kicked off. Hope that helps! – vsm Jan 22 '21 at 23:35

2 Answers


By default, TensorFlow uses float32. You have to convert your data to tf.float32.

X = tf.cast(yourDATA, tf.float32) 
Tou You
  • I already tried that, but it keeps saying: Attempt to convert a value () with an unsupported type () to a Tensor – ANVAY ABHIRAAJ Oct 14 '20 at 15:31
  • Yes, that is because you can't cast the dataset; you have to cast your data (tf.Tensor or np.array) before constructing the tf.data.Dataset. – Tou You Oct 14 '20 at 21:18
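
Concretely, a minimal sketch of what that looks like for the Task 8 cell in the question (assuming x_train is the float64 NumPy array mentioned in the comments; the cast happens before the tf.data.Dataset is constructed):

# Rescale to [-1, 1] as before, then cast to float32 so every batch the
# dataset yields already matches TensorFlow's default dtype.
x_train_dcgan = x_train.reshape(-1, 32, 32, 3) * 2. - 1.
x_train_dcgan = tf.cast(x_train_dcgan, tf.float32)

batch_size = 32
dataset = tf.data.Dataset.from_tensor_slices(x_train_dcgan)
dataset = dataset.shuffle(1000)
dataset = dataset.batch(batch_size, drop_remainder=True).prefetch(1)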

The following snippet, applied before the tf.concat operation, worked for me in code inspired by the same TensorFlow sample:

X_batch = tf.cast(X_batch, tf.float32)
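
For context, here is roughly how that one line slots into the question's train_dcgan loop (a sketch only: the tqdm progress bar and the GIF-saving calls are left out, and everything else is assumed unchanged from the question):

def train_dcgan(gan, dataset, batch_size, num_features, epochs=5):
    generator, discriminator = gan.layers
    for epoch in range(epochs):
        for X_batch in dataset:
            # Cast each real batch from float64 to float32 so it matches
            # the generator's output dtype before the concat below.
            X_batch = tf.cast(X_batch, tf.float32)
            noise = tf.random.normal(shape=[batch_size, num_features])
            generated_images = generator(noise)
            X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
            y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
            discriminator.trainable = True
            discriminator.train_on_batch(X_fake_and_real, y1)
            noise = tf.random.normal(shape=[batch_size, num_features])
            y2 = tf.constant([[1.]] * batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y2)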
vsm