I am new to TensorFlow and I am trying to train on the CIFAR-10 dataset. I noticed that, no matter what batch size I use, the Nvidia control panel reports 97% of my GPU memory in use. I tried batch sizes from 100 down to 2, and in each case GPU memory usage stays at 97%. Why would it do this?
def batchGenerator(batch_size=32, data=None):
    """Yield shuffled (x, y) batches with random flip augmentation.

    Args:
        batch_size: number of samples per yielded batch.
        data: sequence of (image, label) pairs where image is a 32x32x3
            array and label an int in [0, 10). Defaults to the module-level
            ``train_data`` (kept for backward compatibility). NOTE: the
            sequence is shuffled in place, as the original code did.

    Yields:
        (x, y): x of shape (batch_size, 32, 32, 3), y one-hot of shape
        (batch_size, 10).
    """
    if data is None:
        data = train_data
    random.shuffle(data)
    bi = 0
    # Use <= so a final exactly-full batch is not dropped (the original `<`
    # silently discarded the last batch when len(data) % batch_size == 0).
    while bi + batch_size <= len(data):
        x = np.zeros((batch_size, 32, 32, 3))
        y = np.zeros((batch_size, 10))
        for b in range(batch_size):
            x[b] = data[bi + b][0]
            # Bug fix: the original assigned cv2.flip(...) to `img` and then
            # discarded it, so the augmentation never touched x[b]. Assign the
            # flipped image back; numpy slicing is equivalent to cv2.flip and
            # avoids the cv2 dependency here.
            if random.choice((True, False)):
                x[b] = x[b][::-1]       # vertical flip (== cv2.flip(img, 0))
            if random.choice((True, False)):
                x[b] = x[b][:, ::-1]    # horizontal flip (== cv2.flip(img, 1))
            y[b][data[bi + b][1]] = 1
        bi += batch_size
        yield x, y
# Answer to the question in this post: TensorFlow 1.x by default pre-allocates
# nearly ALL free GPU memory when the session starts, regardless of batch size
# — that is why the control panel always shows ~97% usage. allow_growth makes
# TF allocate GPU memory on demand instead.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True

with tf.Session(config=config) as s:
    # tf.initialize_all_variables() is deprecated (removed after TF 1.x);
    # tf.global_variables_initializer() is the drop-in replacement.
    s.run(tf.global_variables_initializer())
    for epoch in range(100):
        total_acc = 0.0
        n_batches = 0
        # x, y_exp, p_keep, opt, accuracy are graph tensors/ops defined
        # elsewhere in the file (not visible in this chunk).
        for x_train, y_train in batchGenerator(2):
            _, batch_acc = s.run(
                [opt, accuracy],
                feed_dict={x: x_train, y_exp: y_train, p_keep: 0.5})
            total_acc += batch_acc
            n_batches += 1
        print('Epoch', epoch, 'Accuracy', total_acc / n_batches)