What is the most efficient way to load an image with TensorFlow and crop 100 images from that one image?
What I tried is:
import numpy as np
import tensorflow as tf
import cv2
filenames_train = ['image1.jpg', 'image2.jpg', 'image3.jpg']
def _opencv_operation(image, label):
    # operation on the image with OpenCV, outside of TensorFlow
    kernel = np.ones((5, 5), np.float32) / 25
    image = cv2.filter2D(image, -1, kernel)
    return image, np.int64(label)  # must match the tf.int64 declared in tf.py_func
def _read_images_and_crop(image_path):
    image = tf.read_file(image_path)
    image = tf.image.decode_jpeg(image)
    print(image.shape)
    image.set_shape([None, None, None])
    image = tf.cast(image, tf.float32)
    image = tf.scalar_mul(2. / 255., image) - 1.  # scale to [-1, 1]
    image = tf.image.resize_images(image, [299, 299])
    image = tf.reshape(image, (299, 299, 3))
    label = 1
    #r_values1 = #random values#
    #image1 = tf.image.crop_and_resize(image, r_values1)
    # ...
    #r_values100 = #random values#
    #image100 = tf.image.crop_and_resize(image, r_values100)
    #label = r_values1 ... r_values100
    return image, label
    # but what I actually want to return is:
    # return [image1, image2, ..., image100], [label1, label2, ..., label100]
# Training dataset
dataset_train = tf.data.Dataset.from_tensor_slices(filenames_train)
dataset_train = dataset_train.map(_read_images_and_crop)
# the first map already decoded the image, so the lambda receives
# an (image, label) pair, not a filename
dataset_train = dataset_train.map(
    lambda image, label: tuple(tf.py_func(
        _opencv_operation, [image, label], [tf.float32, tf.int64])))
dataset_train = dataset_train.batch(5)
iterator = tf.data.Iterator.from_structure(dataset_train.output_types,
                                           dataset_train.output_shapes)
(next_images, next_labels) = iterator.get_next()
training_init_op = iterator.make_initializer(dataset_train)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i_epoch in range(5):
        sess.run(training_init_op)
        curr_images, curr_labels = sess.run([next_images, next_labels])
So, what my script does is read one image from a file, resize it, and give that single image as an output.
What I need is to crop that image afterwards with 100 different crop parameters, so that I get 100 images and 100 labels as output. But in the end I still want only as many outputs per step as the batch size.
Is this possible with the Dataset API, or is it only possible to load one image from a file and process that one image until it is the output of dataset_train?
I don't want to load an image 100 times and process it 100 times. I want to load it once and process it 100 times (e.g. crop, blur with different parameters, and so on).
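What I imagine is something along these lines: the map function returns all 100 crops for one image at once, and the dataset is then flattened back into single elements before batching. This is only an untested sketch; the fixed half-size random boxes and the constant labels are placeholders I made up, and tf.data.experimental.unbatch() needs TF 1.13+ (older versions have tf.contrib.data.unbatch()):

def _read_and_crop_100(image_path):
    # decode and preprocess the image exactly once
    image = tf.read_file(image_path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.cast(image, tf.float32) * (2. / 255.) - 1.
    image = tf.image.resize_images(image, [299, 299])
    image = tf.expand_dims(image, 0)  # crop_and_resize expects a batch dimension
    # 100 random boxes [y1, x1, y2, x2] in normalized coordinates;
    # here each crop covers half the image at a random offset (placeholder choice)
    offsets = tf.random_uniform([100, 2], 0., 0.5)
    boxes = tf.concat([offsets, offsets + 0.5], axis=1)
    crops = tf.image.crop_and_resize(
        image, boxes, tf.zeros([100], tf.int32), crop_size=[299, 299])
    labels = tf.ones([100], tf.int64)  # placeholder labels, one per crop
    return crops, labels

dataset_train = tf.data.Dataset.from_tensor_slices(filenames_train)
dataset_train = dataset_train.map(_read_and_crop_100)
# flatten the 100 crops per image back into single elements,
# so that batch(5) afterwards yields 5 crops per step, not 5x100
dataset_train = dataset_train.apply(tf.data.experimental.unbatch())
dataset_train = dataset_train.batch(5)

If unbatch() is not available, I believe dataset_train.flat_map(lambda imgs, lbls: tf.data.Dataset.from_tensor_slices((imgs, lbls))) would do the same flattening.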