
Does anyone know of any TensorFlow code for undistorting images (removing fisheye effects)?

Currently, I am undistorting images using OpenCV. However, I want to push that code inside of the net. Is there open-source code or a TensorFlow function for doing this? I can't find anything via Google.
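
For reference, the OpenCV preprocessing I want to replace looks roughly like this (using the `cv2.fisheye` module; `K` and `D` below are placeholder values for illustration, not my real calibration):

    import cv2
    import numpy as np

    # Placeholder camera matrix and fisheye coefficients (k1..k4), for illustration only
    K = np.array([[300.0, 0.0, 320.0],
                  [0.0, 300.0, 240.0],
                  [0.0, 0.0, 1.0]])
    D = np.array([0.1, -0.05, 0.01, -0.001])

    img = cv2.imread('fisheye.png')
    undistorted = cv2.fisheye.undistortImage(img, K, D, Knew=K)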

bremen_matt
  • I don't think there is anything like that for TensorFlow. You may be able to implement the algorithm yourself, though. [This question](https://stackoverflow.com/questions/31089265/what-are-the-main-references-to-the-fish-eye-camera-model-in-opencv3-0-0dev) on the fish-eye camera model in OpenCV might shed some light on their particular implementation, and it also mentions other models. – jdehesa Jan 15 '18 at 15:26
  • Yeah. I mean I took a look at the source code myself. I was just hoping I wouldn't have to go down that path. – bremen_matt Jan 15 '18 at 20:17

1 Answer


Here you go. I hacked the spatial_transformer code. If your camera matrix is not the identity, I think you will have to multiply the points by K^-1 (the inverse camera matrix) before running this code and multiply them by K afterwards.

    import tensorflow as tf

    def distort(images, d, name='distort'):
        def _repeat(x, n_repeats):
            with tf.variable_scope('_repeat'):
                rep = tf.transpose(
                    tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1),
                    [1, 0])
                rep = tf.cast(rep, 'int32')
                x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
                return tf.reshape(x, [-1])

        def _interpolate(im, x, y, out_size):
            with tf.variable_scope('_interpolate'):
                # constants
                num_batch = tf.shape(im)[0]
                height = tf.shape(im)[1]
                width = tf.shape(im)[2]
                channels = tf.shape(im)[3]

                x = tf.cast(x, 'float32')
                y = tf.cast(y, 'float32')
                height_f = tf.cast(height, 'float32')
                width_f = tf.cast(width, 'float32')
                out_height = out_size[0]
                out_width = out_size[1]
                zero = tf.zeros([], dtype='int32')
                max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
                max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')

                # scale indices from [-1, 1] to [0, width/height]
                x = (x + 1.0)*(width_f) / 2.0
                y = (y + 1.0)*(height_f) / 2.0

                # do sampling
                x0 = tf.cast(tf.floor(x), 'int32')
                x1 = x0 + 1
                y0 = tf.cast(tf.floor(y), 'int32')
                y1 = y0 + 1

                x0 = tf.clip_by_value(x0, zero, max_x)
                x1 = tf.clip_by_value(x1, zero, max_x)
                y0 = tf.clip_by_value(y0, zero, max_y)
                y1 = tf.clip_by_value(y1, zero, max_y)
                dim2 = width
                dim1 = width*height
                base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
                base_y0 = base + y0*dim2
                base_y1 = base + y1*dim2
                idx_a = base_y0 + x0
                idx_b = base_y1 + x0
                idx_c = base_y0 + x1
                idx_d = base_y1 + x1

                # use indices to lookup pixels in the flat image and restore
                # channels dim
                im_flat = tf.reshape(im, tf.stack([-1, channels]))
                im_flat = tf.cast(im_flat, 'float32')
                Ia = tf.gather(im_flat, idx_a)
                Ib = tf.gather(im_flat, idx_b)
                Ic = tf.gather(im_flat, idx_c)
                Id = tf.gather(im_flat, idx_d)

                # and finally calculate interpolated values
                x0_f = tf.cast(x0, 'float32')
                x1_f = tf.cast(x1, 'float32')
                y0_f = tf.cast(y0, 'float32')
                y1_f = tf.cast(y1, 'float32')
                wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
                wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
                wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
                wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
                output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
                return output

        def _transform(images, d, out_size):
            with tf.variable_scope('_transform'):
                shape = tf.shape(images)
                num_batch = tf.shape(images)[0]
                num_channels = images.get_shape()[3]

                out_width = out_size[1]
                out_height = out_size[0]
                # nominal intrinsics of the normalized [-1, 1] grid (defined but not used below)
                cx = fx = fy = tf.to_float(out_width) / 2
                cy = tf.to_float(out_height) / 2

                # sampling grid in normalized coordinates
                x = tf.linspace(-1., 1., out_width)
                y = tf.linspace(-1., 1., out_height)
                x, y = tf.meshgrid(x, y)
                x = tf.tile(tf.reshape(x, [1, -1, 1]), [num_batch, 1, 1])
                y = tf.tile(tf.reshape(y, [1, -1, 1]), [num_batch, 1, 1])

                a = x
                b = y

                r2 = tf.square(a) + tf.square(b)
                r = tf.sqrt(r2)
                # debug output of the radius range
                r = tf.Print(r, [tf.reduce_min(r), tf.reduce_max(r)], "R min/max: ")

                # OpenCV-style fisheye model:
                # theta_d = theta * (1 + d1*theta^2 + d2*theta^4 + d3*theta^6 + d4*theta^8)
                theta = tf.atan(r)
                theta_d = theta * (1.0 + tf.reduce_sum(
                    tf.reshape(d, [1, 1, 4]) * tf.concat(
                        [tf.square(theta), tf.pow(theta, 4),
                         tf.pow(theta, 6), tf.pow(theta, 8)], axis=-1),
                    axis=-1, keepdims=True))
                # guard against division by zero at the exact grid centre
                tdr = theta_d / tf.maximum(r, 1e-8)
                xd = a * tdr
                yd = b * tdr

                xd = tf.reshape(xd, [-1])
                yd = tf.reshape(yd, [-1])

                input_transformed = _interpolate(images, xd, yd, out_size)
                output = tf.reshape(
                    input_transformed,
                    tf.stack([num_batch, out_height, out_width, num_channels]))
                return output

        with tf.variable_scope(name):
            output = _transform(images, d, tf.shape(images)[1:3])
            return output
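
For completeness, here is a rough usage sketch, assuming TensorFlow 1.x graph mode, the identity-camera-matrix case described above, and made-up coefficients d = [k1, k2, k3, k4]:

    import numpy as np
    import tensorflow as tf

    images = tf.placeholder(tf.float32, [None, 480, 640, 3])
    d = tf.constant([0.1, -0.05, 0.01, -0.001], tf.float32)  # hypothetical fisheye coefficients

    warped = distort(images, d)

    with tf.Session() as sess:
        batch = np.random.rand(2, 480, 640, 3).astype(np.float32)
        out = sess.run(warped, feed_dict={images: batch})
        print(out.shape)  # (2, 480, 640, 3)
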
Nate M