4

I'm using LSD to detect straight lines in an image. The code I downloaded contains a minimal example of calling LSD, but it is static (i.e. it only processes the image that is hard-coded in the main function). I want to apply the code to a video. Here is the minimal example that outputs the static results:

#include <stdio.h>
#include "lsd.h"

int main(void)
{
  image_double image;
  ntuple_list out;
  unsigned int x,y,i,j;
  unsigned int X = 512;  /* x image size */
  unsigned int Y = 512;  /* y image size */

  /* create a simple image: left half black, right half gray */
  image = new_image_double(X,Y);
  for(x=0;x<X;x++)
    for(y=0;y<Y;y++)
      image->data[ x + y * image->xsize ] = x<X/2 ? 0.0 : 64.0; /* image(x,y) */
  IplImage* imgInTmp = cvLoadImage("C:\\Documents and Settings\\Eslam farag\\My Documents\\Visual Studio 2008\\Projects\\line\\hand.JPEG", 0);

  /* call LSD */

  out = lsd(image);

  /* print output */
  printf("%u line segments found:\n",out->size);
  for(i=0;i<out->size;i++)
    {
      for(j=0;j<out->dim;j++)
        printf("%f ",out->values[ i * out->dim + j ]);
      printf("\n");
    }

  /* free memory */
  free_image_double(image);
  free_ntuple_list(out);

  return 0;
}

If anyone can help me apply this code to a video, I would be grateful. Thanks, best regards.

Eslam Hamdy
  • This is a fairly broad question. What exactly are you having problems with - acquiring video from a camera? Getting video data from a saved video file? Streaming video data from the network? Do you already have the frame data? – bdonlan Jun 19 '11 at 02:40
  • Right now you are loading an image with `cvLoadImage()` into `imgInTmp`, but you are not doing anything with it in the code you are showing us. – karlphillip Jun 19 '11 at 03:14
  • Check this question for doing image processing on camera frames: http://stackoverflow.com/q/3907028/176769 – karlphillip Jun 19 '11 at 03:15
  • @bdonlan, @karlphillip, thank you very much. I have recently learned how to do that (i.e. acquiring video from a camera), and sorry for being somewhat vague in my question. Best regards, – Eslam Hamdy Jun 20 '11 at 02:34

2 Answers

4

Since I couldn't find a complete example, I'm sharing some code I wrote that uses OpenCV to load a video file from disk and perform some image processing on it.

The application takes a filename as input (on the command line) and converts each frame of the video to its grayscale equivalent using the OpenCV built-in function cvCvtColor().

I added some comments to the code to help you understand the basic tasks.

read_video.cpp:

#include <stdio.h>
#include <highgui.h>
#include <cv.h>

int main(int argc, char* argv[])
{
    cvNamedWindow("video", CV_WINDOW_AUTOSIZE);

    if (argc < 2)
    {
        printf("Usage: %s <video_file>\n", argv[0]);
        return -1;
    }

    CvCapture *capture = cvCaptureFromAVI(argv[1]);
    if (!capture)
    {
        printf("!!! cvCaptureFromAVI failed (file not found?)\n");
        return -1;
    }

    IplImage* frame;
    char key = 0;
    while (key != 'q') // Loop for querying video frames. Pressing Q will quit
    {
        frame = cvQueryFrame( capture );
        if( !frame )
        {
            printf("!!! cvQueryFrame failed\n");
            break;
        }

        /* Let's do a grayscale conversion just 4 fun */

        // A grayscale image has only one channel, while the frames captured by
        // OpenCV have 3 channels (stored in BGR order). So, for the conversion to
        // work, we need to allocate an image with only 1 channel to store the
        // result of this operation.
        IplImage* gray_frame = 0;
        gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
        if (!gray_frame)
        {
            printf("!!! cvCreateImage failed!\n" );
            return -1;
        }

        cvCvtColor(frame, gray_frame, CV_BGR2GRAY); // The conversion itself (captured frames are BGR)

        // Display processed frame on window
        cvShowImage("video", gray_frame);

        // Release allocated resources
        cvReleaseImage(&gray_frame);

        key = cvWaitKey(33);
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("video");
}

Compiled with:

g++ read_video.cpp -o read `pkg-config --cflags --libs opencv`
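
Then run it with the path of a video file as the first argument (the filename here is just an example):

./read video.avi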

If you want to know how to iterate through the pixels of the frame to do your own custom processing, check the following answer, which shows how to do a manual grayscale conversion: OpenCV cvSet2d.....what does this do
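
For reference, a rough sketch of such a per-pixel loop could look like the code below (this is my own illustration, not the linked answer verbatim); it assumes `frame` is the BGR IplImage* from the capture loop above:

// Manual grayscale conversion by iterating over every pixel with cvGet2D()/cvSet2D()
IplImage* gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
for (int y = 0; y < frame->height; y++)
{
    for (int x = 0; x < frame->width; x++)
    {
        CvScalar bgr = cvGet2D(frame, y, x);                               // channels: B, G, R
        double v = 0.114*bgr.val[0] + 0.587*bgr.val[1] + 0.299*bgr.val[2]; // luminance weights
        cvSet2D(gray, y, x, cvScalar(v));                                  // write the gray value
    }
}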

karlphillip
  • @Eslam You don't need to accept my answer if it didn't solve your problem, and I feel like it didn't. You can undo the *accepted* mark by clicking the checkmark again. Nevertheless, if my answer helped you, you might consider voting it up; now that you have 15 reputation points I believe you can vote on questions/answers. – karlphillip Jul 05 '11 at 01:27
1

Here is an example of code using LSD with OpenCV:
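
The snippets below assume a small `Line` struct for the segment endpoints and a `draw_lines()` helper; neither comes from LSD or OpenCV, so here is a rough sketch of what they could look like (my guess at the missing pieces, not the answerer's original code):

#include <cv.h>
#include <highgui.h>
#include <vector>
using std::vector;

/* Assumed helper type: a simple container for one detected segment. */
struct Line { double x1, y1, x2, y2; };

/* Assumed helper: draw every segment as a white anti-aliased line on img. */
void draw_lines(const vector<Line>& vec, IplImage* img)
{
    for (size_t i = 0; i < vec.size(); ++i)
        cvLine(img, cvPoint((int)vec[i].x1, (int)vec[i].y1),
               cvPoint((int)vec[i].x2, (int)vec[i].y2),
               CV_RGB(255,255,255), 1, CV_AA);
}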

#include "lsd.h"

void Test_LSD(IplImage* img)
{
    IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvCvtColor(img, grey, CV_BGR2GRAY);
    image_double image;
    ntuple_list out;
    unsigned int x,y,i,j;
    image = new_image_double(img->width,img->height);
    for(x=0;x<grey->width;x++)
    for(y=0;y<grey->height;y++)
    {
      CvScalar s= cvGet2D(grey,y,x);
      double pix= s.val[0];
      image->data[ x + y * image->xsize ]= pix; /* image(x,y) */
    }

    /* call LSD */
    out = lsd(image);
    //out= lsd_scale(image,1);

    /* print output */
    printf("%u line segments found:\n",out->size);
    vector<Line> vec;
    for(i=0;i<out->size;i++)
    {
      //for(j=0;j<out->dim;j++)
      {
        //printf("%f ",out->values[ i * out->dim + j ]);
          Line line;
          line.x1= out->values[ i * out->dim + 0];
          line.y1= out->values[ i * out->dim + 1];
          line.x2= out->values[ i * out->dim + 2];
          line.y2= out->values[ i * out->dim + 3];
          vec.push_back(line);
      }
      //printf("\n");
    }

    IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvZero(black);
    draw_lines(vec,black);
    /*cvNamedWindow("img", 0);
    cvShowImage("img", img);*/
    cvSaveImage("lines_detect.png",black/*img*/);
    /* free memory */
    cvReleaseImage(&grey);
    cvReleaseImage(&black);
    free_image_double(image);
    free_ntuple_list(out);
}

Or this way, as a function that returns an image with the detected lines drawn on it:

IplImage* get_lines(IplImage* img,vector<Line>& vec_lines)
{
    //to grey
    //IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    //cvCvtColor(img, grey, CV_BGR2GRAY);

    image_double image;
    ntuple_list out;
    unsigned int x,y,i,j;
    image = new_image_double(img->width,img->height);
    for(x=0;x</*grey*/img->width;x++)
    for(y=0;y</*grey*/img->height;y++)
    {
      CvScalar s= cvGet2D(/*grey*/img,y,x);
      double pix= s.val[0];
      image->data[ x + y * image->xsize ]= pix;
    }

    /* call LSD */
    out = lsd(image);
    //out= lsd_scale(image,1);

    /* print output */
    //printf("%u line segments found:\n",out->size);
    //vector<Line> vec;
    for(i=0;i<out->size;i++)
    {
      //for(j=0;j<out->dim;j++)
      {
        //printf("%f ",out->values[ i * out->dim + j ]);
          Line line;
          line.x1= out->values[ i * out->dim + 0];
          line.y1= out->values[ i * out->dim + 1];
          line.x2= out->values[ i * out->dim + 2];
          line.y2= out->values[ i * out->dim + 3];
          /*vec*/vec_lines.push_back(line);
      }
      //printf("\n");
    }

    IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvZero(black);
    for(int i=0;i<vec_lines.size();++i)
    {
        //if(vec[i].x1==vec[i].x2||vec[i].y1==vec[i].y2)
        cvLine(black,cvPoint(vec_lines[i].x1,vec_lines[i].y1),cvPoint(vec_lines[i].x2,vec_lines[i].y2),CV_RGB(255,255,255),1, CV_AA);
    }
    /*cvNamedWindow("img", 0);
    cvShowImage("img", img);*/
    //cvSaveImage("lines_detect.png",black/*img*/);
    /* free memory */
    //cvReleaseImage(&grey);
    free_image_double(image);
    free_ntuple_list(out);

    return black;
}
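
To tie this back to the original question of running LSD on a video, a per-frame loop could look roughly like the sketch below; it reuses the capture code from the first answer together with the `get_lines()` function and the assumed `Line` struct above, and the fallback filename "video.avi" is just a placeholder:

#include <stdio.h>
#include <highgui.h>
#include <cv.h>
#include <vector>
// plus "lsd.h" and the Line struct / get_lines() shown above

int main(int argc, char* argv[])
{
    // Open the video given on the command line, or a placeholder name
    CvCapture* capture = cvCaptureFromAVI(argc > 1 ? argv[1] : "video.avi");
    if (!capture) return -1;

    cvNamedWindow("lines", CV_WINDOW_AUTOSIZE);

    IplImage* frame;
    while ((frame = cvQueryFrame(capture)) != 0)
    {
        // LSD works on a single-channel image, so convert the BGR frame first
        IplImage* grey = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        cvCvtColor(frame, grey, CV_BGR2GRAY);

        std::vector<Line> lines;
        IplImage* drawn = get_lines(grey, lines);   // run LSD on this frame

        cvShowImage("lines", drawn);

        cvReleaseImage(&drawn);
        cvReleaseImage(&grey);

        if (cvWaitKey(33) == 'q') break;            // press 'q' to quit
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("lines");
    return 0;
}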
mrgloom