0

I need some help with a project. It's about detecting red objects in a determined green area. I have to dodge objects and reach the goal (in this case a blue area), and also go back to collect the objects with a servomotor and a clamp, all using a camera with OpenCV and the iRobot.

Well, the code that I have so far can recognize the red objects, move towards them, and stop when they are close. It can also rotate left and right; I did it this way to try to center the object in the camera, dividing the screen into 3 parts, and then I pick it up with the servo. But I'm running out of ideas: when I run the Python code, the process is very slow, and the left/right detection is very sensitive, so I can't keep the object centered. Also I have no idea how to get the distance between the object and the iRobot; in theory I can then dodge objects, but I don't know how to get to the goal, or how to get back to the objects.

This is the python code:

    import cv2
    import numpy as np
    import serial
    from time import sleep

    def serialCom(int):
        ser =  serial.Serial(port = "/dev/ttyACM0", baudrate=9600)
        x = ser.readline()
        ser.write(int)
        x = ser.readline()
        print 'Sending: ', x

    # Recognizes how much red is in a given area by the parameters
    def areaRed(img, xI, xF, yI, yF):
        """Count the pixels equal to 255 inside the rectangle
        rows [xI, xF) x cols [yI, yF) of a binary mask.

        The original implementation walked the region with a nested
        Python loop, which was the main per-frame bottleneck. A NumPy
        slice + count does the same work in C.
        """
        roi = np.asarray(img)[xI:xF, yI:yF]   # view, no copy
        return int(np.count_nonzero(roi == 255))


    def reconoce(lower_re, upper_re, lower_gree, upper_gree, lower_ble, upper_ble):
        cap = cv2.VideoCapture(1)

        while(1):
            _, frame = cap.read()

            # Converting BGR to HSV
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv2 = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV)
            hsv3 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # Threshold the HSV image to get only red,blue and green colors
            mask = cv2.inRange(hsv, lower_re, upper_re)
            maskBlue = cv2.inRange(hsv2, lower_ble, upper_ble)
            maskGreen = cv2.inRange(hsv3, lower_gree, upper_gree)

            moments = cv2.moments(mask)
            area = moments['m00']
            blueMoment = cv2.moments(maskBlue)
            blueArea = blueMoment['m00']
            greenMoment = cv2.moments(maskGreen)
            greenArea = greenMoment['m00']

            # Determine the limits of the mask
            x = mask.shape[0] - 1
            y = mask.shape[1] - 1

            # Determine the center point of the image
            xC = x / 2
            yC = y / 2

            x3 = (x/3)/2
            y3 = y/2

            # Define the variables for the center values ​​of the camera
            xI, xF, yI, yF = xC - (xC / 4), xC + (xC / 4), yC - (yC / 4), yC + (yC / 4)

            #  define the ranges of the mask for the left and right side
            right = areaRed(mask, xI + (x/4), xF + (x/4), yI + (x/4), yF + (x/4))
            left = areaRed(mask, xI - (x/4), xF - (x/4), yI - (x/4), yF - (x/4))

            centro = areaRed(mask, xI, xF, yI, yF)

            if right > 700:
                serialCom ("4")
                print "turning right"
                return mask
            if left > 700:
                serialCom("5")
                print "turning left"
                return mask

            #if there is a red objet
            if centro > 5000: 
                print 'i detect red'
                #and in on the a green area
                if greenArea > 10000000:
                    #we stop the irbot
                    serialCom("1") 
                    print 'I found red and is in the area'
                    cv2.destroyAllWindows()
                    return mask
                else:
                    #then we continue moving
                    serialCom("3")
                    print ''
            else:
                print "there is not red objet:v"


            cv2.imshow('frame',frame)
            cv2.imshow('red',mask)
            k = cv2.waitKey(5) & 0xFF
            if k == 27:
                break

        cv2.destroyAllWindows()


    # The ranges of colors are defined for the HSV color space
    lower_red = np.array([170,150,50])
    upper_red = np.array([179,255,255])
    lower_green = np.array([85,80,150])
    upper_green = np.array([95,255,255])
    lower_blue = np.array([100,100,100])
    upper_blue = np.array([120,255,255])


    while True:
        img = reconoce(lower_red, upper_red, lower_green, upper_green, lower_blue, upper_blue)
        # Bug fix: reconoce() returns None when the user quits with ESC;
        # passing None to imshow() raises an error, so exit instead.
        if img is None:
            break
        cv2.imshow('Freeze image', img)
        cv2.waitKey(10000)
        cv2.destroyAllWindows()

This is the Arduino code connecting the iRobot with the OpenCV side:

#include <Roomba.h>

Roomba roomba(&Serial1);
int p=0;

// Short in-place wiggle (spin one way, then back) used as a start-up
// self-test. NOTE(review): "init" may collide with the Arduino core's
// init(); consider renaming. The first driveDirect command is sent
// twice in a row -- presumably a copy/paste slip; re-sending the same
// wheel speeds has no additional effect. TODO confirm.
void init()
{
  roomba.driveDirect(-100,100);
  roomba.driveDirect(-100,100);
  delay(100);
  roomba.driveDirect(100,-100);
  delay(100);
}

// Drive straight ahead at 50 mm/s on both wheels.
// NOTE(review): the command is sent twice with identical speeds; the
// second call appears redundant. TODO confirm.
void move()
{
  roomba.driveDirect(50,50);
  roomba.driveDirect(50,50);  
}

// Drive straight backwards at 50 mm/s on both wheels.
void reverse()
{
  roomba.driveDirect(-50,-50);
}

// Spin in place for 1 s (wheels at -30/+30 mm/s).
// NOTE(review): the Python side triggers this with command "4", which
// it labels "turning right" -- verify which naming matches the robot's
// actual direction.
void left_rotation()
{
  roomba.driveDirect(-30,30);
  delay(1000);
}

// Spin in place for 1 s in the opposite sense (+30/-30 mm/s).
// NOTE(review): triggered by Python command "5", labelled "turning
// left" there -- verify the naming against the real direction.
void right_rotation()
{
  roomba.driveDirect(30,-30);
  delay(1000);
}

// Halt both wheels, wait 500 ms, then report "9" back to the PC.
void stop()
{
  roomba.driveDirect(0,0);
  delay(500);
  Serial.println("9");
}


// Arduino entry point: open the PC link at 9600 baud and put the
// Roomba (attached to Serial1) into safe mode so it accepts drive
// commands.
void setup()
{

  Serial.begin(9600);
  roomba.start();
  roomba.safeMode();

}

// Command dispatcher: read one ASCII byte from the PC, acknowledge it
// over serial, and perform the matching motion. A single byte can
// match at most one branch, so a switch is equivalent to the original
// chain of ifs.
void loop()
{
  if (Serial.available() > 0)
  {
    p = Serial.read();

    switch (p)
    {
      case 51:                 // '3' -> drive forward
        Serial.println("1");
        move();
        break;

      case 50:                 // '2' -> back up
        Serial.println("2");
        reverse();
        delay(1000);
        break;

      case 52:                 // '4' -> rotate, then stop
        Serial.println("4");
        left_rotation();
        delay(1000);
        stop();
        break;

      case 53:                 // '5' -> rotate the other way, then stop
        Serial.println("5");
        right_rotation();
        delay(1000);
        stop();
        break;

      case 49:                 // '1' -> halt
        Serial.println("3");
        stop();
        delay(1000);
        break;
    }
  }
  delay(100);
}
Ellebkey
  • 2,201
  • 3
  • 22
  • 31

2 Answers2

0

Yes, your code is pretty slow because you are iterating over the image matrix manually with your own naive source code. I would recommend reading a little bit more about object tracking and its theoretical problems, and then about existing practical solutions. That might be better than trying to program with OpenCV at the low level. For your specific problem I suggest checking cvBlob

http://code.google.com/p/cvblob/

which is also about tracking of red objects (even with a cool demo video).

If the objects are not just a plain colour but textured, I suggest this part of the openCV doc

http://docs.opencv.org/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html

which explains tracking by Feature Extraction, Descriptor Generation and FLANN matching.

Kenyakorn Ketsombut
  • 2,072
  • 2
  • 26
  • 43
  • the cvblob is only with c or can be use with python? I checked the link and i dont find any examples of how to use it. Do you have any documenttion or link where i can see more information. – Ellebkey Oct 08 '14 at 15:32
  • There is a python connector here: https://github.com/oostendo/cvblob-python and there is an python example here: http://code.google.com/p/cvblob/source/browse/interfaces/swig/python/test/test.py?name=0.10.4_pythonswig&r=36416e5dcd0a01fa0baeb88f158dcf2654b80637 – Kenyakorn Ketsombut Oct 09 '14 at 08:48
0

I guess "so sensitive" means your robot keeps jittering because your detection is so slow to follow the object's actual position.

First of all, you need to make sure you can detect your target object at a certain framerate, say 25 FPS. This means your detection and motor action must respond in less than 40 ms. You can test whether your algorithm can make one move in 40 ms. If not, here are some suggestions.

The following code of yours is not efficient. It traverses the image roughly 12 times, costing too much time. To prevent this, you can reduce the traversals to 2 (or even just one).

        # Converting BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hsv2 = cv2.cvtColor(frame , cv2.COLOR_BGR2HSV)
        hsv3 = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Threshold the HSV image to get only red,blue and green colors
        mask = cv2.inRange(hsv, lower_re, upper_re)
        maskBlue = cv2.inRange(hsv2, lower_ble, upper_ble)
        maskGreen = cv2.inRange(hsv3, lower_gree, upper_gree)

        moments = cv2.moments(mask)
        area = moments['m00']
        blueMoment = cv2.moments(maskBlue)
        blueArea = blueMoment['m00']
        greenMoment = cv2.moments(maskGreen)
        greenArea = greenMoment['m00']

        ...

        #  define the ranges of the mask for the left and right side
        right = areaRed(mask, xI + (x/4), xF + (x/4), yI + (x/4), yF + (x/4))
        left = areaRed(mask, xI - (x/4), xF - (x/4), yI - (x/4), yF - (x/4))

        centro = areaRed(mask, xI, xF, yI, yF)

I'm sorry that I can only provide C++ code for now with some existing code.

Mat hsv, RedMask, BlueMask, GreenMask ;
RedMask = Mat::zeros(src.rows, src.cols, CV_8UC1) ;
RedMask.copyTo(BlueMask);
RedMask.copyTo(GreenMask);
// Build all three colour masks and their pixel counts in a single
// pass over the image instead of six separate OpenCV traversals.
cvtColor (src, hsv, CV_BGR2HSV) ;
const uchar* pix ;
uchar * pixoutr ;
uchar * pixoutg ;
uchar * pixoutb ;

long int redarea = 0 ;
long int greenarea = 0 ;
long int bluearea = 0 ;

for (int i = 0 ; i < hsv.rows ; i++)
{
    pix = hsv.ptr<uchar>(i) ;
    pixoutr = RedMask.ptr<uchar>(i) ;
    pixoutg = GreenMask.ptr<uchar>(i) ;
    pixoutb = BlueMask.ptr<uchar>(i) ;

    // NOTE(review): each test below inspects only one of the three
    // interleaved HSV channels; robust colour filtering should range-
    // check all three (as cv::inRange does). TODO confirm intent.
    for (int j=0 ; j < hsv.cols * hsv.channels() ; j+=hsv.channels())
    {
        // Bug fix: the range tests used ||, which is true for every
        // possible pixel value -- an in-range test needs &&.
        if (pix[j] >= lower_blue && pix[j] <= higer_blue)
        {
            pixoutb[(int)j/hsv.channels()] = 255 ;
            bluearea+=1 ;
        }
        if (pix[j+1] >= lower_gree && pix[j+1] <= higer_gree)
        {
            pixoutg[(int)j/hsv.channels()] = 255 ;
            greenarea+=1 ;
        }
        if (pix[j+2] >= lower_re && pix[j+2] <= higer_re)
        {
            pixoutr[(int)j/hsv.channels()] = 255 ;
            redarea+=1;   // bug fix: was incrementing bluearea
        }

    }
}

So you get all the channel masks and the areas with only one traversal. It can be even faster if you apply OpenMP or Python multi-threading techniques.

Also, your tracking algorithm has a lot of issues; please have a look at image correlation, blob tracking, etc. Your Hu-moment usage is headed in the right direction, since it is fast enough. Feature detection in real time is not very practical for you right now. And lastly, please note that you should arrange for your OpenCV detection algorithm to run in parallel with your Arduino action code; you can use simple threading for that, so you utilize the computation time while your servomotor is running.

Supplement:

http://openmp.org/wp/resources/ for OpenMP, http://opencv.jp/?s=OpenMP an example of OpenCV with OpenMP, opencv python Multi Threading Video Capture a python threading technique with OpenCV, the other answer gave the blob tracking example already. For image correlation template matching method, check http://docs.opencv.org/doc/tutorials/imgproc/histograms/template_matching/template_matching.html .

Community
  • 1
  • 1
tomriddle_1234
  • 3,145
  • 6
  • 41
  • 71