3

I use a color tracking code for processing.

What I want (example):

  • If red is detected show image 1
  • If green is detected show image 2
  • If blue is detected show image 3

The problem is: once the last color has been detected and the last image is shown, tracking the first color again no longer brings the first image to the front (I can't see it).

The whole code:

 import processing.video.*;
//import hypermedia.net.*;


PImage img;
PImage img2;
PImage img3;

Capture video; 
final int TOLERANCE = 20;

float XRc = 0;// XY coordinate of the center of the first target
float YRc = 0;
float XRh = 0;// XY coordinate of the center of the second target
float YRh = 0;
float XRc2 = 0; // XY coordinate of the center of the third target
float YRc2 = 0;
float XRh2 = 0;// XY coordinate of the center of the fourth target
float YRh2 = 0;

int ii=0; //Mouse click counter

color trackColor; //The first color is the center of the robot 
color trackColor2; //The second color is the head of the robot
color trackColor3; //The first color is the center of the robot 2
color trackColor4; //The first color is the center of the robot 2

void setup() {
img = loadImage("IMG_4700.JPG");
img2 = loadImage("2.JPG");
img3 = loadImage("3.JPG");
size(800,800);
video = new Capture(this,640,480);
video.start();

trackColor = color(94,164,126);
trackColor2 = color(60,110,194);
trackColor3 = color(197, 76,64);
trackColor4 = color(255,0,0);
smooth();
}

void draw() {
background(0);
if (video.available()) {
    video.read();
}

video.loadPixels();
image(video,0,0);

  float r2 = red(trackColor);
  float g2 = green(trackColor);
  float b2 = blue(trackColor);

  float r3 = red(trackColor2);
  float g3 = green(trackColor2);
  float b3 = blue(trackColor2);

  float r4 = red(trackColor3);
  float g4 = green(trackColor3);
  float b4 = blue(trackColor3);

  float r5 = red(trackColor4);
  float g5 = green(trackColor4);
  float b5 = blue(trackColor4);


  int somme_x = 0, somme_y = 0; // pour le calcul des baricentres
  int compteur = 0;

  int somme_x2 = 0, somme_y2 = 0; // pour le calcul des baricentres
  int compteur2 = 0;

  int somme_x3 = 0, somme_y3 = 0; // pour le calcul des baricentres
  int compteur3 = 0;

  int somme_x4 = 0, somme_y4 = 0; // pour le calcul des baricentres
  int compteur4 = 0;


  for(int x = 0; x < video.width; x++) {
    for(int y = 0; y < video.height; y++) {

      int currentLoc = x + y*video.width;
      color currentColor = video.pixels[currentLoc];

      float r1 = red(currentColor);
      float g1 = green(currentColor);
      float b1 = blue(currentColor);


      if(dist(r1,g1,b1,r2,g2,b2) < TOLERANCE) {
         somme_x += x;
         somme_y += y;
        compteur++;
      }

      else if(compteur > 0) { 
        XRc = somme_x / compteur;
        YRc = somme_y / compteur;
      }


      if(dist(r1,g1,b1,r3,g3,b3) < TOLERANCE) {
         somme_x2 += x;
         somme_y2 += y;
        compteur2++;
      }

      else if(compteur2 > 0) { 
        XRh = somme_x2 / compteur2;
        YRh = somme_y2 / compteur2;
      }

      if(dist(r1,g1,b1,r4,g4,b4) < TOLERANCE) {
         somme_x3 += x;
         somme_y3 += y;
        compteur3++;
      }

      else if(compteur3 > 0) { 
        XRc2 = somme_x3 / compteur3;
        YRc2 = somme_y3 / compteur3;
      }

      if(dist(r1,g1,b1,r5,g5,b5) < TOLERANCE) {
         somme_x4 += x;
         somme_y4 += y;
        compteur4++;
      }

      else if(compteur4 > 0) { 
        XRh2 = somme_x4 / compteur4;
        YRh2 = somme_y4 / compteur4;
      }

  }
  }


// track the color and show images
boolean c1 = false;
boolean c2 = false;
boolean c3 = false;


  if(XRc != 0 || YRc != 0) { // color Green detected
    c1 = true;
    c2 = false;
    c3 = false;
   } 


   if(XRh != 0 || YRh != 0) { // color blue detected
    c2 = true;
    c1 = false;
    c3 = false;
   }

    if(XRc2 != 0 || YRc2 != 0) { // color red detected
      c3 = true;
      c1 = false;
      c2 = false;
    }


     if(c1 == true) {
       image(img,0,0); // show image 1
      } else if (c2 == true) {
       image(img2,0,0); // show image 2
     } else if (c3 == true) {
       image(img3,0,0); // show image 3
     }

}

The important snippet:

// detect color and show images
boolean c1 = false;
boolean c2 = false;
boolean c3 = false;


  if(XRc != 0 || YRc != 0) { // color Green detected
    c1 = true;
    c2 = false;
    c3 = false;
   } 


   if(XRh != 0 || YRh != 0) { // color blue detected
    c2 = true;
    c1 = false;
    c3 = false;
   }

    if(XRc2 != 0 || YRc2 != 0) { // color red detected
      c3 = true;
      c1 = false;
      c2 = false;
    }


     if(c1 == true) {
       image(img,0,0); // show image 1
      } else if (c2 == true) {
       image(img2,0,0); // show image 2
     } else if (c3 == true) {
       image(img3,0,0); // show image 3
     }

Screenshots:

First object is tracked and image is shown first object is tracked and image is shown

Second object is tracked and image is shown second object is tracked and image is shown

Third object is tracked and image is shown third object is tracked and image is shown

My problem: (the first object should be tracked and the first image should be shown) the first object should be tracked and the first image should be shown

Kevin Workman
  • 41,537
  • 9
  • 68
  • 107
fab
  • 247
  • 1
  • 2
  • 7
  • Can you please post some screenshots to show exactly what you're seeing? – Kevin Workman Jun 10 '16 at 23:07
  • hey @KevinWorkman i´ve updated my post with images from the problem – fab Jun 10 '16 at 23:36
  • If I were you, I'd add a bunch of `println()` statements to figure out exactly which if statements are being entered, and the values of all of your variables. That'll tell you what your code is doing, then we can work backwards to why it's doing it. – Kevin Workman Jun 11 '16 at 12:27

2 Answers2

3

There are few things that could be improved. In terms of efficiency, a couple of minor suggestions:

  1. you could pre-compute the RGB components of the colours you want to track once in setup() rather than many times per second in draw() (e.g.float r2 = red(trackColor); etc.)
  2. You could use a flat 1D loop rather than a nested loop using video.pixels[]. One minor disadvantage is that you'd need to compute the x,y position from the pixel index. Since you need to display an image and it doesn't seem to matter where, you might not even need to compute x,y.(e.g. for(int currentLoc = 0; currentLoc < video.pixels.length; currentLoc++))

In terms of the algorithm itself:

  1. You are using a single threshold value(TOLERANCE). This will cut out anything on the left of the value which is ok, but not cut a whole range of other colours that might mess with your counter. I recommend using a range (e.g. MIN_TOLERANCE,MAX_TOLERANCE).
  2. You are using R,G,B colour space. R,G,B colours don't mix together as we'd expect. A more perceptual colour space will behave as you'd expect (e.g. orange will be closer to red and yellow). For that you would need to convert from RGB to CIE XYZ, then to L\*a\*b\*, compute the euclidean distance between two colours, then convert the result back to RGB if you need to display it. You can find an example here. It's in OpenFrameworks (C++), but you should be able to see the similarities to Processing and port it. There is another option: HSB colour space. More on that below
  3. You could draw some visualisation of how your code is segmenting the image. It will be faster to get an idea of which values work better, which don't

I recommend trying the OpenCV for Processing library which uses a more modern OpenCV library under the hood and provides more functionalities and great examples. One of them is particularly useful for you: HueRangeSelection

HueRangeSelection

Give it a go. Notice you can move the mouse to shift the range and if you hold a key pressed you can increase the range. For example, here's how the quick demo of it with your images. (The HSB range threshold result is displayed smaller in the bottom right corner):

red range

green range

blue range

From my own experience, I'd recommend not using shiny/reflective materials (e.g. the coke can). You can see in the image above the segmentation isn't as good as on the green and blue objects with flatter colours. Because the can is reflective, it will appear to have different colours not only with global lighting changes, but also it's position/rotation and objects close to it. It's a pain to cater to all these.

Also, to take the HueRange example further you can:

  1. Apply a morphologic filter (for example erode(), then dilate()) to remove some of the noise (smaller white pixel patches). At this point you can count the number of white pixels per colour range and decide which image to display.
  2. find contours on the filtered image, which you can use to determine the x,y,width,height of the region that falls within a range of the colour you want to track.

Good luck and most importantly have fun!

Community
  • 1
  • 1
George Profenza
  • 50,687
  • 19
  • 144
  • 218
2

Hmmm... Without running the code, I'd bet the problem is that you rely on the coordinates (XRc and its siblings) being zero to choose which image to use. They are all initialized to 0, so the first run goes fine, but... you never reset them to zero, do you? So after they have all been changed once by detecting the 3 colors, your test becomes useless. Perhaps you can reset them all to zero whenever a color is detected.

And maybe you don't need the boolean at all...

What do think of this?

PSEUDO

//global
PImage imgs = new PImage[3];

int imageToDispaly = 0;




//all the stuff...





 if(XRc != 0 || YRc != 0) { // color Green detected
  // not sure this will work, but the idea is some thing like this.
  XRh = YRh = XRc2 = YRc2 = 0;
  imageToDispaly = 0;
 } 

 if(XRh != 0 || YRh != 0) { // color blue detected
  XRc = YRc = XRc2 = YRc2 = 0;
  imageToDispaly = 1;
 }

 if(XRc2 != 0 || YRc2 != 0) { // color red detected
  XRh = YRh = XRc = YRc = 0;
  imageToDispaly = 2;
 }

// at appropriated time...
image(imgs[imageToDispaly], x, y);
v.k.
  • 2,826
  • 2
  • 20
  • 29