
@robot_sherrick answered this question for me; this is a follow-up question to his answer.

cv::SimpleBlobDetector in OpenCV 2.4 looks very exciting, but I am not sure I can make it work for more detailed data extraction.

I have the following concerns:

  • if this only returns the center of each blob, I can't get an entire, labelled Mat, can I?
  • how can I access the features of the detected blobs, like area, convexity, color and so on?
  • can I display an exact segmentation with this? (like with, say, watershed)

3 Answers


So the code should look something like this:

cv::Mat inputImg = imread(image_file_name, CV_LOAD_IMAGE_COLOR);   // Read a file
cv::SimpleBlobDetector::Params params; 
params.minDistBetweenBlobs = 10.0;  // minimum 10 pixels between blobs
params.filterByArea = true;         // filter my blobs by area of blob
params.minArea = 20.0;              // min 20 pixels squared
params.maxArea = 500.0;             // max 500 pixels squared
cv::SimpleBlobDetector myBlobDetector(params);
std::vector<cv::KeyPoint> myBlobs;
myBlobDetector.detect(inputImg, myBlobs);

If you then want to have these keypoints highlighted on your image:

cv::Mat blobImg;    
cv::drawKeypoints(inputImg, myBlobs, blobImg);
cv::imshow("Blobs", blobImg);

To access the information in the keypoints, iterate over each element like so:

for(std::vector<cv::KeyPoint>::iterator blobIterator = myBlobs.begin(); blobIterator != myBlobs.end(); blobIterator++){
   std::cout << "size of blob is: " << blobIterator->size << std::endl;
   std::cout << "point is at: " << blobIterator->pt.x << " " << blobIterator->pt.y << std::endl;
} 

Note: this has not been compiled and may have typos.

  • so you are saying that there's no chance that I could extract more blob info than that? (I mean, from an OOP point of view, a class that stores this blob info and provides an iterator, like an std::map, would make more sense and would be more flexible...) – Barney Szabolcs Nov 23 '12 at 19:45
  • You could certainly store the keypoints in association with a name using a std::map, but the inbuilt functions for OpenCV only recognize a std::vector. You would have to somehow recognize each blob as belonging to a specific name like based on location, size, color, etc. To do this, you could have a different filter for each expected type of blob. – thealmightygrant Nov 23 '12 at 19:50
  • ok, let's go back to the original question. Are you saying that you don't think there's a chance I could extract more blob info than the centre and the size? – Barney Szabolcs Nov 23 '12 at 20:00
  • That's just the information provided by the simple blob detector. You can certainly extract color or area information yourself. Color information can be extracted and averaged using your keypoint location and size and the `cv::Mat` `at` function with an iterator. Though, rereading your question, it sounds like you want to [filter your image](http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-filter2d) and analyze the results. – thealmightygrant Nov 23 '12 at 20:31
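
For illustration, here is a rough, uncompiled sketch of that color-extraction idea (not from the answers above): average the BGR values inside each keypoint's circular neighbourhood, assuming inputImg is the CV_8UC3 image and myBlobs is the keypoint vector from the answer above.

for (size_t i = 0; i < myBlobs.size(); i++)
{
    cv::Point2f c = myBlobs[i].pt;
    float r = myBlobs[i].size / 2.0f;  // KeyPoint::size is nominally a diameter; in 2.4 SimpleBlobDetector it may hold the radius
    double sumB = 0, sumG = 0, sumR = 0;
    int count = 0;
    for (int y = cvRound(c.y - r); y <= cvRound(c.y + r); y++)
    {
        for (int x = cvRound(c.x - r); x <= cvRound(c.x + r); x++)
        {
            if (x < 0 || y < 0 || x >= inputImg.cols || y >= inputImg.rows)
                continue;                       // stay inside the image
            float dx = x - c.x, dy = y - c.y;
            if (dx * dx + dy * dy > r * r)
                continue;                       // keep only pixels inside the blob circle
            cv::Vec3b px = inputImg.at<cv::Vec3b>(y, x);
            sumB += px[0]; sumG += px[1]; sumR += px[2];
            count++;
        }
    }
    if (count > 0)
        std::cout << "mean BGR of blob " << i << ": "
                  << sumB / count << " " << sumG / count << " " << sumR / count << std::endl;
}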

Here is a version that will allow you to get the contours back, via the getContours() method. They match up by index with the keypoints; a usage sketch follows the listing below.

class BetterBlobDetector : public cv::SimpleBlobDetector
{
public:

    BetterBlobDetector(const cv::SimpleBlobDetector::Params &parameters = cv::SimpleBlobDetector::Params());

    const std::vector < std::vector<cv::Point> > getContours();

protected:
    virtual void detectImpl( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat()) const;
    virtual void findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                           std::vector<Center> &centers, std::vector < std::vector<cv::Point> >&contours) const;

};

Then in the .cpp file:

using namespace cv;

BetterBlobDetector::BetterBlobDetector(const SimpleBlobDetector::Params &parameters)
    : SimpleBlobDetector(parameters)   // forward the parameters so the protected `params` member is actually set
{
}

void BetterBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
                                   vector<Center> &centers, std::vector < std::vector<cv::Point> >&curContours) const
{
    (void)image;
    centers.clear();

    curContours.clear();

    std::vector < std::vector<cv::Point> >contours;
    Mat tmpBinaryImage = binaryImage.clone();
    findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);


    for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
    {
        Center center;
        center.confidence = 1;
        Moments moms = moments(Mat(contours[contourIdx]));
        if (params.filterByArea)
        {
            double area = moms.m00;
            if (area < params.minArea || area >= params.maxArea)
                continue;
        }

        if (params.filterByCircularity)
        {
            double area = moms.m00;
            double perimeter = arcLength(Mat(contours[contourIdx]), true);
            double ratio = 4 * CV_PI * area / (perimeter * perimeter);
            if (ratio < params.minCircularity || ratio >= params.maxCircularity)
                continue;
        }

        if (params.filterByInertia)
        {
            double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
            const double eps = 1e-2;
            double ratio;
            if (denominator > eps)
            {
                double cosmin = (moms.mu20 - moms.mu02) / denominator;
                double sinmin = 2 * moms.mu11 / denominator;
                double cosmax = -cosmin;
                double sinmax = -sinmin;

                double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
                double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
                ratio = imin / imax;
            }
            else
            {
                ratio = 1;
            }

            if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
                continue;

            center.confidence = ratio * ratio;
        }

        if (params.filterByConvexity)
        {
            vector < Point > hull;
            convexHull(Mat(contours[contourIdx]), hull);
            double area = contourArea(Mat(contours[contourIdx]));
            double hullArea = contourArea(Mat(hull));
            double ratio = area / hullArea;
            if (ratio < params.minConvexity || ratio >= params.maxConvexity)
                continue;
        }

        center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);

        if (params.filterByColor)
        {
            if (binaryImage.at<uchar> (cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
                continue;
        }

        //compute blob radius
        {
            vector<double> dists;
            for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
            {
                Point2d pt = contours[contourIdx][pointIdx];
                dists.push_back(norm(center.location - pt));
            }
            std::sort(dists.begin(), dists.end());
            center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
        }

        centers.push_back(center);
        curContours.push_back(contours[contourIdx]);
    }
}

static std::vector < std::vector<cv::Point> > _contours;

const std::vector < std::vector<cv::Point> > BetterBlobDetector::getContours() {
    return _contours;
}

void BetterBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
{
    //TODO: support mask
     _contours.clear();

    keypoints.clear();
    Mat grayscaleImage;
    if (image.channels() == 3)
        cvtColor(image, grayscaleImage, CV_BGR2GRAY);
    else
        grayscaleImage = image;

    vector < vector<Center> > centers;
    vector < vector<cv::Point> >contours;
    for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
    {
        Mat binarizedImage;
        threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);

        vector < Center > curCenters;
        vector < vector<cv::Point> >curContours, newContours;
        findBlobs(grayscaleImage, binarizedImage, curCenters, curContours);
        vector < vector<Center> > newCenters;
        for (size_t i = 0; i < curCenters.size(); i++)
        {

            bool isNew = true;
            for (size_t j = 0; j < centers.size(); j++)
            {
                double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
                isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
                if (!isNew)
                {
                    centers[j].push_back(curCenters[i]);

                    size_t k = centers[j].size() - 1;
                    while( k > 0 && centers[j][k].radius < centers[j][k-1].radius )
                    {
                        centers[j][k] = centers[j][k-1];
                        k--;
                    }
                    centers[j][k] = curCenters[i];

                    break;
                }
            }
            if (isNew)
            {
                newCenters.push_back(vector<Center> (1, curCenters[i]));
                newContours.push_back(curContours[i]);
                //centers.push_back(vector<Center> (1, curCenters[i]));
            }
        }
        std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
        std::copy(newContours.begin(), newContours.end(), std::back_inserter(contours));
    }

    for (size_t i = 0; i < centers.size(); i++)
    {
        if (centers[i].size() < params.minRepeatability)
            continue;
        Point2d sumPoint(0, 0);
        double normalizer = 0;
        for (size_t j = 0; j < centers[i].size(); j++)
        {
            sumPoint += centers[i][j].confidence * centers[i][j].location;
            normalizer += centers[i][j].confidence;
        }
        sumPoint *= (1. / normalizer);
        KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
        keypoints.push_back(kpt);
        _contours.push_back(contours[i]);
    }
}
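
As a hedged usage sketch (not part of the original answer): run the detector, then pair each keypoint with its contour via the shared index, assuming inputImg is the image loaded in the first answer.

// Sketch only: detect blobs with BetterBlobDetector, then fetch the matching contours.
cv::SimpleBlobDetector::Params params;
params.filterByArea = true;
params.minArea = 20.0;
params.maxArea = 500.0;

BetterBlobDetector detector(params);
std::vector<cv::KeyPoint> keypoints;
detector.detect(inputImg, keypoints);

std::vector< std::vector<cv::Point> > contours = detector.getContours();
for (size_t i = 0; i < keypoints.size() && i < contours.size(); i++)
{
    double area = cv::contourArea(contours[i]);                           // exact blob area
    cv::drawContours(inputImg, contours, (int)i, cv::Scalar(0, 255, 0));  // exact blob outline
    std::cout << "blob " << i << " area: " << area << std::endl;
}
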
// Access SimpleBlobDetector data for video

#include "opencv2/imgproc/imgproc.hpp" // 
#include "opencv2/highgui/highgui.hpp"

    #include <iostream>
    #include <math.h>
    #include <vector>
    #include <fstream>
    #include <string>
    #include <sstream>
    #include <algorithm>

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"


using namespace cv;
using namespace std;


int main(int argc, char *argv[])
{


    const char* fileName = "C:/Users/DAGLI/Desktop/videos/new/m3.avi";
    VideoCapture cap(fileName);
    if(!cap.isOpened())
    {
        cout << "Couldn't open Video  " << fileName << "\n"; 
        return -1; 
    }
    for(;;)  // loop over all frames of the video
    {
        Mat frame,labelImg; 
        cap >> frame; 
        if(frame.empty()) break;  
        //imshow("main",frame);  

        Mat frame_gray;
        cvtColor(frame, frame_gray, CV_BGR2GRAY);   // video frames are BGR in OpenCV


        //////////////////////////////////////////////////////////////////////////
        // convert binary_image
        Mat binaryx;
        threshold(frame_gray,binaryx,120,255,CV_THRESH_BINARY);


        Mat src, gray, thresh, binary;
        Mat out;
        vector<KeyPoint> keyPoints;

        SimpleBlobDetector::Params params;
        params.minThreshold = 120;
        params.maxThreshold = 255;
        params.thresholdStep = 100;

        params.minArea = 20; 
        params.minConvexity = 0.3;
        params.minInertiaRatio = 0.01;

        params.maxArea = 1000;
        params.maxConvexity = 10;

        params.filterByColor = false;
        params.filterByCircularity = false;



        src = binaryx.clone();

        SimpleBlobDetector blobDetector( params );   // constructed with params, ready to use; no separate create() call needed



        blobDetector.detect( src, keyPoints );
        drawKeypoints( src, keyPoints, out, CV_RGB(255,0,0), DrawMatchesFlags::DEFAULT);


        cv::Mat blobImg;    
        cv::drawKeypoints(frame, keyPoints, blobImg);
        cv::imshow("Blobs", blobImg);

        for(int i=0; i<keyPoints.size(); i++){
            //circle(out, keyPoints[i].pt, 20, cvScalar(255,0,0), 10);
            //cout<<keyPoints[i].response<<endl;
            //cout<<keyPoints[i].angle<<endl;
            //cout<<keyPoints[i].size()<<endl;
            cout<<keyPoints[i].pt.x<<endl;
            cout<<keyPoints[i].pt.y<<endl;

        }
        imshow( "out", out );

        if ((cvWaitKey(40)&0xff)==27) break;  // esc 'ye basilinca break
    }
    system("pause");

}