
I am using the code below, taken from the OpenCV site, for scale- and rotation-invariant template matching. I want to calculate the exact pixel coordinates of the template image within the source image, especially when the match is rotated.

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { 
        readme();
        return -1;
    }

    Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );

    if( !img_object.data || !img_scene.data )
    {
        std::cout<< " --(!) Error reading images " << std::endl;
        return -1; 
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;

    Mat descriptors_object, descriptors_scene;

    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; 
    double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    { 
        double dist = matches[i].distance;
        if( dist < min_dist )
            min_dist = dist;
        if( dist > max_dist ) 
            max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;

    for( int i = 0; i < descriptors_object.rows; i++ )
    { 
        if( matches[i].distance < 3*min_dist )
        { 
            good_matches.push_back( matches[i]); 
        }
    }

    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for( size_t i = 0; i < good_matches.size(); i++ )
    {
      //-- Get the keypoints from the good matches
      obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
      scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    Mat H = findHomography( obj, scene, CV_RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );
    obj_corners[1] = Point2f( img_object.cols, 0 );
    obj_corners[2] = Point2f( img_object.cols, img_object.rows );
    obj_corners[3] = Point2f( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );

    waitKey(0);
    return 0;
}

/** @function readme */
void readme()
{ 
    std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; 
}
user3807950

1 Answer


You already have these coordinates in scene_corners after computing the perspective transform. The four points in scene_corners form the quadrilateral outlining the template match found in the scene, so those four corners are exactly what you're looking for.
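
For example, here is a minimal sketch (reusing the scene_corners vector your code already fills) that prints those four pixel coordinates. Note that the Point2f( img_object.cols, 0 ) offset in your line() calls is only there because img_matches places the two images side by side; scene_corners itself is already in scene-image coordinates:

    // Print the pixel coordinates of the matched template corners.
    // scene_corners was filled by perspectiveTransform above and is
    // expressed in scene-image coordinates (no side-by-side offset).
    for( size_t i = 0; i < scene_corners.size(); i++ )
    {
        std::cout << "Corner " << i << ": ("
                  << scene_corners[i].x << ", "
                  << scene_corners[i].y << ")" << std::endl;
    }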

Further, if you wish to find the transformed coordinates of any point from the template in the scene, you just need to apply the same homography to that point via the perspectiveTransform function.
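
For instance, a short sketch that maps the template centre (any template pixel works the same way) into the scene, using the H returned by findHomography in your code:

    // Map an arbitrary template pixel into the scene with the same
    // homography H. Here the template centre is used as an example.
    std::vector<Point2f> template_pts(1), scene_pts;
    template_pts[0] = Point2f( img_object.cols / 2.0f, img_object.rows / 2.0f );
    perspectiveTransform( template_pts, scene_pts, H );
    std::cout << "Centre maps to: (" << scene_pts[0].x << ", "
              << scene_pts[0].y << ")" << std::endl;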

Zaphod
  • Hi Zaphod, thanks for your quick reply. You are right that scene_corners will contain the corners of the template, but that holds good only for my straight-angled images. Actually, I am trying to detect a logo in a square box on a wall, but if the image is angled then scene_corners gives me a distorted square around the template image (logo), which is not feasible for my calculation requirements. Can you please suggest an alternative to get an exact square box with exact 4 scene_corners for rotated images as well? Is there any other algorithmic approach apart from SIFT or SURF for this purpose in OpenCV? Thanks! – user3807950 Aug 27 '14 at 16:45
  • You just need a bounding box around the distorted rectangle - http://stackoverflow.com/questions/622140/calculate-bounding-box-coordinates-from-a-rotated-rectangle-picture-inside – Zaphod Aug 28 '14 at 08:54
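
Following up on that last comment, here is a minimal sketch of getting such a box around the distorted quadrilateral, using boundingRect (axis-aligned box) and minAreaRect (rotated rectangle) on the scene_corners from the question. Treat it as an illustration placed after the perspectiveTransform call, not as part of either poster's code:

    // Axis-aligned bounding box enclosing the distorted quadrilateral.
    Rect box = boundingRect( scene_corners );

    // Minimum-area rotated rectangle: stays a true rectangle (with an
    // angle) even when the matched logo is rotated in the scene.
    RotatedRect rbox = minAreaRect( scene_corners );

    rectangle( img_scene, box, Scalar( 255 ), 2 );
    std::cout << "Box: " << box.x << ", " << box.y << ", "
              << box.width << "x" << box.height
              << "  rotated angle: " << rbox.angle << std::endl;

The axis-aligned box is the simplest answer to "exact 4 corners", at the cost of including some background; the rotated rectangle keeps the shape square but carries an angle you would have to account for in later calculations.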