1

I am trying to recognize a source image (c1.jpg — a face) in a bigger destination image (allimg.jpg — containing 3 faces) using the ORB detector/descriptor and the FLANN or brute-force matcher. c1.jpg was created from allimg.jpg by cropping/copying from it. The ORB detector/descriptor works as expected, returning keypoints/descriptors correctly, BUT the FLANN or brute-force matcher gives incorrect matching results for the destination. As a result, when I further try to use findHomography(), it shows an incorrect result, mapping the source to somewhere else on the destination instead of to the correct face in the destination (allimg). Although it is not shown in the code below, after knnMatch I drew a bounding rect on c1.jpg and allimg.jpg from the matches and displayed the images. I found that the source bounding rect was correct, but the bounding rect on allimg was very big and merely included the source face. It should have found just the source face in the destination. I am using OpenCV 3.0. Did anyone face such problems? Are there any other matchers which accurately find the source image (a face or anything else) in the destination?

I have given the code below and the images(given by links):

#include <iostream>

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2\opencv.hpp>

using namespace std;
using namespace cv;

// Matching/RANSAC tuning constants.
// NOTE(review): nn_match_ratio, ransac_thresh and bb_min_inliers are declared
// here but never referenced in the code shown below.
const double nn_match_ratio = 0.80f; // Nearest-neighbour matching ratio
const double ransac_thresh = 2.5f; // RANSAC inlier threshold
const int bb_min_inliers = 100; // Minimal number of inliers to draw BBox     

// Query image (img1, the cropped face) and train image (img2, the full
// scene). Globals so refineMatchesWithHomography() can draw the match
// visualization on them.
Mat img1;
Mat img2;

bool refineMatchesWithHomography(const vector<cv::KeyPoint>& queryKeypoints,    
const vector<cv::KeyPoint>& trainKeypoints,     
float reprojectionThreshold,    
vector<cv::DMatch>& matches,    
Mat& homography  )  
{  
const int minNumberMatchesAllowed = 4;    
if (matches.size() <minNumberMatchesAllowed)    
return false;    
// Prepare data for cv::findHomography    
vector<cv::Point2f> queryPoints(matches.size());    
std::vector<cv::Point2f> trainPoints(matches.size());    
for (size_t i = 0; i <matches.size(); i++)    
{    
queryPoints[i] = queryKeypoints[matches[i].queryIdx].pt;    
trainPoints[i] = trainKeypoints[matches[i].trainIdx].pt;    
}    
// Find homography matrix and get inliers mask    
std::vector<unsigned char> inliersMask(matches.size());    
homography = findHomography(queryPoints,     
trainPoints,     
CV_FM_RANSAC,     
reprojectionThreshold,     
inliersMask);    
vector<cv::DMatch> inliers;    
for (size_t i=0; i<inliersMask.size(); i++)    
{    
if (inliersMask[i])    
inliers.push_back(matches[i]);    
}    
matches.swap(inliers);  
Mat homoShow;  
drawMatches (img1,queryKeypoints,img2,trainKeypoints,matches,homoShow,  

Scalar::all(-1),CV_RGB(255,255,255), Mat(),  2);       

imshow("homoShow",homoShow); 


waitKey(100000);
return matches.size() > minNumberMatchesAllowed;   

}  




int main()
{
//Stats stats;
vector<String> fileName;

fileName.push_back("D:\\pmn\\c1.jpg");
fileName.push_back("D:\\pmn\\allimg.jpg");

img1 = imread(fileName[0], CV_LOAD_IMAGE_COLOR);
img2 = imread(fileName[1], CV_LOAD_IMAGE_COLOR);

if (img1.rows*img1.cols <= 0)
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
}
if (img2.rows*img2.cols <= 0)
{
cout << "Image " << fileName[1] << " is empty or cannot be found\n";
return(0);
}

// keypoint  for img1 and img2
vector<KeyPoint> keyImg1, keyImg2;
// Descriptor for img1 and img2

Mat descImg1, descImg2;


Ptr<Feature2D> porb = ORB::create(500,1.2f,8,0,0,2,0,14);


porb->detect(img2, keyImg2, Mat());
// and compute their descriptors with method  compute
porb->compute(img2, keyImg2, descImg2);

// We can detect keypoint with detect method
porb->detect(img1, keyImg1,Mat());
// and compute their descriptors with method  compute
porb->compute(img1, keyImg1, descImg1);


//FLANN parameters

//  Ptr<flann::IndexParams> indexParams = 
makePtr<flann::LshIndexParams> (6, 12, 1);     

//  Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>                                                           
(50);   

String itMatcher = "BruteForce-L1";

Ptr<DescriptorMatcher> 

matdescriptorMatchercher(newcv::BFMatcher(cv::NORM_HAMMING, false)); 

vector<vector<DMatch> > matches,bestMatches;
vector<DMatch> m;

matdescriptorMatchercher->knnMatch(descImg1, descImg2, matches,2);

const float minRatio = 0.95f;//1.f / 1.5f; 
for (int i = 0; i<matches.size(); i++)
{
if(matches[i].size()>1)     
{
DMatch& bestMatch = matches[i][0];  
DMatch& betterMatch = matches[i][1];  
float distanceRatio = bestMatch.distance / betterMatch.distance;  
if (distanceRatio <minRatio)  
{
bestMatches.push_back(matches[i]);
m.push_back(bestMatch);
}
}
}


Mat homo;  
float homographyReprojectionThreshold = 1.0;  
bool homographyFound = refineMatchesWithHomography(  
keyImg1,keyImg2,homographyReprojectionThreshold,m,homo);  

return 0;
}

[c1.jpg][1]

[allimg.jpg][2]


[1]: https://i.stack.imgur.com/Uuy3o.jpg
[2]: https://i.stack.imgur.com/Kwne7.jpg
PMN
  • 11
  • 2
  • I suggest using ratio test and symmetrical matching to remove outliers and possibly ransac although that removed too many features in my situation, see this: http://stackoverflow.com/questions/9539473/opencv-orb-not-finding-matches-once-rotation-scale-invariances-are-introduced – EdChum Apr 05 '16 at 13:29

1 Answers1

0

Thanks EdChum. I used the code given at the link (ratio test/symmetry test) and it provided somewhat OK image matching, but only when the source image was part of the destination, and it is still not accurate enough. Note that I commented out the last ransacTest, as it was removing a lot of true positives unnecessarily. I have attached the two images (source.jpg/destination.jpg), which show what I mean by highlighting the matched part in the destination. Is there any algorithm which will still more accurately/correctly (>90%) identify the source in the destination?

Also, if the source is a similar image (and not exactly the same as the one in the destination), I found that the destination image matching is way off and useless. Am I right? Kindly share your view. 1 = source, 2 = destination.

PMN
  • 11
  • 2
  • The images were not attached, so I am doing it here: [1] = source, [2] = destination. [1]: http://i.stack.imgur.com/d6CHD.jpg [2]: http://i.stack.imgur.com/EIYOs.jpg – PMN Apr 07 '16 at 13:33