I'm computing the fundamental matrix for visual odometry in Python and C++ using OpenCV. I've tried to keep the code in both implementations as close as possible. However, I'm getting different results: the Python version works correctly, while the C++ version returns completely incorrect results. Below is a partial example of the code and outputs (Python first, then C++).
Python version code:
import os
import sys
import cv2
import numpy as np
import math
# Main Function
if __name__ == '__main__':
    K = np.matrix([[522.4825, 0, 300.9989],
                   [0, 522.5723, 258.1389],
                   [0.0, 0.0, 1.0]])

    img1 = cv2.imread(sys.argv[1] + ".jpg")
    img2 = cv2.imread(sys.argv[2] + ".jpg")

    # sift = cv2.SURF()
    detector = cv2.FeatureDetector_create("SURF")        # SURF, FAST, SIFT
    descriptor = cv2.DescriptorExtractor_create("SURF")  # SURF, SIFT

    # kp1, des1 = sift.detectAndCompute(img1, None)
    # kp2, des2 = sift.detectAndCompute(img2, None)
    kp1 = detector.detect(img1)
    kp2 = detector.detect(img2)
    k1, des1 = descriptor.compute(img1, kp1)
    k2, des2 = descriptor.compute(img2, kp2)

    # BFMatcher with default params
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)

    # Apply ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)

    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        F, mask = cv2.findFundamentalMat(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
    else:
        print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
        matchesMask = None

    print F
And its output:
[[ -3.22706105e-07 1.12585581e-04 -2.86938406e-02]
[ -1.16307090e-04 -5.04244159e-07 5.60714444e-02]
[ 2.98839742e-02 -5.99974406e-02 1.00000000e+00]]
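As a sanity check that this F is geometrically sensible, the epipolar constraint x2^T F x1 ≈ 0 should hold for the inlier matches. Here is a minimal sketch of that check (reusing F, mask, src_pts, and dst_pts from the script above; the mean residual is just for eyeballing):

# Sanity check: for a correct F, x2^T * F * x1 should be ~0 for every inlier
pts1 = src_pts.reshape(-1, 2)
pts2 = dst_pts.reshape(-1, 2)
inliers = mask.ravel() == 1

# Promote to homogeneous coordinates
h1 = np.hstack([pts1[inliers], np.ones((inliers.sum(), 1), np.float32)])
h2 = np.hstack([pts2[inliers], np.ones((inliers.sum(), 1), np.float32)])

# Algebraic epipolar residual |x2^T F x1| per inlier match
residuals = np.abs(np.sum(h2 * h1.dot(np.asarray(F).T), axis=1))
print residuals.mean()  # should be close to zero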
The C++ version:
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace std;
int main(int argc, char *argv[]) {
    // Define intrinsic matrix
    cv::Mat intrinsic = (cv::Mat_<double>(3,3) << 522.4825, 0,        300.9989,
                                                  0,        522.5723, 258.1389,
                                                  0,        0,        1);

    // Read input images (flag 0 = load as grayscale)
    string jpg1 = argv[1];
    jpg1.append(".jpg");
    string jpg2 = argv[2];
    jpg2.append(".jpg");
    cv::Mat image1 = cv::imread(jpg1, 0);
    cv::Mat image2 = cv::imread(jpg2, 0);
    if (!image1.data || !image2.data)
        return 0;

    // Display the images
    // cv::namedWindow("Image 1");
    // cv::imshow("Image 1", image1);
    // cv::namedWindow("Image 2");
    // cv::imshow("Image 2", image2);

    // Pointer to the feature point detector object
    cv::Ptr<cv::FeatureDetector> detector = new cv::SurfFeatureDetector();
    // Pointer to the feature descriptor extractor object
    cv::Ptr<cv::DescriptorExtractor> extractor = new cv::SurfDescriptorExtractor();

    // Detection of the SURF features
    vector<cv::KeyPoint> keypoints1, keypoints2;
    detector->detect(image1, keypoints1);
    detector->detect(image2, keypoints2);

    // Extraction of the SURF descriptors
    cv::Mat descriptors1, descriptors2;
    extractor->compute(image1, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);

    // Construction of the matcher, followed by the same ratio test
    cv::BruteForceMatcher<cv::L2<float> > matcher;
    vector<vector<cv::DMatch> > matches;
    vector<cv::DMatch> good_matches;
    matcher.knnMatch(descriptors1, descriptors2, matches, 2);
    for (vector<vector<cv::DMatch> >::iterator matchIterator = matches.begin();
         matchIterator != matches.end(); ++matchIterator) {
        if ((*matchIterator)[0].distance < 0.7f * (*matchIterator)[1].distance) {
            good_matches.push_back((*matchIterator)[0]);
        }
    }

    // Convert keypoints into Point2f
    vector<cv::Point2f> src_pts, dst_pts;
    for (vector<cv::DMatch>::iterator it = good_matches.begin();
         it != good_matches.end(); ++it) {
        // Get the position of left keypoints
        float x = keypoints1[it->queryIdx].pt.x;
        float y = keypoints1[it->queryIdx].pt.y;
        src_pts.push_back(cv::Point2f(x, y));
        // Get the position of right keypoints
        x = keypoints2[it->trainIdx].pt.x;
        y = keypoints2[it->trainIdx].pt.y;
        dst_pts.push_back(cv::Point2f(x, y));
    }

    // Compute F matrix using RANSAC
    cv::Mat fundamental = cv::findFundamentalMat(
        cv::Mat(src_pts), cv::Mat(dst_pts), // matching points
        CV_FM_RANSAC,                       // RANSAC method
        5.0);                               // distance to epipolar line
    cout << fundamental << endl;

    return 0;
}
And its output:
[-4.310057787788129e-06, 0.0002459670522815174, -0.0413520716270485;
-0.0002531048911221476, -8.423657757958228e-08, 0.0974897887347238;
0.04566865455090797, -0.1062956485414729, 1]
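Since a fundamental matrix is only defined up to scale (and sign), a raw element-wise comparison of the two outputs can be misleading; comparing them after normalizing to unit Frobenius norm gives a fairer distance. A quick Python sketch, with the two printed matrices pasted in by hand:

import numpy as np

F_py = np.array([[-3.22706105e-07,  1.12585581e-04, -2.86938406e-02],
                 [-1.16307090e-04, -5.04244159e-07,  5.60714444e-02],
                 [ 2.98839742e-02, -5.99974406e-02,  1.00000000e+00]])
F_cpp = np.array([[-4.310057787788129e-06,  0.0002459670522815174, -0.0413520716270485],
                  [-0.0002531048911221476, -8.423657757958228e-08,  0.0974897887347238],
                  [ 0.04566865455090797,   -0.1062956485414729,     1.0]])

# F is only defined up to scale, so normalize before comparing
F_py = F_py / np.linalg.norm(F_py)
F_cpp = F_cpp / np.linalg.norm(F_cpp)

# Check both signs; a value near zero would mean the same epipolar geometry
print min(np.linalg.norm(F_py - F_cpp), np.linalg.norm(F_py + F_cpp))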
Here are two test images: image 1 image 2
I can't find the reason for the discrepancy. Could anyone tell me why the two implementations disagree?
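(One factor I can rule in or out myself: RANSAC draws random samples, so even two runs of the same implementation need not return identical matrices. A quick way to gauge that run-to-run spread, re-running the Python estimation several times on the same matched points from the script above:)

# Re-estimate F a few times on the same matches to gauge RANSAC's spread
for trial in range(5):
    Ft, mt = cv2.findFundamentalMat(src_pts, dst_pts, cv2.RANSAC, 5.0)
    print "run %d: %d inliers" % (trial, int(mt.ravel().sum()))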