Given two occupancy grid maps converted into grayscale cv::Mat images, I used the ORB feature detector and descriptor extractor and matched the features extracted from both images. Most of the matches are false positives. Below I show a single match to demonstrate that it really is a false positive.
Ultimately, I want to find the correct transformation between the two occupancy grid maps so that I can merge them into one globally consistent map. My current code is shown below:
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <nav_msgs/OccupancyGrid.h>

// Load the two maps
nav_msgs::OccupancyGrid map1;
initOccupancyGridMap1(map1);
nav_msgs::OccupancyGrid map2;
initOccupancyGridMap2(map2);
// Convert the two maps to grayscale images
cv::Mat im1 = cvtMapToMat(map1);
cv::Mat im2 = cvtMapToMat(map2);
// Feature Descriptor Extraction
cv::OrbFeatureDetector featureDetector;
cv::OrbDescriptorExtractor featureExtractor;
std::vector<cv::KeyPoint> kp1;
std::vector<cv::KeyPoint> kp2;
cv::Mat d1;
cv::Mat d2;
std::vector<cv::DMatch> matches;
cv::BFMatcher dematc(cv::NORM_HAMMING, false);
// 1. Detect keypoints
featureDetector.detect(im1, kp1);
featureDetector.detect(im2, kp2);
// 2. Extract descriptors
featureExtractor.compute(im1, kp1, d1);
featureExtractor.compute(im2, kp2, d2);
// 3. Match keypoints
dematc.match(d1, d2, matches);
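// Suggestion (not in my original code): plain nearest-neighbour matching
// keeps one match per keypoint no matter how bad it is, which is one reason
// most matches come out as false positives. A common filter is Lowe's ratio
// test: query the two nearest neighbours and keep a match only if the best
// distance is clearly below the second best. The 0.8 threshold is an
// assumed, tunable value.
std::vector<std::vector<cv::DMatch> > knnMatches;
dematc.knnMatch(d1, d2, knnMatches, 2);
std::vector<cv::DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); i++) {
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.8f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}
// The display loop below could iterate over goodMatches instead of matches.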
// 4. Show each match individually, together with its Hamming distance
for (size_t i = 0; i < matches.size(); i++) {
    std::vector<cv::DMatch> match(1, matches[i]);
    std::cout << "Distance: " << match[0].distance << std::endl;
    cv::Mat img_show;
    cv::drawMatches(im1, kp1, im2, kp2, match, img_show);
    cv::imshow("Matches", img_show);
    cv::waitKey(0);
}
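What I plan to try next, once the matches are filtered, is to estimate the transform directly from the point correspondences. Below is a minimal sketch (untested on these maps; it assumes the goodMatches vector from the ratio-test sketch above) using cv::estimateRigidTransform to fit a similarity transform and cv::warpAffine to bring map2 into map1's frame:

#include <opencv2/imgproc/imgproc.hpp>  // cv::warpAffine
#include <opencv2/video/tracking.hpp>   // cv::estimateRigidTransform

// Build point correspondences from the (assumed) filtered matches.
std::vector<cv::Point2f> pts1, pts2;
for (size_t i = 0; i < goodMatches.size(); i++) {
    pts1.push_back(kp1[goodMatches[i].queryIdx].pt);
    pts2.push_back(kp2[goodMatches[i].trainIdx].pt);
}

// fullAffine = false restricts the fit to rotation + uniform scale +
// translation, which is what aligning two 2-D occupancy grids needs.
// An empty matrix is returned when no consistent transform is found.
cv::Mat T = cv::estimateRigidTransform(pts2, pts1, false);
if (!T.empty()) {
    cv::Mat im2in1;
    cv::warpAffine(im2, im2in1, T, im1.size());  // map2 in map1's frame
    cv::imshow("map2 aligned to map1", im2in1);
    cv::waitKey(0);
}

If the descriptor matches stay mostly wrong even after filtering, the grids may simply share too little distinctive structure for this pipeline, in which case I would expect to need a different alignment strategy rather than better match filtering.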