0

ERROR: Error: Bad argument (The input arrays should be 2D or 3D point sets) in cv::findHomography, file C:\opencv\source\opencv-3.3.0\modules\calib3d\src\fundam.cpp, line 341

I'm new to both c++ and opencv3.3. following is a program I'm trying to develop to stitch 2 images. i have the similar python code and i'm trying to get it into c++. a runime error occurs and please help..

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace std;
using namespace cv;

Mat DetectAndDescribe(Mat image) {
    //create the greyscale image
    Mat grayImage;
    cv::cvtColor(image, grayImage, COLOR_BGR2GRAY);

    //create the keypoint detector and descriptor as "feature" using SIFT
    Ptr<Feature2D> feature = xfeatures2d::SIFT::create();

    //create a matrix of keypoints using feature
    vector<KeyPoint> keypoints;
    feature->detect(grayImage, keypoints);

    //create a maatrix of descriptors using feature and keypoints
    Mat descriptor;
    feature->compute(grayImage, keypoints, descriptor);

    return (keypoints, descriptor);
}

Mat matchKeypoints(Mat imageA, Mat imageB, vector<KeyPoint> keypointA, vector<KeyPoint> keypointB, Mat featuresA, Mat featuresB, float ratio, double repojThresh){
    //create a vector of vector to hold raw matches
    vector<vector<DMatch>> rawMatches;

    //create a vector of DMatches to hold good matches
    vector<DMatch> goodMatches;

    //create two vector points to hold the points where the lines will be drawn
    vector<Point2f> pointsA;
    vector<Point2f> pointsB;

    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    matcher->knnMatch(featuresA, featuresB, rawMatches, 2);

    goodMatches.reserve(rawMatches.size());

/*  double tresholdDist = ratio * sqrt(double(imageA.size().height*imageA.size().height + imageA.size().width*imageA.size().width));

    for (size_t i = 0; i < rawMatches.size(); ++i)
    {
        for (int j = 0; j < rawMatches[i].size(); j++)
        {
            Point2f from = keypointA[rawMatches[i][j].queryIdx].pt;
            Point2f to = keypointB[rawMatches[i][j].trainIdx].pt;

            //calculate local distance for each possible match
            double dist = sqrt((from.x - to.x) * (from.x - to.x) + (from.y - to.y) * (from.y - to.y));

            //save as best match if local distance is in specified area and on same height
            if (dist < tresholdDist && abs(from.y - to.y)<2)
            {
                goodMatches.push_back(rawMatches[i][j]);
                j = rawMatches[i].size();
            }
        }
    }
*/
    for (size_t i = 0; i < rawMatches.size(); i++)
    {
        if ((rawMatches[i].size()==2) && (rawMatches[i][0].distance < (rawMatches[i][1].distance*ratio)))
        {
            goodMatches.push_back(rawMatches[i][0]);
        }
    }

    cv::KeyPoint::convert(keypointA, pointsA);
    cv::KeyPoint::convert(keypointB, pointsB);

    if (goodMatches.size() > 4) {
        Mat homographyM = findHomography(pointsA, pointsB, RANSAC, repojThresh);
        return(goodMatches, homographyM);
    }
}

Mat drawMatches(Mat imageA, Mat imageB, vector<KeyPoint> keypointsA, vector<KeyPoint> keypointsB, vector<DMatch> matches) {
    //initialize the output visualization image
    float hA = imageA.size().height;
    float wA = imageA.size().width;

    float hB = imageB.size().height;
    float wB = imageB.size().width;

    Mat resultImage = Mat(fmax(hA, hB), wA + wB, 3, "uint8");

    //connect lines between the selected points
    Point2f pointA;
    Point2f pointB;

    for (int i=0; i<matches.size();i++) {
        pointA = Point2f(keypointsA[matches[i].queryIdx].pt.x, keypointsA[matches[i].queryIdx].pt.y);
        pointB = Point2f(keypointsA[matches[i].trainIdx].pt.x+ wA, keypointsB[matches[i].trainIdx].pt.y);

        cv::line(resultImage, pointA, pointB, (0, 255, 0), 1);
    }

    return resultImage;
}

Mat stitch(Mat imageA, Mat imageB, float ratio, double repojThresh, bool showMatches) {
    vector<KeyPoint> keypointA;
    vector<KeyPoint> keypointB;
    Mat featuresA;
    Mat featuresB;
    Mat matchFeatures;
    Mat matches;
    Mat homographyM;
    Mat result;
    float hA = imageA.size().height;
    float wA = imageA.size().width;

    float hB = imageB.size().height;
    float wB = imageB.size().width;

    (keypointA, featuresA) = DetectAndDescribe(imageA);
    (keypointB, featuresB) = DetectAndDescribe(imageB);

    (matches, homographyM) = matchKeypoints(imageA, imageB, keypointA, keypointB, featuresA, featuresB, ratio, repojThresh);

    cv::warpPerspective(imageA, result, homographyM, Size(wA+wB, hA));

    //Point a cv::Mat header at it (no allocation is done)
    Mat final(Size(imageB.cols * 2 + imageB.cols, imageA.rows * 2), CV_8UC3);

    //velikost img1
    Mat roi1(final, Rect(0, 0, imageB.cols, imageB.rows));
    Mat roi2(final, Rect(0, 0, result.cols, result.rows));
    result.copyTo(roi2);
    imageB.copyTo(roi1);


    if (showMatches) {
        Mat visiblelines = drawMatches(imageA, imageB, keypointA, keypointB, matches);
        return (result, visiblelines);
    }

    return result;
}

int main() {
    Mat imageA = cv::imread("left.jpg", 1);
    Mat imageB = cv::imread("middle.jpg", 1);

    Mat result;
    Mat visiblelines;

    (result, visiblelines) = stitch(imageB, imageA, 0.75, 4.0, true);

    cv::imshow("KeyPoint matches", visiblelines);
    cv::imshow("Resulting Image", result);

    cv::imwrite("Result.jpg", result);

    cv::waitKey(0);
    cv::destroyAllWindows();
}

1 Answers1

0

I think, coming from python you might be misunderstanding how return works in C/C++. It seems like your function DetectAndDescribe is returning only the second value (descriptor), so

return (keypoints, descriptor);

is equivalent to

return descriptor;

Furthermore

(keypointA, featuresA) = DetectAndDescribe(imageA);
(keypointB, featuresB) = DetectAndDescribe(imageB);

is actually equivalent to

featuresA = DetectAndDescribe(imageA);
featuresB = DetectAndDescribe(imageB);

Take a look at what comma operator works in C++ when not overloaded (I don't believe it is in OpenCV, but I'll be punished if I'm wrong by those who know for sure)

A simple example:

  int bb = 42;
  int cc;
  (bb, cc) = (3,5);
  std::cout << std::endl << bb << std::endl << cc;

will output

42
5

not

3
5

Almost forgot to say, that to return multiple values you can pass them by reference and assign inside the function:

void DetectAndDescribe(Mat image, vector<KeyPoint>& keypoints, Mat& descriptor) {
    //create the greyscale image
    Mat grayImage;
    cv::cvtColor(image, grayImage, COLOR_BGR2GRAY);

    //create the keypoint detector and descriptor as "feature" using SIFT
    Ptr<Feature2D> feature = xfeatures2d::SIFT::create();

    //create a matrix of keypoints using feature
    feature->detect(grayImage, keypoints);

    //create a maatrix of descriptors using feature and keypoints
    feature->compute(grayImage, keypoints, descriptor);
}

And call it like this:

DetectAndDescribe(imageA, keypointA, featuresA);

Lastly, passing large structures such as matrices by value is a bad practice, you should rather pass them by reference too

void DetectAndDescribe(Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor)

or if you aren't planning to modify them as a const reference:

void DetectAndDescribe(const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptor)
isp-zax
  • 3,833
  • 13
  • 21
  • Thanks. I changed as you said. both detectAndDescribe() and matchPoints(). i didn't know about that earlier. but now another error rose. OpenCV Error: Assertion failed (src.checkVector(2) == dst.checkVector(2)) in cv::findHomography, file C:\opencv\source\opencv-3.3.0\modules\calib3d\src\fundam.cpp, line 349 – KalanaRatnayake Sep 09 '17 at 07:07
  • Finally was able to get the thing running thanks. had missed some other pointers as well – KalanaRatnayake Sep 09 '17 at 14:45