
I'm trying to calibrate a webcam using OpenCV 2.3.1 and Visual Studio 2010 (C++ console app). I'm using this class:

#include <opencv2/opencv.hpp>
#include <vector>
#include <string>
#include <iostream>

class CameraCalibrator{
private:
   std::vector<std::vector<cv::Point3f>> objectPoints;
   std::vector<std::vector<cv::Point2f>> imagePoints;
   //length of one chessboard square
   float squareLenght;
   //output Matrices
   cv::Mat cameraMatrix; //intrinsic
   cv::Mat distCoeffs;
   //flag to specify how calibration is done
   int flag;
   //used in image undistortion
   cv::Mat map1,map2;
   bool mustInitUndistort;
public:
    CameraCalibrator(): flag(0), squareLenght(36.0), mustInitUndistort(true){};
    int addChessboardPoints(const std::vector<std::string>& filelist,cv::Size& boardSize){
        std::vector<std::string>::const_iterator itImg;
        std::vector<cv::Point2f> imageCorners;
        std::vector<cv::Point3f> objectCorners;
        //initialize the chessboard corners in the chessboard reference frame
        //3d scene points
        for(int i = 0; i<boardSize.height; i++){
            for(int j=0;j<boardSize.width;j++){
                objectCorners.push_back(cv::Point3f(float(i)*squareLenght,float(j)*squareLenght,0.0f));
            }
        }
        //2D Image points:
        cv::Mat image; //to contain chessboard image
        int successes = 0;
        //cv::namedWindow("Chess");
        for(itImg=filelist.begin(); itImg!=filelist.end(); itImg++){
            image = cv::imread(*itImg,0); //load the chessboard image as grayscale
            bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
            //cv::drawChessboardCorners(image, boardSize, imageCorners, found);
            //cv::imshow("Chess",image);
            //cv::waitKey(1000);
            //refine the corner locations to sub-pixel accuracy, only if the board was found
            if(found){
                cv::cornerSubPix(image, imageCorners, cv::Size(5,5),cv::Size(-1,-1),
                    cv::TermCriteria(cv::TermCriteria::MAX_ITER+cv::TermCriteria::EPS,30,0.1));
            }
            //if we have a good board, add it to our data
            if(found && imageCorners.size() == boardSize.area()){
                addPoints(imageCorners,objectCorners);
                successes++;
            }
        }
        return successes;
    }
    void addPoints(const std::vector<cv::Point2f>& imageCorners,const std::vector<cv::Point3f>& objectCorners){
        //2D image point from one view
        imagePoints.push_back(imageCorners);
        //corresponding 3D scene points
        objectPoints.push_back(objectCorners);
    }
    double calibrate(cv::Size &imageSize){
        mustInitUndistort = true;
        std::vector<cv::Mat> rvecs,tvecs;
        return
            cv::calibrateCamera(objectPoints, //the 3D points
                imagePoints,
                imageSize, 
                cameraMatrix, //output camera matrix
                distCoeffs,
                rvecs,tvecs,
                flag);
    }
    void remap(const cv::Mat &image, cv::Mat &undistorted){
        std::cout << cameraMatrix;
        if(mustInitUndistort){ //called once per calibration
            cv::initUndistortRectifyMap(
                cameraMatrix,
                distCoeffs,
                cv::Mat(),
                cameraMatrix,
                image.size(),
                CV_32FC1,
                map1,map2);
            mustInitUndistort = false;
        }
        //apply mapping functions
        cv::remap(image,undistorted,map1,map2,cv::INTER_LINEAR);
    }
};

I'm using 10 chessboard images (assuming that's enough for calibration) with a resolution of 640x480. The main function looks like this:

int main(){
    CameraCalibrator calibrateCam;
    std::vector<std::string> filelist;
    filelist.push_back("img10.jpg");
    filelist.push_back("img09.jpg");
    filelist.push_back("img08.jpg");
    filelist.push_back("img07.jpg");
    filelist.push_back("img06.jpg");
    filelist.push_back("img05.jpg");
    filelist.push_back("img04.jpg");
    filelist.push_back("img03.jpg");
    filelist.push_back("img02.jpg");
    filelist.push_back("img01.jpg");

    cv::Size boardSize(8,6);
    double calibrateError;
    int success;
    success = calibrateCam.addChessboardPoints(filelist,boardSize);
    std::cout<<"Success:" << success << std::endl;
    cv::Size imageSize;
    cv::Mat inputImage, outputImage;
    inputImage = cv::imread("img10.jpg",0);
    outputImage = inputImage.clone();
    imageSize = inputImage.size();
    calibrateError = calibrateCam.calibrate(imageSize);
    std::cout<<"Calibration error:" << calibrateError << std::endl;
    calibrateCam.remap(inputImage,outputImage);
    cv::namedWindow("Original");
    cv::imshow("Original",inputImage);
    cv::namedWindow("Undistorted");
    cv::imshow("Undistorted",outputImage);
    cv::waitKey();
    return 0;
}

Everything runs without errors. cameraMatrix looks like this (approximately):

685.65 0 365.14
0 686.38 206.98
0 0 1

The calibration error is 0.310157, which seems acceptable.

But when I use remap, the output image looks even worse than the original. Here is a sample:

Original image: [original image]

Undistorted image: [undistorted image]

So, the question is: am I doing something wrong in the calibration process? Are 10 different chessboard images enough for calibration? Do you have any suggestions?

Banana
  • possible duplicate of [OpenCV Transform using Chessboard](http://stackoverflow.com/questions/7902895/opencv-transform-using-chessboard) – karlphillip Apr 05 '12 at 12:35
  • Hi, I am using the class and it's working perfectly fine for me. The only problem I have is how to read the camera intrinsics. I am trying to use the .at(0,0) thing but I am getting an error message... – user1388142 Sep 15 '12 at 07:24
  • Maybe the type is wrong, try `.at<double>(0,0)` – Banana Sep 17 '12 at 13:35

2 Answers


The camera matrix doesn't undistort the lens; those four values are simply the focal lengths (horizontal and vertical) and the image centre (x and y).

There is a separate row matrix of 4 or 5 values (`distCoeffs` in your code) which contains the lens distortion mapping - see Karl's answer for example code.
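
As a rough sketch of how those values can be read out (assuming `cameraMatrix` and `distCoeffs` come straight from `cv::calibrateCamera`, which stores them as CV_64F matrices; the helper name `printCalibration` is just for illustration):

#include <opencv2/opencv.hpp>
#include <iostream>

// Print the intrinsics and distortion coefficients produced by cv::calibrateCamera.
// Both matrices are CV_64F, so their elements are read with at<double>().
void printCalibration(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs){
    std::cout << "fx = " << cameraMatrix.at<double>(0,0)   // focal length x (pixels)
              << ", fy = " << cameraMatrix.at<double>(1,1) // focal length y (pixels)
              << ", cx = " << cameraMatrix.at<double>(0,2) // principal point x
              << ", cy = " << cameraMatrix.at<double>(1,2) // principal point y
              << std::endl;
    // distCoeffs is typically 1x5: k1, k2, p1, p2, k3
    for(int i = 0; i < (int)distCoeffs.total(); i++){
        std::cout << "distCoeffs[" << i << "] = " << distCoeffs.at<double>(i) << std::endl;
    }
}

If the distortion coefficients are all zero or wildly large, that usually points to a problem with the input views rather than with the undistortion code.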

Martin Beckett
  • Yes, I understand that. I saw the example code from Karl's answer. As you can see in my code, `distCoeffs` is passed to `cv::initUndistortRectifyMap`, and I want to know whether I wrote that call correctly, because after using `cv::remap` I get worse results than the original, as you can see in my sample pictures. Thanks for the help. – Banana Apr 05 '12 at 16:49
  • @Banana - check the values in `distCoeffs`; are they empty or strange? (I appreciate it's hard to know what good values are!) You could also run the OpenCV sample software with your camera and see what it calculates. – Martin Beckett Apr 05 '12 at 17:23
  • BTW, having the image center at 365.14,206.98 is *way* off for a 640x480 image. It is very likely much closer to 320,240. I have a 640x480 camera with center point of 317.66,240.11. – cape1232 Apr 05 '12 at 20:25

The calibration is done with a numerical optimization that has a pretty shallow slope near the solution. Also, the function being minimized is very nonlinear. So, my guess is that your 10 images aren't enough. I calibrate cameras with very wide-angle lenses (i.e. very distorted images), and I try to get like 50 or 60 images.

I try to get images with the chessboard at 3 or 4 positions along each edge of the image, plus some in the middle, with multiple orientations relative to the camera and at 3 different distances (super close, typical, and as far as you can get and still resolve the checkerboard).

Getting the chessboard near the corners is very important. Your example images do not have the chessboard very near the corner of the image. It's those points that constrain the calibration to do the right thing in the very distorted parts of the image (the corners).
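
For example, a quick way to see which boards are hurting the fit is to compute the reprojection error per view rather than only looking at the overall number. The sketch below assumes you keep the rvecs and tvecs that cv::calibrateCamera returns, along with the corresponding objectPoints/imagePoints entry for each view:

#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>

// RMS reprojection error (in pixels) for a single chessboard view.
// Views with a much larger error than the rest are good candidates to recapture.
double viewRmsError(const std::vector<cv::Point3f>& objectCorners,
                    const std::vector<cv::Point2f>& imageCorners,
                    const cv::Mat& rvec, const cv::Mat& tvec,
                    const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs){
    std::vector<cv::Point2f> projected;
    cv::projectPoints(objectCorners, rvec, tvec, cameraMatrix, distCoeffs, projected);
    double err = cv::norm(cv::Mat(imageCorners), cv::Mat(projected), cv::NORM_L2);
    return std::sqrt(err*err / projected.size());
}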

cape1232
  • Thank you for explanation. I'll try in few days camera calibration as you described and write here all the results. – Banana Apr 05 '12 at 21:24