I figured I could do something similar to what is done during calibration with a grid: calculate the reprojection error. First I solve for the transformation matrix. Then I transform the 3D object points using that transformation and use projectPoints to project them back into 2D. Finally I measure the distance between the original 2D points and the reprojected 2D points and use that as a quality measure. Objects with an impossible pose typically have a reprojection error of 100 pixels or more in my images, while possible objects stay below 20 px, so a 25-pixel cutoff seems to work fine.
Note that more transformations are possible than I thought. In my original image maybe two are actually impossible with my current camera, but the check still rejected a lot of fakes.
If nobody else has any ideas I will accept this as the answer.
Here is some code for the method I use:
//This is the object in 3D
double width = 50.0; //Object is 50mm wide
double height = 30.0; //Object is 30mm tall
cv::Mat object_points(4,3,CV_64FC1);
object_points.at<double>(0,0)=0;
object_points.at<double>(0,1)=0;
object_points.at<double>(0,2)=0;
object_points.at<double>(1,0)=width;
object_points.at<double>(1,1)=0;
object_points.at<double>(1,2)=0;
object_points.at<double>(2,0)=width;
object_points.at<double>(2,1)=height;
object_points.at<double>(2,2)=0;
object_points.at<double>(3,0)=0;
object_points.at<double>(3,1)=height;
object_points.at<double>(3,2)=0;
//Check all rectangles for error
cv::Mat image_points(4,2,CV_64FC1);
for (size_t i = 0; i < rectangles_to_test.size(); i++) {
    // Get rectangle points
    for (size_t c = 0; c < 4; ++c) {
        image_points.at<double>(c,0) = rectangles_to_test[i].points[c].x;
        image_points.at<double>(c,1) = rectangles_to_test[i].points[c].y;
    }
    // Calculate transformation matrix
    cv::Mat rvec, tvec;
    cv::solvePnP(object_points, image_points, M1, D1, rvec, tvec);
    cv::Mat rotation;
    Matrix4<double> transform;
    transform.init_identity();
    cv::Rodrigues(rvec, rotation);
    for (size_t row = 0; row < 3; ++row) {
        for (size_t col = 0; col < 3; ++col) {
            transform.set(row, col, rotation.at<double>(row, col));
        }
        transform.set(row, 3, tvec.at<double>(row, 0));
    }
    // Calculate projection
    std::vector<cv::Point3f> p3(4);
    std::vector<cv::Point2f> p2;
    Vector4<double> p = transform * Vector4<double>(0, 0, 0, 1);
    p3[0] = cv::Point3f((float)p.x, (float)p.y, (float)p.z);
    p = transform * Vector4<double>(width, 0, 0, 1);
    p3[1] = cv::Point3f((float)p.x, (float)p.y, (float)p.z);
    p = transform * Vector4<double>(width, height, 0, 1);
    p3[2] = cv::Point3f((float)p.x, (float)p.y, (float)p.z);
    p = transform * Vector4<double>(0, height, 0, 1);
    p3[3] = cv::Point3f((float)p.x, (float)p.y, (float)p.z);
    // Points are already in camera coordinates, so project with zero rotation and translation
    cv::projectPoints(p3, cv::Mat::zeros(1, 3, CV_64FC1), cv::Mat::zeros(1, 3, CV_64FC1), M1, D1, p2);
    // Calculate reprojection error (sum of the corner distances in pixels)
    rectangles_to_test[i].reprojection_error = 0.0;
    for (size_t c = 0; c < 4; ++c) {
        double dx = p2[c].x - rectangles_to_test[i].points[c].x;
        double dy = p2[c].y - rectangles_to_test[i].points[c].y;
        rectangles_to_test[i].reprojection_error += std::sqrt(dx*dx + dy*dy);
    }
    if (rectangles_to_test[i].reprojection_error > reprojection_error_threshold) {
        //rectangle is no good
    }
}
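As an aside, since cv::projectPoints applies rvec and tvec itself, the manual Matrix4 transform can be skipped. Here is a minimal sketch of the same check, assuming the same M1/D1 intrinsics and the same corner ordering as above (the function name is just illustrative, not part of my code):

#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>
#include <limits>

// Summed reprojection error over the corners; returns a huge value if
// solvePnP finds no pose, so such a rectangle is always rejected.
double rectangle_reprojection_error(const std::vector<cv::Point3f> &object_pts,
                                    const std::vector<cv::Point2f> &image_pts,
                                    const cv::Mat &M1, const cv::Mat &D1)
{
    cv::Mat rvec, tvec;
    if (!cv::solvePnP(object_pts, image_pts, M1, D1, rvec, tvec))
        return std::numeric_limits<double>::max();

    // projectPoints transforms the object points by rvec/tvec before
    // projecting, so no manual 4x4 transform is needed.
    std::vector<cv::Point2f> reprojected;
    cv::projectPoints(object_pts, rvec, tvec, M1, D1, reprojected);

    double error = 0.0;
    for (size_t c = 0; c < image_pts.size(); ++c) {
        double dx = reprojected[c].x - image_pts[c].x;
        double dy = reprojected[c].y - image_pts[c].y;
        error += std::sqrt(dx*dx + dy*dy);
    }
    return error;
}

The result is the same error summed over the four corners, so the 25-pixel threshold from above applies unchanged.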