I am trying to find the transformation matrix H so that I can multiply the (x, y) pixel coordinates and get the (x, y) real-world coordinates. Here is my code:
import cv2
import numpy as np
from numpy.linalg import inv
if __name__ == '__main__':

    D = [159.1, 34.2]
    I = [497.3, 37.5]
    G = [639.3, 479.7]
    A = [0, 478.2]

    # Read source image.
    im_src = cv2.imread('/home/vivek/june_14.png')
    # Four corners of the book in source image
    pts_src = np.array([D, I, G, A])

    # Read destination image.
    im_dst = cv2.imread('/home/vivek/june_14.png')
    # Four corners of the book in destination image.
    print("img1 shape:", im_dst.shape)

    scale = 1
    O = [0.0, 0.0]
    X = [134.0 * scale, 0]
    Y = [0.0, 184.0 * scale]
    P = [134.0 * scale, 184.0 * scale]
    # lx = 75.5 * scale
    # ly = 154.0 * scale
    pts_dst = np.array([O, X, P, Y])

    # Calculate Homography
    h, status = cv2.findHomography(pts_src, pts_dst)
    print("homography:", h)
    print("inv of H:", inv(h))
    print("position of the blob on the ground xy plane:",
          np.dot(np.dot(h, np.array([[323.0], [120.0], [1.0]])), scale))

    # Warp source image to destination based on homography
    im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))

    # Display images
    cv2.imshow("Source Image", im_src)
    cv2.imshow("Destination Image", im_dst)
    cv2.imshow("Warped Source Image", im_out)
    cv2.imwrite("im_out.jpg", im_out)
    cv2.waitKey(0)
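For reference, this is how I understand the mapping should work: the real-world point comes from H · [x, y, 1]^T after dividing by the third (homogeneous) component w. A minimal sketch of that, reusing the h returned by findHomography above and the same example pixel (323, 120):

import numpy as np

def pixel_to_world(h, x, y):
    # Map the pixel (x, y) through the homography h, then divide by the
    # third (homogeneous) component to get Euclidean coordinates.
    p = np.dot(h, np.array([x, y, 1.0]))
    return p[0] / p[2], p[1] / p[2]

# Same example pixel as in the code above
wx, wy = pixel_to_world(h, 323.0, 120.0)
print("blob on the ground xy plane:", wx, wy)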
The global (x, y) coordinates I am getting are very far off. Am I doing something wrong somewhere?