
I am working with VSCode 1.68.1 on Ubuntu 20.04.
I am following this tutorial: https://courses.ece.cornell.edu/ece5990/ECE5725_Fall2020_Projects/Dec_21_Demo/Drawing%20Robot/eam348_mm2994_W/index.html

import numpy as np
import time
import cv2
import sys
import cv2.aruco as aruco
import socket
import datetime
import glob
import math
import multiprocessing as mp

port = 30003
IP = '192.11.0.25'
robot_ID = 20185500976
robot = 'ur-20185500976'
marker_dimension = 0.06
worldx = 390
worldy = 260
bottom_left = 31  #this is the origin - positive x: towards bottom right - positive y: towards top left
bottom_right = 32
top_left = 9
top_right = 20

#camera dist, matrix and newcameramatrix
dist=np.array(([[5.0164361897882787e-02, 6.6308284023737640e-01, 2.5493975084043882e-03, -6.0403656948007376e-03, -2.9652221208277720e+00]]))
mtx=np.array([[6.1618286891135097e+02, 0., 3.2106366551961219e+02],
 [0 , 6.1595924417559945e+02, 2.4165645046034246e+02],
 [0. , 0. , 1. ]])

found_dict_pixel_space = {}
found_dict_camera_space = {}
found_dict_world_space = {}
found_dict_homography_space = {}
final_string = ""
originRvec = np.array([0,0,1])
markerRvec = np.array([0,0,0])

def UDP(IP,port,message):
   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #IPv4 DNS server - UDP protocol
   sock.sendto(bytes(message, "utf-8"), (IP,port)) #self, data, address

def getMarkerCenter(corners):
 px = (corners[0][0] + corners[1][0] + corners[2][0]+ corners[3][0]) * 0.25
 py = (corners[0][1] + corners[1][1] + corners[2][1]+ corners[3][1]) * 0.25
 return [px,py]

def getMarkerRotation(corners):
 unit_x_axis = [1.,0.]
 center = getMarkerCenter(corners)
 right_edge_midpoint = (corners[1]+corners[2])/2.
 unit_vec = (right_edge_midpoint-center)/np.linalg.norm(right_edge_midpoint-center)
 angle = np.arccos(np.dot(unit_x_axis,unit_vec))
 return angle

def inversePerspective(rvec, tvec):
   R, _ = cv2.Rodrigues(rvec)
   R = np.array(R).T #this was np.matrix but had error
   invTvec = np.dot(-R, np.array(tvec))
   invRvec, _ = cv2.Rodrigues(R)
   return invRvec, invTvec
def normalize(v):
   if np.linalg.norm(v) == 0 : return v
   return v / np.linalg.norm(v)

def findWorldCoordinate(originCorners, point):
   zero = np.array(originCorners[3]) #bottom left as the origin - check the data structure
   print(zero)
   x = (np.array(originCorners[0]) - zero)  # bottom right - Green Axis -- throw out z
   y = (np.array(originCorners[1]) - zero)   # top left - Red Axis -- throw out z
   x = x[0][0:2]
   y = y[0][0:2]
   x = normalize(x)
   y = normalize(y)
   #print("x", x)
   vec = (point - zero)[0][0:2]
   #print("vec", vec)
   vecNormal = normalize(vec)
   cosX = np.dot(x,vecNormal)
   cosY = np.dot(y,vecNormal)
   xW = np.linalg.norm(vec) * cosX
   yW = np.linalg.norm(vec) * cosY
   return [xW, yW]

cap=cv2.VideoCapture(4)
font = cv2.FONT_HERSHEY_SIMPLEX #font for displaying text (below)

while True:
   t0 = time.time()
   ret, frame = cap.read()
   h, w = frame.shape[:2]
   #new image size to generate
   
   h1, w1 = h, w

   newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 0, (w1,h1))
   #print(newcameramtx)
   #mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w1,h1), 5)
   #dst1 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
   dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
   x, y, w1, h1 = roi
   dst1 = dst1[y:y + h1, x:x + w1]
   frame=dst1

   t1 = time.time()-t0
   gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
   aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
   arucoParameters = aruco.DetectorParameters_create()
   t2 = time.time()-t0
   data = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
   t3 = time.time()-t0
   corners = data[0]
   ids = data[1]
   originIDglobal = 0

   # proceed only if at least one marker ID was detected
   if ids is not None:
        t4 = time.time()-t0
        result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
        rvecs = result[0] # rotation vectors of markers
        tvecs = result[1] # translation vector of markers
        
        #setting bottom_left as the origin
        if bottom_left in ids:
            originID = np.where(ids == bottom_left)[0][0]
            originIDglobal = originID
        else:
            originID = originIDglobal
        originCorners = corners[originID] # corners of the tag set as the origin
        originCornersCamera = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])[0] # origin tag corners in camera space
        originRvec = rvecs[originID] # rotation vec of origin tag
        originTvec = tvecs[originID] # translation vec of origin tag

        display = aruco.drawDetectedMarkers(frame, corners,ids) #Draw a square around the markers
        t5 = time.time()-t0
        for i in range(len(ids)):
            ID = ids[i]
            rvec = rvecs[i]
            tvec = tvecs[i]
            corners4 = corners[i]
            
            display = cv2.drawFrameAxes(frame, newcameramtx, dist, rvec, tvec,0.03)#Draw 3D Axis, 3cm(0.03)
            found_dict_pixel_space[""+str(ids[i][0])] = corners4 # put the corners of this tag in the dictionary

        # Homography
        zero = found_dict_pixel_space[str(bottom_left)][0][3] #bottom left - 3
        x = found_dict_pixel_space[str(bottom_right)][0][2] #bottom right - 27
        y = found_dict_pixel_space[str(top_left)][0][0] #top left - 22
        xy = found_dict_pixel_space[str(top_right)][0][1] #top right - 24    

        workspace_world_corners = np.array([[0.0, 0.0], [worldx, 0.0], [0.0, worldy], [worldx, worldy]], np.float32) # 4 corners in millimeters
        workspace_pixel_corners = np.array([zero,x,y,xy], np.float32)  # 4 corners in pixels
        # Homography Matrix
        h, status = cv2.findHomography(workspace_pixel_corners, workspace_world_corners) #perspective matrix
        t6=time.time()-t0
        im_out = cv2.warpPerspective(frame, h, (worldx,worldy)) #showing that it works
          
        t7 = time.time()-t0
        for i in range(len(ids)):
            j = ids[i][0]
            corners_pix = found_dict_pixel_space[str(j)]#[0]
            corners_pix_transformed = cv2.perspectiveTransform(corners_pix,h)
            found_dict_homography_space[str(j)] = corners_pix_transformed
        print(found_dict_homography_space)
        robot = found_dict_homography_space[str(robot_ID)][0]
        print(getMarkerCenter(robot))
        cv2.imshow('Warped Source Image', im_out)
        t8=time.time()-t0
        print("t1: %8.4f   t2: %8.4f   t3: %8.4f   t4: %8.4f   t5: %8.4f   t6: %8.4f   t7: %8.4f   t8: %8.4f" %(t1,t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6,t8-t7))
      

   else:
        display = frame
        cv2.imshow('Display', display)

   # Display result frame
   cv2.imshow("frame",frame)

   key = cv2.waitKey(1)
   if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()

I am getting the error: `NameError: name 'getCornerInCameraWorld' is not defined`

  • I am unable to find anything related to getCornerInCameraWorld.

Please provide some help.

Manpreet
  • You didn't define `getCornerInCameraWorld()` in your code. – Himanshu Kawale Jun 29 '22 at 16:47
  • @HimanshuKawale It's not his code, he just copied it from a tutorial site. – Barmar Jun 29 '22 at 16:51
  • I'm not sure this is a standard CV2 function, I don't know why the tutorial uses it. [this question](https://stackoverflow.com/questions/46363618/aruco-markers-with-opencv-get-the-3d-corner-coordinates) contains a definition of getCornersInCameraWorld, but it's in C++, not Python. Notice that the name has `corners` not `corner`. Maybe try changing to that in your code. – Barmar Jun 29 '22 at 16:53
  • I tried `getCornersInCameraWorld`; it didn't work. – Manpreet Jun 29 '22 at 17:12
  • @Barmar Corners didn't work. Is there any other command in Python that I can use to get the corners? Thank you – Manpreet Jun 29 '22 at 17:44
  • This seems like a problem with the tutorial. Maybe you should contact the school and find out if there's an errata. – Barmar Jun 29 '22 at 17:50
  • OK, I will try to find the contact info of the tutorial author. Thank you – Manpreet Jun 29 '22 at 17:54
  • Contact info is on the page: Mahshid Moghadasi: mm2994@cornell.edu - MS Matter Design Computation 2021, Emmett Milliken: eam348@cornell.edu - MEng Electrical and Computer Engineering 2020 – Christoph Rackwitz Jun 30 '22 at 12:44

1 Answer


I got a reply from the author of the code. She confirmed that the code was missing a function and provided it.

The missing code was:

def getCornerInCameraWorld(size, rvec, tvec):
    half_size = size * 0.5
    rotMatrix, jacobian = cv2.Rodrigues(rvec) #convert rot vector from marker space to camera space
    X = half_size * rotMatrix[:,0]
    Y = half_size * rotMatrix[:,1]
    c1 = np.add(np.add(-1*X,Y), tvec)   #top left
    c2 = np.add(np.add(X, Y), tvec)    #top right
    c3 = np.add(np.add(X, -1*Y), tvec)     # bottom right
    c4 = np.add(np.add(-1*X, -1*Y), tvec)    # bottom left
    cornersInCameraWorld = [c1,c2,c3,c4]
    cornersInCameraWorld = np.array(cornersInCameraWorld, dtype=np.float32)
    return cornersInCameraWorld, rotMatrix

I have checked, and it is working for me.
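
Here is a minimal sketch of how the function ends up being called in the main loop, reusing the variable names already defined in the script above (marker_dimension, newcameramtx, dist, corners, ids, bottom_left) and only handling the origin tag:

# minimal sketch: pose-estimate the detected markers, then recover the origin
# tag's corners in camera space with the restored function
result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
rvecs = result[0]  # rotation vector of each detected marker
tvecs = result[1]  # translation vector of each detected marker

originID = np.where(ids == bottom_left)[0][0]  # index of the tag used as the origin
originCornersCamera, rotMatrix = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])
print(originCornersCamera)  # four corner positions of the origin tag in camera space (metres)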

Manpreet