
I am attempting to write a main function that uses the program below with a webcam and displays which cards are currently being detected on screen. What would be the best way to do this? Any help is appreciated. (A rough sketch of the kind of loop I have in mind is included after the program.)

 """

Usage: 

  ./card_img.py filename, num_cards, training_image_filename, training_labels_filename, num_training_cards

Example:
  ./card_img.py test.JPG 4 train.png train.tsv 56


"""

import sys
import numpy as np
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")  # make sure the locally installed OpenCV bindings are importable
import cv2


###############################################################################
# Utility code
###############################################################################

def rectify(h):
  # Reorder the four corner points of a card contour into a consistent order:
  # top-left, top-right, bottom-right, bottom-left.
  h = h.reshape((4,2))
  hnew = np.zeros((4,2),dtype = np.float32)

  add = h.sum(1)
  hnew[0] = h[np.argmin(add)]
  hnew[2] = h[np.argmax(add)]

  diff = np.diff(h,axis = 1)
  hnew[1] = h[np.argmin(diff)]
  hnew[3] = h[np.argmax(diff)]

  return hnew

###############################################################################
# Image Matching  
###############################################################################
def preprocess(img):
  # Convert to grayscale, blur, and adaptive-threshold to get a high-contrast binary image.
  # Example: test_img = cv2.imread("test.JPG")
  #          preprocess(test_img)
  gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
  blur = cv2.GaussianBlur(gray,(5,5),2)
  thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY_INV,11,1)
  return thresh

def imgdiff(img1,img2):
  # Blur both images, take their absolute pixel difference, threshold it, and
  # return the sum as a distance score (lower means more similar).
  img1 = cv2.GaussianBlur(img1,(5,5),5)
  img2 = cv2.GaussianBlur(img2,(5,5),5)    
  diff = cv2.absdiff(img1,img2)  
  diff = cv2.GaussianBlur(diff,(5,5),5)    
  flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY) 
  return np.sum(diff)  

def find_closest_card(training,img):
  # Return the (num, suit) label of the training card whose preprocessed image
  # has the smallest imgdiff score against img.
  features = preprocess(img)
  return sorted(training.values(), key=lambda x:imgdiff(x[1],features))[0][0]


###############################################################################
# Card Extraction
###############################################################################  
def getCards(im, numcards=4):
  # Find the numcards largest contours in the image and yield each card
  # warped to a flat 450x450 image.
  # Example: test = cv2.imread("test.JPG")
  #          getCards(test, 2)          # numcards defaults to 4
  gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
  blur = cv2.GaussianBlur(gray,(1,1),1000)
  flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)

  # Note: OpenCV 3.x returns (image, contours, hierarchy) from findContours instead.
  contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)

  contours = sorted(contours, key=cv2.contourArea,reverse=True)[:numcards]  

  for card in contours:
    peri = cv2.arcLength(card,True)
    approx = rectify(cv2.approxPolyDP(card,0.02*peri,True))

    # box = np.int0(approx)
    # cv2.drawContours(im,[box],0,(255,255,0),6)
    # imx = cv2.resize(im,(1000,600))
    # cv2.imshow('a',imx)      

    h = np.array([ [0,0],[449,0],[449,449],[0,449] ],np.float32)

    transform = cv2.getPerspectiveTransform(approx,h)
    warp = cv2.warpPerspective(im,transform,(450,450))

    yield warp


def get_training(training_labels_filename,training_image_filename,num_training_cards,avoid_cards=None):
  # Build a dict {index: (label, preprocessed card image)} from a labelled training image.
  # Example: get_training("train.tsv","train.png",4,None)
  training = {}

  labels = {}
  for line in open(training_labels_filename):
    key, num, suit = line.strip().split()
    labels[int(key)] = (num,suit)

  print("Training")

  im = cv2.imread(training_image_filename)
  for i,c in enumerate(getCards(im,num_training_cards)):
    if avoid_cards is None or (labels[i][0] not in avoid_cards[0] and labels[i][1] not in avoid_cards[1]):
      training[i] = (labels[i], preprocess(c))

  print "Done training"
  return training


if __name__ == '__main__':
  if len(sys.argv) == 6:
    filename = sys.argv[1]
    num_cards = int(sys.argv[2])
    training_image_filename = sys.argv[3]
    training_labels_filename = sys.argv[4]    
    num_training_cards = int(sys.argv[5])

    training = get_training(training_labels_filename,training_image_filename,num_training_cards)

    im = cv2.imread("test.")

    # im.shape is (rows, cols); rotate landscape images to portrait
    height = im.shape[0]
    width = im.shape[1]
    if height < width:
      im = cv2.transpose(im)
      im = cv2.flip(im,1)

    # Debug: uncomment to see registered images
    #for i,c in enumerate(getCards(im,num_cards)):
    #   card = find_closest_card(training,c)
    #   cv2.imshow(str(card),c)
    #   cv2.waitKey(0) 

    cards = [find_closest_card(training,c) for c in getCards(im,num_cards)]
    print(cards)

  else:
    print(__doc__)
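
For reference, the kind of main loop I am picturing looks roughly like this. It is only a sketch and untested: it assumes the default webcam at index 0 and a training set built from train.tsv/train.png as in the usage example, and it reuses getCards, find_closest_card and get_training from the script above.

# Rough webcam loop: grab frames, run the card pipeline above on each frame,
# and overlay the detected labels on the live view. Press 'q' to quit.
def main_webcam(training, num_cards=4):
  cap = cv2.VideoCapture(0)                 # default webcam; change the index if needed
  if not cap.isOpened():
    raise RuntimeError("Could not open webcam")

  while True:
    ret, frame = cap.read()
    if not ret:
      break

    try:
      cards = [find_closest_card(training, c) for c in getCards(frame, num_cards)]
    except Exception:
      cards = []                            # rectify() fails on contours that are not 4-sided

    for i, card in enumerate(cards):
      cv2.putText(frame, str(card), (10, 30 + 30 * i),
                  cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

    cv2.imshow('cards', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
      break

  cap.release()
  cv2.destroyAllWindows()

# e.g. training = get_training("train.tsv", "train.png", 56); main_webcam(training, 4)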

1 Answer


Build OpenCV with the nonfree modules and use SURF features. They are very robust and can detect and separate almost anything:

http://docs.opencv.org/master/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html

Here are some code examples (sorry, C++ only, not Python): http://docs.opencv.org/doc/user_guide/ug_features2d.html

This is the important code:

// detecting keypoints
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);

// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);

// matching descriptors
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
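
In Python the same idea looks roughly like this. Treat it as a sketch: it assumes OpenCV 2.4.x built with the nonfree module, where cv2.SURF is available (newer builds expose it as cv2.xfeatures2d.SURF_create instead), and the filenames are just placeholders.

# Rough Python counterpart of the C++ snippet above (OpenCV 2.4.x, nonfree).
import cv2

img1 = cv2.imread('card1.png', 0)   # 0 = load as grayscale
img2 = cv2.imread('card2.png', 0)

# detect keypoints and compute descriptors in one call
surf = cv2.SURF(400)                # 400 = Hessian threshold, as in the C++ code
kp1, des1 = surf.detectAndCompute(img1, None)
kp2, des2 = surf.detectAndCompute(img2, None)

# match descriptors with a brute-force L2 matcher
matcher = cv2.BFMatcher(cv2.NORM_L2)
matches = matcher.match(des1, des2)
print('%d matches' % len(matches))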

If speed matters, you can use cv::FlannBasedMatcher instead:

std::vector< cv::DMatch > matches;
matcher.match( descriptors_this, descriptors_givenImage, matches );

FLANN (Fast Library for Approximate Nearest Neighbors) is a very fast matching framework that uses structures such as randomized KD-trees and hierarchical k-means trees to speed up matching considerably.
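
In Python, a FLANN-based match over the SURF descriptors from the sketch above could look like this (again just a sketch; the index and search parameters are common defaults, not tuned values):

# FLANN matcher sketch: KD-tree index over the float SURF descriptors des1/des2.
import cv2

FLANN_INDEX_KDTREE = 1                        # FLANN's id for the KD-tree index
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)               # more checks = more accurate, slower

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Keep only distinctive matches (Lowe's ratio test).
good = [pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance]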

Here are some answers if you need help installing SURF: OpenCV - undefined reference: SurfFeatureDetector and BruteForceMatcher
