
I'm struggling with a project that takes an image of fairly clear text, say from a label, finds the text region, and outputs it as a string using OCR (Tesseract, for instance).

I've made decent progress by applying various global filters to get a fairly clean result, but I'm stuck on finding a method that isolates just the text, then rotates it to be as horizontal as possible; after that, cropping it should be the easy part.

May I have any leads on how to do that without training data and without over-complicating the system, since I'm only using a Raspberry Pi for the computing?

Thanks for the help. Here's what I've come up with so far:

Original image (captured from the PiCamera):

https://i.imgur.com/vm5wb1Z.jpg

Adaptive thresh after shadow removal:

https://i.stack.imgur.com/OadKs.jpg

Global thresh after shadow removal:

https://i.stack.imgur.com/5KAhz.jpg

Here's the code:

# import the necessary packages
from PIL import Image
import pytesseract
import argparse
import cv2
import os

import picamera
import time

import numpy as np
#preprocess = "thresh"

#Remaining text cropping and rotating:
import math
import json
from collections import defaultdict
from scipy.ndimage import rank_filter  # scipy.ndimage.filters on older SciPy

def dilate(ary, N, iterations):
    """Dilate using an NxN '+' sign shape. ary is np.uint8."""
    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[(N - 1) // 2, :] = 1  # integer division; (N-1)/2 is a float in Python 3
    dilated_image = cv2.dilate(ary // 255, kernel, iterations=iterations)

    kernel = np.zeros((N, N), dtype=np.uint8)
    kernel[:, (N - 1) // 2] = 1
    dilated_image = cv2.dilate(dilated_image, kernel, iterations=iterations)
    return dilated_image


def props_for_contours(contours, ary):
    """Calculate bounding box & the number of set pixels for each contour."""
    c_info = []
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        c_im = np.zeros(ary.shape)
        cv2.drawContours(c_im, [c], 0, 255, -1)
        c_info.append({
            'x1': x,
            'y1': y,
            'x2': x + w - 1,
            'y2': y + h - 1,
            'sum': np.sum(ary * (c_im > 0))/255
        })
    return c_info


def union_crops(crop1, crop2):
    """Union two (x1, y1, x2, y2) rects."""
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return min(x11, x12), min(y11, y12), max(x21, x22), max(y21, y22)


def intersect_crops(crop1, crop2):
    x11, y11, x21, y21 = crop1
    x12, y12, x22, y22 = crop2
    return max(x11, x12), max(y11, y12), min(x21, x22), min(y21, y22)


def crop_area(crop):
    x1, y1, x2, y2 = crop
    return max(0, x2 - x1) * max(0, y2 - y1)


def find_border_components(contours, ary):
    borders = []
    area = ary.shape[0] * ary.shape[1]
    for i, c in enumerate(contours):
        x,y,w,h = cv2.boundingRect(c)
        if w * h > 0.5 * area:
            borders.append((i, x, y, x + w - 1, y + h - 1))
    return borders


def angle_from_right(deg):
    return min(deg % 90, 90 - (deg % 90))


def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)  # cv2.cv.BoxPoints(r) on OpenCV 2.4
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x, y, w, h = cv2.boundingRect(contour)  # boundingRect returns (x, y, w, h)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 255, -1)
        cv2.rectangle(c_im, (x, y), (x + w, y + h), 0, 4)

    return np.minimum(c_im, ary)


def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.

    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = max_components + 1
    n = 1
    while count > max_components:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        # [-2:] keeps this working across OpenCV 2.4/3.x/4.x return signatures.
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        count = len(contours)
    return contours


def find_optimal_components_subset(contours, edges):
    """Find a crop which strikes a good balance of coverage/compactness.

    Returns an (x1, y1, x2, y2) tuple.
    """
    c_info = props_for_contours(contours, edges)
    c_info.sort(key=lambda x: -x['sum'])
    total = np.sum(edges) / 255
    area = edges.shape[0] * edges.shape[1]

    c = c_info[0]
    del c_info[0]
    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    crop = this_crop
    covered_sum = c['sum']

    while covered_sum < total:
        changed = False
        recall = 1.0 * covered_sum / total
        prec = 1 - 1.0 * crop_area(crop) / area
        f1 = 2 * (prec * recall / (prec + recall))
        #print '----'
        for i, c in enumerate(c_info):
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            new_crop = union_crops(crop, this_crop)
            new_sum = covered_sum + c['sum']
            new_recall = 1.0 * new_sum / total
            new_prec = 1 - 1.0 * crop_area(new_crop) / area
            new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)

            # Add this crop if it improves f1 score,
            # _or_ it adds 25% of the remaining pixels for <15% crop expansion.
            # ^^^ very ad-hoc! make this smoother
            remaining_frac = c['sum'] / (total - covered_sum)
            new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
            if new_f1 > f1 or (
                    remaining_frac > 0.25 and new_area_frac < 0.15):
                print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
                        i, covered_sum, new_sum, total, remaining_frac,
                        crop_area(crop), crop_area(new_crop), area, new_area_frac,
                        f1, new_f1))
                crop = new_crop
                covered_sum = new_sum
                del c_info[i]
                changed = True
                break

        if not changed:
            break

    return crop


def pad_crop(crop, contours, edges, border_contour, pad_px=15):
    """Slightly expand the crop to get full contours.

    This will expand to include any contours it currently intersects, but will
    not expand past a border.
    """
    bx1, by1, bx2, by2 = 0, 0, edges.shape[1], edges.shape[0]  # shape is (rows, cols) = (y, x)
    if border_contour is not None and len(border_contour) > 0:
        c = props_for_contours([border_contour], edges)[0]
        bx1, by1, bx2, by2 = c['x1'] + 5, c['y1'] + 5, c['x2'] - 5, c['y2'] - 5

    def crop_in_border(crop):
        x1, y1, x2, y2 = crop
        x1 = max(x1 - pad_px, bx1)
        y1 = max(y1 - pad_px, by1)
        x2 = min(x2 + pad_px, bx2)
        y2 = min(y2 + pad_px, by2)
        return x1, y1, x2, y2  # was `return crop`, which threw the padding away

    crop = crop_in_border(crop)

    c_info = props_for_contours(contours, edges)
    changed = False
    for c in c_info:
        this_crop = c['x1'], c['y1'], c['x2'], c['y2']
        this_area = crop_area(this_crop)
        int_area = crop_area(intersect_crops(crop, this_crop))
        new_crop = crop_in_border(union_crops(crop, this_crop))
        if 0 < int_area < this_area and crop != new_crop:
            print('%s -> %s' % (str(crop), str(new_crop)))
            changed = True
            crop = new_crop

    if changed:
        return pad_crop(crop, contours, edges, border_contour, pad_px)
    else:
        return crop


def downscale_image(im, max_dim=2048):
    """Shrink im until its longest dimension is <= max_dim.

    Returns new_image, scale (where scale <= 1).
    """
    a, b = im.size
    if max(a, b) <= max_dim:
        return 1.0, im

    scale = 1.0 * max_dim / max(a, b)
    new_im = im.resize((int(a * scale), int(b * scale)), Image.LANCZOS)  # Image.ANTIALIAS on old Pillow
    return scale, new_im


def process_image(inputImg):

    opnImg = Image.open(inputImg)
    scale, im = downscale_image(opnImg)

    edges = cv2.Canny(np.asarray(im), 100, 200)

    # TODO: dilate image _before_ finding a border. This is crazy sensitive!
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    borders = find_border_components(contours, edges)
    # Sort by border area; tuple-unpacking lambdas are Python 2 only.
    borders.sort(key=lambda b: (b[3] - b[1]) * (b[4] - b[2]))

    border_contour = None
    if len(borders):
        border_contour = contours[borders[0][0]]
        edges = remove_border(border_contour, edges)

    edges = 255 * (edges > 0).astype(np.uint8)

    # Remove ~1px borders using a rank filter.


    maxed_rows = rank_filter(edges, -4, size=(1, 20))
    maxed_cols = rank_filter(edges, -4, size=(20, 1))

    debordered = np.minimum(np.minimum(edges, maxed_rows), maxed_cols)
    edges = debordered


    contours = find_components(edges)
    if len(contours) == 0:
        print('%s -> (no text!)' % inputImg)
        return

    crop = find_optimal_components_subset(contours, edges)
    crop = pad_crop(crop, contours, edges, border_contour)

    crop = [int(x / scale) for x in crop]  # upscale to the original image size.
    #draw = ImageDraw.Draw(im)
    #c_info = props_for_contours(contours, edges)
    #for c in c_info:
    #    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    #    draw.rectangle(this_crop, outline='blue')
    #draw.rectangle(crop, outline='red')
    #im.save(out_path)
    #draw.text((50, 50), path, fill='red')
    #orig_im.save(out_path)
    #im.show()
    text_im = opnImg.crop(crop)
    text_im.save('Cropped_and_rotated_image.jpg')
    return text_im


#Camera capturing stuff:
myCamera = picamera.PiCamera()

myCamera.vflip = True 
myCamera.hflip = True
'''
myCamera.start_preview()
time.sleep(6)
myCamera.stop_preview()
'''
myCamera.capture("Captured_Image.png")
#End capturing procedure


imgAddr = '/home/pi/My_examples/Mechanical_display_converter/Example1.jpg'
#imgAddr = "Captured_Image.png"



# construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
'''
ap.add_argument("-i", "--image", required=True,
    help="path to input image to be OCR'd")

ap.add_argument("-p", "--preprocess", type=str, default="thresh",
    help="type of preprocessing to be done")
args = vars(ap.parse_args())
'''

# load the example image and convert it to grayscale
img = cv2.imread(imgAddr)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('Step1_gray_filter', gray)

'''

# check to see if we should apply thresholding to preprocess the
# image
if args["preprocess"] == "thresh":
    gray = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

# make a check to see if median blurring should be done to remove
# noise
elif args["preprocess"] == "blur":
    gray = cv2.medianBlur(gray, 3)


if preprocess == "thresh":
    gray = cv2.threshold(gray, 150, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]



# make a check to see if median blurring should be done to remove
# noise
elif preprocess == "blur":
    gray = cv2.medianBlur(gray, 3)

'''

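# Shadow removal: estimate the slowly-varying background of each colour
# plane by dilating it and median-blurring the result, then take the
# inverted absolute difference from that estimate. This flattens uneven
# lighting before any thresholding.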
rgb_planes = cv2.split(img)

result_planes = []
result_norm_planes = []
for plane in rgb_planes:
    dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
    bg_img = cv2.medianBlur(dilated_img, 21)
    diff_img = 255 - cv2.absdiff(plane, bg_img)
    norm_img = cv2.normalize(diff_img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
    result_planes.append(diff_img)
    result_norm_planes.append(norm_img)

result = cv2.merge(result_planes)
result_norm = cv2.merge(result_norm_planes)

cv2.imshow('shadows_out.png', result)
cv2.imshow('shadows_out_norm.png', result_norm)

grayUnShadowedImg = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
cv2.imshow('Shadow_Gray_CVT', grayUnShadowedImg)


ret, threshUnShadowedImg = cv2.threshold(grayUnShadowedImg, 200, 255, cv2.THRESH_BINARY)
cv2.imshow('unShadowed_Thresh_filtering', threshUnShadowedImg)
#cv2.imwrite('unShadowed_Thresh_filtering.jpg', threshUnShadowedImg)

#croppedUnShadowedImg = process_image('unShadowed_Thresh_filtering.jpg')

adptThreshUnShadowedImg = cv2.adaptiveThreshold(grayUnShadowedImg, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
cv2.imshow('unShadowed_Adaptive_Thresh_filtering', adptThreshUnShadowedImg)

'''
blurFImg = cv2.GaussianBlur(adptThreshUnShadowedImg,(25,25), 0)
ret, f3Img = cv2.threshold(blurFImg,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('f3Img', f3Img )
'''
#OCR Stage:
'''
# write the grayscale image to disk as a temporary file so we can
# apply OCR to it
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, threshImg)

# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open(filename))
os.remove(filename)
print("\n" + text)
'''

cv2.waitKey(0)
cv2.destroyAllWindows() 

I tried this source as well, but it doesn't seem to work and isn't that easy to understand:

https://www.danvk.org/2015/01/07/finding-blocks-of-text-in-an-image-using-python-opencv-and-numpy.html


3 Answers


I have made an example to give you an idea of how to proceed. I made it without your transformations of the image, but you could apply them as well if you like.

What I did was first transform the image to binary with cv2.THRESH_BINARY. Next I made a mask and drew onto it only the contours that pass a size (cv2.contourArea()) and ratio (from cv2.boundingRect()) threshold. Then I connected all the contours that are near each other using cv2.morphologyEx() with a big kernel size (50x50).

[image]

Then I selected the biggest contour (the text) and fitted a rotated rectangle with cv2.minAreaRect(), which gave me the rotation angle.

[image]

Then I rotated the image using cv2.getRotationMatrix2D() and cv2.warpAffine(), and built a slightly bigger bounding box from the highest and lowest X, Y values of the rotated rectangle, which I used to crop the image.

[image]
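One caveat worth noting (it wasn't needed for this image, so treat the snippet below as an optional sketch): on most OpenCV builds cv2.minAreaRect() reports the angle in the range [-90, 0), so a box tilted the other way comes back offset by 90 degrees and the warp would turn the text sideways. A small normalisation before building the rotation matrix avoids that:

# Normalise the angle from cv2.minAreaRect() so the deskew stays small.
# Most OpenCV builds report it in [-90, 0); values below -45 mean the
# rectangle is "standing up" and the real skew is angle + 90.
angle = rect[2]
if angle < -45:
    angle += 90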

Then I searched again for contours and removed the noise (little contours) from the image, and the result is text with high contrast.

Final result:

enter image description here

This code is meant only to give an idea, or another point of view, on the problem. It may not work with other images (if they differ too much from the original), or at least you would have to adjust some of the parameters. Hope it helps. Cheers!

Code:

import cv2
import numpy as np

# Read image and search for contours. 
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x,y,w,h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)

# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
center = (w // 2, h // 2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
box = np.int0(box)
bound = np.array(box)
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Display results.
cv2.imshow('final', final_image)
cv2.imshow('rotated', rotated)
cv2.waitKey(0)
cv2.destroyAllWindows()

EDIT:

For text recognition I recommend you see this SO post: Simple Digit Recognition OCR in OpenCV-Python.

The result with the code from the mentioned post:

[image]

[image]

EDIT:

This is my code combined with slightly modified code from the mentioned post. All steps are explained in the comments. You should save the script and the training image in the same directory. This is my training image:

[image: training image]

Code:

import cv2
import numpy as np

# Read image and search for contours. 
img = cv2.imread('rotatec.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x,y,w,h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)

# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
center = (w // 2, h // 2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.boxPoints(rect)
box = np.int0(box)
bound = np.array(box)
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1-10:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Display results.
cv2.imwrite('rotated12.png', final_image)

# Import module for finding path to database.
from pathlib import Path

# This code executes once and writes two files.
# If file exists it skips this step, else it runs again.
file = Path("generalresponses.data")
if not file.is_file():

    # Reading the training image
    im = cv2.imread('pitrain1.png')
    im3 = im.copy()
    gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(5,5),0)
    thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)

    # Finding contour

    # Two return values (OpenCV 2.4/4.x); OpenCV 3.x returns three.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Creates array and list for appending data
    samples =  np.empty((0,100))
    responses = []

    # Value serving to increment the "automatic" learning
    i = 0

    # Iterating through contours and appending the array and list with "learned" values
    for cnt in contours:
        i+=1
        [x,y,w,h] = cv2.boundingRect(cnt)
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
        roi = thresh[y:y+h,x:x+w] # Croping ROI to bounding rectangle
        roismall = cv2.resize(roi,(10,10)) # Resizing ROI to smaller image
        cv2.imshow('norm',im)
        
        # Appending values based on the pitrain1.png image
        if i < 36:
            responses.append(int(45))
        elif 35 < i < 80:
            responses.append(int(48))
        elif 79 < i < 125:
            responses.append(int(57))
        elif 124 < i < 160:
            responses.append(int(56))
        elif 159 < i < 205:
            responses.append(int(55))
        elif 204 < i < 250:
            responses.append(int(54))
        elif 249 < i < 295:
            responses.append(int(53))
        elif 294 < i < 340:
            responses.append(int(52))
        elif 339 < i < 385:
            responses.append(int(51))
        elif 384 < i < 430:
            responses.append(int(50))
        elif 429 < i < 485:
            responses.append(int(49))
        else:
            break
        sample = roismall.reshape((1,100))
        samples = np.append(samples,sample,0)

    # Reshaping and saving database
    responses = np.array(responses)
    responses = responses.reshape((responses.size,1))
    print('end')
    np.savetxt('generalsamples.data',samples)
    np.savetxt('generalresponses.data',responses, fmt='%s')

################### Recognition ########################

# Dictionary for numbers and characters (in this sample code the only
# character is " - ")
number = {
48 : "0",
53 : "5",
52 : "4",
50 : "2",
45 : "-",
55 : "7",
51 : "3",
57 : "9",
56 : "8",
54 : "6",
49 : "1"
}


#######   training part    ############### 
samples = np.loadtxt('generalsamples.data',np.float32)
responses = np.loadtxt('generalresponses.data',np.float32)
responses = responses.reshape((responses.size,1))

model = cv2.ml.KNearest_create()
model.train(samples,cv2.ml.ROW_SAMPLE,responses)

############################# testing part  #########################

im = cv2.imread('rotated12.png')
out = np.zeros(im.shape,np.uint8)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,1,1,11,2)

contours,hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
        [x,y,w,h] = cv2.boundingRect(cnt)
        cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),2)
        roi = thresh[y:y+h,x:x+w]
        roismall = cv2.resize(roi,(10,10))
        roismall = roismall.reshape((1,100))
        roismall = np.float32(roismall)
        retval, results, neigh_resp, dists = model.findNearest(roismall,k=5)
        string = int((results[0][0]))
        string2 = number.get(string)
        print(string2)
        cv2.putText(out,str(string2),(x,y+h),0,1,(0,255,0))

cv2.imshow('im',im)
cv2.imshow('out',out)
cv2.waitKey(0)
cv2.destroyAllWindows()

Result:

[image]

[image]

  • Hello, thanks for the example, really. Trying to compile it on my Windows machine using PyCharm (which I assume uses Python 3), the example seems to work after setting up the right packages. (But I want to run it on my Raspberry Pi, since Tesseract seems to act weird on the Windows one, not returning the text as a string.) When I compile it there (probably under Python 2.7) it shows me various errors I try to get around, such as: File "ImgToString_example.py", line 45, in box = cv2.boxPoints(rect) AttributeError: 'module' object has no attribute 'boxPoints' and other stuff – MikeLemo Aug 07 '18 at 23:31
  • I assume it has something to do with Python 3, but I'm really struggling to set it up on that machine, not really understanding which older packages support it, or how to access the folders to paste the modules from the 2.7 site-packages into the Python 3 module path. Also, changing it to box = cv2.BoxPoints(rect) doesn't work – MikeLemo Aug 07 '18 at 23:36
  • Looking at your posted code, the problem is that you have OpenCV 2.4 installed (the example is written for OpenCV 3)... hence the difference in code - contours returning only two values, etc. That being said, I don't know why there is a difference... the code works fine on my PC. My advice is to visualize more steps of the code (with "cv2.imshow()") - the first threshold, opening, first mask... and see where something isn't as it should be. I am betting, though, that the problem lies in the "# Remove noise from the final image" step. Try visualizing before and after this step. Hope it helps. Cheers! – kavko Aug 08 '18 at 05:17
  • Anyway, I found out Tesseract works just fine on the Windows machine with other pictures, but running it on the final image just doesn't work for some reason, despite how clean it is for the software. Any leads as to why? Here's the Tesseract code: im = Image.open('FinalImg.jpg') txtStr = pytesseract.image_to_string(Image.open('FinalImg.jpg')) print("OCR output:") print(txtStr) — it doesn't print that txtStr value... – MikeLemo Aug 08 '18 at 14:12
  • I haven't worked with Tesseract yet, so I'm sorry I can't be of help here. If you can't work it out, you should take a look at the link I added at the bottom... I tried the code with your image (after I ran it through my code, of course) and every number was correctly recognized, as you can see in the bottom image. – kavko Aug 08 '18 at 14:25
  • Yes, this seems clean and accurate, but from what I've read there it's also size-dependent and not just pattern-dependent. I also didn't understand how that "training" works, and I'm not sure how universal it is, since I want it to read several types of signs. – MikeLemo Aug 08 '18 at 16:53
  • I have added the implemented code for "automated" training. You should check it out; maybe it will become clearer how it works. The steps are written in the comments. Note that this is written for OpenCV 3. Cheers! – kavko Aug 08 '18 at 20:15
  • Some of this code is outdated; you should definitely try to keep it up to date because it's useful. Just not at the moment – Nicolas Gervais Feb 11 '20 at 16:36
  • @Nicolas Gervais Thanks for your feedback. Will try to take a look at it as soon as I can. Cheers! – kavko Mar 12 '20 at 05:18
  • @NicolasGervais The code has been updated and should work with newer versions of OpenCV – kavko Dec 18 '20 at 09:56

Sorry for being a complete beginner at this; I'm really trying to learn as much as I can about coding, everything around computers, and OpenCV with the very little time I have. But here's the edited code I've managed to get partly working:

from PIL import Image
import pytesseract

import os

import picamera
import time

import cv2
import numpy as np


# Read image and search for contours. 
img = cv2.imread('Example1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) #EDITED

# Create first mask used for rotation.
mask = np.ones(img.shape, np.uint8)*255

# Draw contours on the mask with size and ratio of borders for threshold.
for cnt in contours:
    size = cv2.contourArea(cnt)
    x,y,w,h = cv2.boundingRect(cnt)
    if 10000 > size > 500 and w*2.5 > h:
        cv2.drawContours(mask, [cnt], -1, (0,0,0), -1)

# Connect neighbour contours and select the biggest one (text).
kernel = np.ones((50,50),np.uint8)
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
gray_op = cv2.cvtColor(opening, cv2.COLOR_BGR2GRAY)
_, threshold_op = cv2.threshold(gray_op, 150, 255, cv2.THRESH_BINARY_INV)
contours_op, hierarchy_op = cv2.findContours(threshold_op, cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours_op, key=cv2.contourArea)

# Create rotated rectangle to get the angle of rotation and the 4 points of the rectangle.
_, _, angle = rect = cv2.minAreaRect(cnt)
(h,w) = img.shape[:2]
(center) = (w//2,h//2)

# Rotate the image.
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(img, M, (int(w),int(h)), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

# Create bounding box for rotated text (use old points of rotated rectangle).
box = cv2.cv.BoxPoints(rect) # edited: OpenCV 2.4 API; on OpenCV 3+ this is cv2.boxPoints(rect)
a, b, c, d = box = np.int0(box)
bound =[]
bound.append(a)
bound.append(b)
bound.append(c)
bound.append(d)
bound = np.array(bound)
(x1, y1) = (bound[:,0].min(), bound[:,1].min())
(x2, y2) = (bound[:,0].max(), bound[:,1].max())
cv2.drawContours(img,[box],0,(0,0,255),2)

# Crop the image and create new mask for the final image.
rotated = rotated[y1:y2, x1:x2]
mask_final = np.ones(rotated.shape, np.uint8)*255

# Remove noise from the final image.
gray_r = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
_, threshold_r = cv2.threshold(gray_r, 150, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold_r,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    size = cv2.contourArea(cnt)
    if size < 500:
        cv2.drawContours(threshold_r, [cnt], -1, (0,0,0), -1)

# Invert black and white.
final_image = cv2.bitwise_not(threshold_r)

# Display results.
cv2.imshow('final', final_image)

cv2.imshow('rotated', rotated)


#OCR Stage:

# write the final image to disk as a temporary file so we can
# apply OCR to it
cv2.imwrite('Final_proc.jpg', final_image)

# load the image as a PIL/Pillow image, apply OCR, and then delete
# the temporary file
text = pytesseract.image_to_string(Image.open('Final_proc.jpg'))
os.remove('Final_proc.jpg')
print("\n" + text)


cv2.waitKey(0)
cv2.destroyAllWindows()

When I run it now it gives me this output: https://i.stack.imgur.com/txG9D.jpg

which is a little different from what you showed and compiled on the Windows machine, but still super close.

Any idea what happened? After that, it should be really easy to dissect the code and learn from it.

Again thank you very much for your time! :D


So for the Python 3 and OpenCV 3 version of the code, in order to make the image work with Tesseract, you need to add a roughly 20 px white border to extend the image for some reason (I assume it's because of the convolution matrix scanning), as described in my other post:

pytesseract struggling to recognize clean black and white pictures with font numbers and 7 seg digits(python)

and here's how you'd add the border:

how to add border around an image in opencv python

In one line of code:

outputImage = cv2.copyMakeBorder(
                 inputImage,
                 topBorderWidth,
                 bottomBorderWidth,
                 leftBorderWidth,
                 rightBorderWidth,
                 cv2.BORDER_CONSTANT,
                 value=borderColor  # e.g. [255, 255, 255] for a white border
              )
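And a quick sketch of how this ties together (the 20 px widths and the white fill are just the values that worked for me; 'Final_proc.jpg' is the file my code above writes out, assuming you keep it around rather than deleting it):

import cv2
import pytesseract
from PIL import Image

# Pad the cleaned binary image with a 20 px white border so tesseract
# has a quiet zone around the characters, then run OCR on the result.
final_image = cv2.imread('Final_proc.jpg')
padded = cv2.copyMakeBorder(final_image, 20, 20, 20, 20,
                            cv2.BORDER_CONSTANT, value=[255, 255, 255])
cv2.imwrite('Final_padded.jpg', padded)

text = pytesseract.image_to_string(Image.open('Final_padded.jpg'))
print("OCR output:")
print(text)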