
I am trying to develop an OCR system. The plan is to use MSER to extract characters from an image and then pass each character to a CNN to recognize it. Here is my character-extraction code:

import cv2
import numpy as np

# create MSER object
mser = cv2.MSER_create()

# read the image
img = cv2.imread('textArea01.png')

# convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# store copy of the image
vis = img.copy()

# detect regions in the image
regions,_ = mser.detectRegions(gray)

# find convex hulls of the regions and draw them onto the original image
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

cv2.polylines(vis, hulls, 1, (0, 255, 0))

# create mask for the detected region
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))

for contour in hulls:

    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

    #this is used to find only text regions, remaining are ignored
    text_only = cv2.bitwise_and(img, img, mask=mask)


cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)
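
For reference, this is only a rough sketch of the step I plan next, cropping each hull's bounding box and resizing it before it goes to the CNN (the 32x32 input size below is just a placeholder, not my actual network input):

# rough sketch of the planned hand-off to the CNN
# (the 32x32 input size is a placeholder)
char_patches = []
for hull in hulls:
    x, y, w, h = cv2.boundingRect(hull)
    patch = gray[y:y + h, x:x + w]
    # resize every candidate character to a fixed size for the CNN
    patch = cv2.resize(patch, (32, 32))
    char_patches.append(patch)
# char_patches would then be batched and passed to the CNN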

The extraction works fine for most images, but for some images like this one: (image: an input whose text is enclosed by an outer border)

The outer border is also detected as a region and its contour is drawn into the mask, so the whole area inside the border is treated as a text region and the contours inside it have no effect. How can I prevent this so that only the text is detected?

Detected hulls: (image)

Resulting mask: (image)


2 Answers


You can set a threshold on the contour area so that any shape covering more than a certain area of the image is ignored.

for contour in hulls:
    if cv2.contourArea(contour) > ThresholdArea:
        continue

    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)        
    #this is used to find only text regions, remaining are ignored
    text_only = cv2.bitwise_and(img, img, mask=mask)
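
ThresholdArea does not have to be hard-coded; as a rough sketch, it could be derived from the image size (the 10% fraction here is arbitrary and would need tuning for your images):

# example: treat any hull covering more than ~10% of the image as too large to be a character
# (the 0.1 fraction is arbitrary)
ThresholdArea = 0.1 * img.shape[0] * img.shape[1]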
Answered 2019-12-24T05:57:51.060

The result I got with this code:

import cv2
import numpy as np

img = cv2.imread("img.png")

# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

# binary
# ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 35, 180)
cv2.imshow('threshold', thresh)

# dilation
kernel = np.ones((1, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)

# find contours
# cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])

for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)

    # Getting ROI
    roi = img[y:y + h, x:x + w]

    # show ROI
    # cv2.imshow('segment no:'+str(i),roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)

    # if you want to save the letters without green bounding box, comment the line above
    if w > 5:
        cv2.imwrite('C:\\Users\\PC\\Desktop\\output\\{}.png'.format(i), roi)

cv2.imshow('marked areas', img)

cv2.waitKey(0)

Result: (image)

Answered 2019-12-26T21:42:33.577