|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" |
|
|
|
|
|
原图每一行剪出一张图, 对这张图做字符级标注 |
|
|
|
|
|
给 DBNet 官方代码用 |
|
|
|
|
|
将阿里OCR 的识别结果(图片和标注)转换成 icdar2015 格式 (注意:它的文本是含 utf8 bom 的) |
|
|
|
|
|
""" |
|
|
|
|
|
|
|
|
""" |
|
|
|
|
|
icdar2015 文本检测数据集 |
|
|
标注格式: x1,y1,x2,y2,x3,y3,x4,y4,text |
|
|
|
|
|
其中, x1,y1为左上角坐标,x2,y2为右上角坐标,x3,y3为右下角坐标,x4,y4为左下角坐标。 |
|
|
|
|
|
### 表示text难以辨认。 |
|
|
|
|
|
""" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import random |
|
|
from pathlib import Path |
|
|
import os |
|
|
import glob |
|
|
import base64 |
|
|
from importlib.resources import path |
|
|
import math |
|
|
import numpy as np |
|
|
import cv2 |
|
|
import json |
|
|
import decimal |
|
|
import datetime |
|
|
from pickletools import uint8 |
|
|
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles Decimal and datetime values.

    Decimals are emitted as floats and datetimes as their str()
    representation; any other unsupported object defers to the base
    encoder (which raises TypeError).
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        elif isinstance(o, datetime.datetime):
            return str(o)
        # Bug fix: the base implementation's result must be returned.
        # Without `return`, this method yielded None for unsupported
        # objects and they were silently serialized as `null` instead
        # of raising TypeError.
        return super().default(o)
|
|
|
|
|
|
|
|
def save_json(filename, dics):
    """Serialize `dics` to `filename` as pretty-printed UTF-8 JSON.

    Uses DecimalEncoder so Decimal/datetime values are representable
    and keeps non-ASCII text readable (ensure_ascii=False).
    """
    # The with-statement already closes the file; the original's
    # explicit fp.close() inside the block was redundant and removed.
    with open(filename, 'w', encoding='utf-8') as fp:
        json.dump(dics, fp, indent=4, cls=DecimalEncoder, ensure_ascii=False)
|
|
|
|
|
|
|
|
def load_json(filename):
    """Read `filename` as UTF-8 JSON and return the parsed object."""
    # The with-statement handles closing; the original's explicit
    # fp.close() was redundant, and the temporary variable added nothing.
    with open(filename, encoding='utf-8') as fp:
        return json.load(fp)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def parse(s):
    """Deserialize a JSON string.

    strict=False tolerates raw control characters (e.g. tabs, newlines)
    inside string values, which strict JSON would reject.
    """
    decoded = json.loads(s, strict=False)
    return decoded
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def string(d):
    """Serialize `d` to a JSON string.

    Non-ASCII characters are kept as-is and Decimal/datetime values are
    handled via DecimalEncoder.
    """
    encoded = json.dumps(d, cls=DecimalEncoder, ensure_ascii=False)
    return encoded
|
|
|
|
|
|
|
|
def transform(points, M):
    """Apply a 2x3 affine matrix `M` to an (N, 2) array of points.

    The points are lifted to homogeneous coordinates (x, y, 1), mapped
    through M, rounded to the nearest integer, and returned as an
    (N, 2) int32 array.
    """
    # Append a column of ones so the translation part of M applies.
    homogeneous = np.hstack([points, np.ones((len(points), 1))])

    # (M @ P^T)^T is equivalent to P @ M^T.
    mapped = homogeneous @ M.T

    # Pixel coordinates are integral: round, then cast.
    return np.round(mapped, decimals=0).astype(np.int32)
|
|
|
|
|
|
|
|
def cutPoly(img, pts):
    """Cut the polygon `pts` out of `img` onto a white background.

    Returns the bounding-box crop of the polygon with every pixel
    outside the polygon set to white (255).
    """
    # Crop the polygon's axis-aligned bounding box.
    x, y, w, h = cv2.boundingRect(pts)
    patch = img[y:y + h, x:x + w].copy()

    # Shift the polygon into the crop's local coordinate frame.
    local_pts = pts - pts.min(axis=0)

    # Rasterize the polygon as a filled mask over the crop.
    mask = np.zeros(patch.shape[:2], np.uint8)
    cv2.drawContours(mask, [local_pts], -1, (255, 255, 255), -1, cv2.LINE_AA)

    # Keep only the pixels inside the polygon (zero elsewhere).
    inside = cv2.bitwise_and(patch, patch, mask=mask)

    # Build a background that is white outside the polygon and zero
    # inside it, then merge: outside -> 255, inside -> original pixels.
    background = np.ones_like(patch, np.uint8) * 255
    cv2.bitwise_not(background, background, mask=mask)
    return background + inside
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":

    # ------------------------------------------------------------------
    # Part 1: sanity-check visualization — read one previously generated
    # ground-truth file and draw its polygons onto the matching image.
    # ------------------------------------------------------------------
    im = './icdar2015_aliocr_char/train_images/img_00000001.jpg'
    gt = './icdar2015_aliocr_char/train_gts/gt_img_00000001.txt'

    if os.path.exists(gt):

        items = []
        # utf-8-sig strips a leading BOM if present (the module header
        # notes AliOCR text carries a UTF-8 BOM).
        reader = open(gt, 'r', encoding='utf-8-sig').readlines()
        for line in reader:
            item = {}
            # Line format: x1,y1,...,xN,yN,text — the last field is the label.
            parts = line.strip().split(',')
            label = parts[-1]
            # In 'TD'-style ground truth a label of '1' marks an ignored box.
            if 'TD' in gt and label == '1':
                label = '###'
            # Strip stray BOM characters/bytes from every field.
            line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in parts]
            if 'icdar' in gt:
                # ICDAR format: exactly 8 coordinates (4 corner points).
                poly = np.array(list(map(float, line[:8]))).reshape(
                    (-1, 2)).tolist()
            else:
                # Otherwise take as many whole x,y pairs as precede the label.
                num_points = math.floor((len(line) - 1) / 2) * 2
                poly = np.array(list(map(float, line[:num_points]))).reshape(
                    (-1, 2)).tolist()
            item['poly'] = poly
            item['text'] = label
            # Duplicate under 'points' as well (some consumers use that key).
            item['points'] = poly
            # '###' means the text is illegible and should be ignored.
            item['ignore'] = True if label == '###' else False
            items.append(item)

        # np.fromfile + imdecode handles paths cv2.imread may not (e.g.
        # non-ASCII paths on Windows); -1 keeps the original channels.
        img = cv2.imdecode(np.fromfile(im, dtype=np.uint8), -1)

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        for i in range(len(items)):
            poly = items[i]['poly']
            poly = np.array(poly)
            poly = poly.astype(np.int32)

            # Random color per polygon so adjacent boxes stay distinguishable.
            b = random.randint(0, 255)
            g = random.randint(0, 255)
            r = random.randint(0, 255)

            cv2.polylines(img, [poly], isClosed=True,
                          color=(b, g, r), thickness=1)

        cv2.imwrite("poly.jpg", img)

    # ------------------------------------------------------------------
    # Part 2: conversion — turn AliOCR JSON results + base64 images into
    # an ICDAR2015-style dataset: one cropped image per recognized text
    # line, with one gt box per character.
    # ------------------------------------------------------------------
    out_dir = 'icdar2015_aliocr_char'
    # Start from a clean output directory.
    if os.path.exists(out_dir):
        import shutil
        shutil.rmtree(out_dir)

    # Input layout: ./data/json/<name>.json (AliOCR result) paired with
    # ./data/img/<name>.txt (base64-encoded image bytes).
    dir_json = './data/json'
    dir_img = './data/img'

    train_list = []
    train_list_path = os.path.join(out_dir, 'train_list.txt')

    test_list = []
    test_list_path = os.path.join(out_dir, 'test_list.txt')

    # g_count numbers output crops globally; count numbers source files.
    g_count = 1
    count = 1

    json_paths = glob.glob('{}/*.json'.format(dir_json), recursive=True)

    for json_path in json_paths:

        base = Path(json_path).stem

        # The companion .txt holds the source image as base64 text.
        img_train_path = os.path.join(dir_img, '{}.txt'.format(base))

        # Skip JSON results that have no matching image file.
        if not os.path.exists(img_train_path):
            continue

        jsn = load_json(json_path)

        with open(img_train_path, "r", encoding="utf-8") as fp:
            imgdata = fp.read()
            imgdata = base64.b64decode(imgdata)
            imgdata = np.frombuffer(imgdata, np.uint8)
            img = cv2.imdecode(imgdata, cv2.IMREAD_UNCHANGED)

        # Normalize grayscale images to 3-channel BGR.
        if len(img.shape) != 3:
            img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        else:
            img_color = img.copy()

        # Pristine copies for per-word drawing/debugging below.
        img_color_origin = img_color.copy()
        img_color_origin2 = img_color.copy()

        # AliOCR's recognized text lines.
        wordsInfo = jsn['prism_wordsInfo']
        for j in range(len(wordsInfo)):
            jo = wordsInfo[j]
            word = jo["word"]
            charInfo = jo["charInfo"]

            angle = jo['angle']

            img_color = img_color_origin.copy()

            """
            The word-level x/y/width/height fields are all unreliable;
            the coordinates in `pos` are the correct ones.
            """

            # pos: 4 corner points, ordered lu, ru, rd, ld.
            pos = jo["pos"]
            x = int(pos[0]["x"])
            y = int(pos[0]["y"])

            x2 = int(pos[2]["x"])
            y2 = int(pos[2]["y"])

            lu = [pos[0]['x'], pos[0]['y']]
            ru = [pos[1]['x'], pos[1]['y']]
            rd = [pos[2]['x'], pos[2]['y']]
            ld = [pos[3]['x'], pos[3]['y']]

            # Axis-aligned bounding box of the (possibly rotated) quad.
            # NOTE(review): these are used directly as slice indices, so
            # pos values are presumably integers — TODO confirm.
            min_x = min(lu[0], ld[0])
            max_x = max(ru[0], rd[0])

            min_y = min(lu[1], ru[1])
            max_y = max(rd[1], ld[1])

            # Clamp the bottom/right edges to the image bounds.
            rows, cols = img.shape[:2]
            if max_y >= rows:
                max_y = rows - 1
            if max_x >= cols:
                max_x = cols - 1

            # One output image per text line.
            crop = img[min_y:max_y+1, min_x:max_x+1]

            # Random ~85/15 train/test split per crop.
            is_train_img = random.choices([0, 1], weights=[0.15, 0.85])[0]

            img_name = "img_{:08d}.jpg".format(g_count)
            gt_name = "gt_img_{:08d}.txt".format(g_count)

            gt_txt_list = []

            img_train_path = os.path.join(out_dir, 'train_images', img_name)
            img_train_gt_path = os.path.join(out_dir, 'train_gts', gt_name)
            img_test_path = os.path.join(out_dir, 'test_images', img_name)
            img_test_gt_path = os.path.join(out_dir, 'test_gts', gt_name)

            dir1 = os.path.dirname(img_train_path)
            dir2 = os.path.dirname(img_train_gt_path)
            dir3 = os.path.dirname(img_test_path)
            dir4 = os.path.dirname(img_test_gt_path)

            if not os.path.exists(dir1):
                os.makedirs(dir1)
            if not os.path.exists(dir2):
                os.makedirs(dir2)
            if not os.path.exists(dir3):
                os.makedirs(dir3)
            if not os.path.exists(dir4):
                os.makedirs(dir4)

            if is_train_img:
                train_list.append(img_name)
                cv2.imwrite(img_train_path, crop)
            else:
                test_list.append(img_name)
                cv2.imwrite(img_test_path, crop)

            # One gt line per character: its box in crop-local coordinates.
            for info in charInfo:
                wd = info["word"]
                wd_x = info["x"]
                wd_y = info["y"]
                wd_w = info["w"]
                wd_h = info["h"]

                wd_crop = img[wd_y:wd_y+wd_h, wd_x:wd_x+wd_w]

                # Character coords are image-global; shift into the crop.
                wd_x_local = wd_x - min_x
                wd_y_local = wd_y - min_y

                wd_crop2 = crop[wd_y_local:wd_y_local+wd_h, wd_x_local:wd_x_local+wd_w]

                # Axis-aligned corners, ICDAR order: lu, ru, rd, ld.
                lu_wd = [wd_x_local, wd_y_local]
                ru_wd = [wd_x_local+wd_w, wd_y_local]
                rd_wd = [wd_x_local+wd_w, wd_y_local+wd_h]
                ld_wd = [wd_x_local, wd_y_local+wd_h]

                # ICDAR2015 line: x1,y1,x2,y2,x3,y3,x4,y4,text
                gt_txt_list.append( "{},{},{},{},{},{},{},{},{}".format(lu_wd[0], lu_wd[1], ru_wd[0], ru_wd[1], rd_wd[0], rd_wd[1], ld_wd[0], ld_wd[1], wd) )

            gt_txt = "\n".join(gt_txt_list)

            if is_train_img:
                with open(img_train_gt_path, 'w', encoding='utf-8') as f:
                    f.write(gt_txt)
            else:
                with open(img_test_gt_path, 'w', encoding='utf-8') as f:
                    f.write(gt_txt)

            g_count += 1

        print(f'### one task one. {count} / {len(json_paths)}')

        count += 1

    # Write the image-name lists for the two splits.
    train_list_txt = "\n".join(train_list)
    test_list_txt = "\n".join(test_list)

    with open(os.path.join(out_dir, "train_list.txt"), 'w', encoding='utf-8') as f:
        f.write(train_list_txt)

    with open(os.path.join(out_dir, "test_list.txt"), 'w', encoding='utf-8') as f:
        f.write(test_list_txt)

    print('### all task done.')
|
|
|
|
|
|
|
|
|