fasdfsa committed on
Commit
2da3364
·
1 Parent(s): 44989e3

icdar2015_aliocr_char done

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. aliocr_IC15_char_convert.py +385 -0
  3. readme.txt +0 -29
.gitignore CHANGED
@@ -1,2 +1,3 @@
1
  icdar2015_aliocr/
 
2
  poly.jpg
 
1
  icdar2015_aliocr/
2
+ icdar2015_aliocr_char/
3
  poly.jpg
aliocr_IC15_char_convert.py ADDED
@@ -0,0 +1,385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # pip install numpy==1.26.4 opencv-python==4.6.0.66
3
+
4
+ # see doc\lang\programming\pytorch\文本检测\DBNET 论文代码都有
5
+
6
+ """
7
+
8
+ 原图每一行剪出一张图, 对这张图做字符级标注
9
+
10
+ 给 DBNet 官方代码用
11
+
12
+ 将阿里OCR 的识别结果(图片和标注)转换成 icdar2015 格式 (注意:它的文本是含 utf8 bom 的)
13
+
14
+ """
15
+
16
+
17
+ """
18
+
19
+ icdar2015 文本检测数据集
20
+ 标注格式: x1,y1,x2,y2,x3,y3,x4,y4,text
21
+
22
+ 其中, x1,y1为左上角坐标,x2,y2为右上角坐标,x3,y3为右下角坐标,x4,y4为左下角坐标。
23
+
24
+ ### 表示text难以辨认。
25
+
26
+ """
27
+
28
+
29
+
30
+
31
+ import random
32
+ from pathlib import Path
33
+ import os
34
+ import glob
35
+ import base64
36
+ from importlib.resources import path
37
+ import math
38
+ import numpy as np
39
+ import cv2
40
+ import json
41
+ import decimal
42
+ import datetime
43
+ from pickletools import uint8
44
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles Decimal and datetime values.

    Decimals are emitted as floats; datetimes as their ``str()`` form.
    Anything else is delegated to the base class, which raises TypeError
    for unserializable objects.
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        elif isinstance(o, datetime.datetime):
            return str(o)
        # FIX: the original omitted `return`, silently discarding whatever
        # the base implementation produced; delegate properly.
        return super().default(o)
51
+
52
+
53
def save_json(filename, dics):
    """Write *dics* to *filename* as pretty-printed UTF-8 JSON.

    Uses DecimalEncoder so Decimal/datetime values serialize cleanly;
    ensure_ascii=False keeps CJK text human-readable in the output file.
    """
    # FIX: removed the redundant fp.close() — the with-statement already
    # closes the file on exit.
    with open(filename, 'w', encoding='utf-8') as fp:
        json.dump(dics, fp, indent=4, cls=DecimalEncoder, ensure_ascii=False)
57
+
58
+
59
def load_json(filename):
    """Read *filename* as UTF-8 JSON and return the parsed object."""
    # FIX: removed the redundant fp.close() — the with-statement already
    # closes the file on exit.
    with open(filename, encoding='utf-8') as fp:
        return json.load(fp)
64
+
65
+ # convert string to json
66
+
67
+
68
def parse(s):
    """Deserialize a JSON string into Python objects.

    strict=False tolerates raw control characters inside string
    literals instead of raising.
    """
    return json.loads(s, strict=False)
70
+
71
+ # convert dict to string
72
+
73
+
74
def string(d):
    """Serialize *d* to a JSON string.

    Non-ASCII characters are emitted verbatim; Decimal and datetime
    values are handled through DecimalEncoder.
    """
    encoded = json.dumps(d, cls=DecimalEncoder, ensure_ascii=False)
    return encoded
76
+
77
+
78
def transform(points, M):
    """Apply an affine transform matrix *M* to an (N, 2) array of points.

    Each point is lifted to homogeneous coordinates (x, y, 1), multiplied
    by the 2x3 matrix M, and the result is rounded to the nearest integer.

    Returns an (N, 2) int32 array of transformed coordinates.
    """
    # lift to homogeneous coordinates: append a column of ones
    homogeneous = np.hstack([points, np.ones((len(points), 1))])

    # map every point through M in one matrix product
    mapped = (M @ homogeneous.T).T

    # batch round-to-nearest, then narrow to int32
    return np.round(mapped, decimals=0).astype(np.int32)
97
+
98
+
99
def cutPoly(img, pts):
    """Cut the polygon *pts* out of *img* onto a white background.

    img: image as a numpy array (grayscale or color).
    pts: (N, 2) int array of polygon vertices in image coordinates.

    Returns the polygon's bounding-rect crop with every pixel outside
    the polygon painted white.
    """
    # img = cv2.imdecode(np.fromfile('./t.png', dtype=np.uint8), -1)
    # pts = np.array([[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]])

    ## (1) Crop the bounding rect
    rect = cv2.boundingRect(pts)
    x,y,w,h = rect
    croped = img[y:y+h, x:x+w].copy()

    ## (2) make mask — shift vertices into the crop's local coordinate frame
    pts = pts - pts.min(axis=0)

    mask = np.zeros(croped.shape[:2], np.uint8)
    # filled contour (thickness=-1) selects the polygon interior
    cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)

    ## (3) do bit-op — keep only the pixels inside the polygon
    dst = cv2.bitwise_and(croped, croped, mask=mask)

    ## (4) add the white background: white where the mask is zero
    bg = np.ones_like(croped, np.uint8)*255
    cv2.bitwise_not(bg,bg, mask=mask)
    # NOTE(review): LINE_AA leaves partial mask values at the edges, so
    # bg + dst can wrap around uint8 there — confirm the halo is acceptable.
    dst2 = bg+ dst


    # cv2.imwrite("croped.png", croped)
    # cv2.imwrite("mask.png", mask)
    # cv2.imwrite("dst.png", dst)
    # cv2.imwrite("dst2.png", dst2)

    return dst2
129
+
130
+
131
+
132
if __name__ == "__main__":

    # ---- Sanity check: visualize the boxes of one gt file ------------------

    # To verify the original icdar2015 annotations instead:
    # im = './datasets/icdar2015/train_images/img_1.jpg'
    # gt = './datasets/icdar2015/train_gts/gt_img_1.txt'

    # Verify the boxes generated by this script
    im = './icdar2015_aliocr_char/train_images/img_00000001.jpg'
    gt = './icdar2015_aliocr_char/train_gts/gt_img_00000001.txt'

    if os.path.exists(gt):

        items = []
        # utf-8-sig transparently strips a UTF-8 BOM (AliOCR output carries one)
        reader = open(gt, 'r', encoding='utf-8-sig').readlines()
        for line in reader:
            item = {}
            parts = line.strip().split(',')
            label = parts[-1]  # last comma-separated field is the transcription
            if 'TD' in gt and label == '1':
                label = '###'
            # strip stray BOM bytes from every field, just in case
            line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in parts]
            if 'icdar' in gt:
                # icdar2015 format: exactly 8 coordinates (4 corner points)
                poly = np.array(list(map(float, line[:8]))).reshape(
                    (-1, 2)).tolist()
            else:
                # generic case: every field but the label is a coordinate;
                # truncate to an even count so x/y pairs stay aligned
                num_points = math.floor((len(line) - 1) / 2) * 2
                poly = np.array(list(map(float, line[:num_points]))).reshape(
                    (-1, 2)).tolist()
            item['poly'] = poly
            item['text'] = label
            # a polygon is stored as vertices: point 1 connects to point 2,
            # point 2 to point 3, ... and the last closes back to the first
            item['points'] = poly
            # '###' marks illegible text, so its box must not be trusted
            item['ignore'] = True if label == '###' else False
            items.append(item)

        # decode via np.fromfile so non-ASCII paths work
        img = cv2.imdecode(np.fromfile(im, dtype=np.uint8), -1)
        # stock DBNet code only handles 3-channel images, so normalize to color
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        for i in range(len(items)):
            poly = items[i]['poly']
            poly = np.array(poly)
            poly = poly.astype(np.int32)

            # cv2.fillPoly(img, pts=[ poly ], color=(0, 0, 255))

            # random color per box; randint is inclusive of both bounds
            b = random.randint(0, 255)
            g = random.randint(0, 255)
            r = random.randint(0, 255)

            # outline only, no fill: connect the points in order, close the loop
            cv2.polylines(img, [poly], isClosed=True,
                          color=(b, g, r), thickness=1)

        cv2.imwrite("poly.jpg", img)

        # cv2.imshow("poly", img)
        # cv2.waitKey()

    # ---- Conversion proper -------------------------------------------------

    out_dir = 'icdar2015_aliocr_char'
    if os.path.exists(out_dir):
        import shutil
        shutil.rmtree(out_dir)  # start from a clean output tree


    # https://help.aliyun.com/document_detail/294540.html AliCloud OCR field docs
    # 'angle' in prism_wordsInfo only affects width/height: at -90, 90, -270, 270
    # the width and height values must be swapped by the consumer

    dir_json = './data/json'  # '/yingedu/www/ocr_server/data/json'
    dir_img = './data/img'  # '/yingedu/www/ocr_server/data/img'

    train_list = []
    train_list_path = os.path.join(out_dir, 'train_list.txt')

    test_list = []
    test_list_path = os.path.join(out_dir, 'test_list.txt')

    g_count = 1  # global output-image counter (used in output file names)
    count = 1    # processed-json counter (progress display only)

    json_paths = glob.glob('{}/*.json'.format(dir_json), recursive=True)

    for json_path in json_paths:

        base = Path(json_path).stem

        # the source image is stored base64-encoded in a .txt beside the json
        img_train_path = os.path.join(dir_img, '{}.txt'.format(base))

        if not os.path.exists(img_train_path):  # no matching image; maybe deleted
            continue

        jsn = load_json(json_path)

        with open(img_train_path, "r", encoding="utf-8") as fp:
            imgdata = fp.read()
            imgdata = base64.b64decode(imgdata)
            imgdata = np.frombuffer(imgdata, np.uint8)
            img = cv2.imdecode(imgdata, cv2.IMREAD_UNCHANGED)

        # cv2.imshow('img', img)
        # cv2.waitKey(0)

        if len(img.shape) != 3:  # grayscale -> color
            img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # stock DBNet needs color input

        else:
            img_color = img.copy()

        img_color_origin = img_color.copy()
        img_color_origin2 = img_color.copy()


        wordsInfo = jsn['prism_wordsInfo']
        for j in range(len(wordsInfo)):
            jo = wordsInfo[j]
            word = jo["word"]
            charInfo = jo["charInfo"]
            # 'angle' only affects width/height; swap them at +-90 / +-270
            angle = jo['angle']

            img_color = img_color_origin.copy()

            """
            The top-level x/y/width/height fields are unreliable;
            'pos' holds the correct coordinates.
            """



            # Four corners: top-left, top-right, bottom-right, bottom-left.
            # When NeedRotate is true and the outer angle is non-zero, the
            # coordinates are only accurate after rotating the image by angle.
            pos = jo["pos"]
            x = int(pos[0]["x"])  # top-left
            y = int(pos[0]["y"])

            x2 = int(pos[2]["x"])  # bottom-right
            y2 = int(pos[2]["y"])

            lu = [pos[0]['x'], pos[0]['y']]  # corners, clockwise from top-left
            ru = [pos[1]['x'], pos[1]['y']]
            rd = [pos[2]['x'], pos[2]['y']]
            ld = [pos[3]['x'], pos[3]['y']]

            # axis-aligned bounding box of the (possibly skewed) quad
            min_x = min(lu[0], ld[0])
            max_x = max(ru[0], rd[0])

            min_y = min(lu[1], ru[1])
            max_y = max(rd[1], ld[1])

            # clamp to the image bounds.
            # NOTE(review): negative min_x/min_y are not clamped, and float
            # 'pos' values would break the slicing below — presumably the
            # JSON always holds non-negative ints; confirm against real data.
            rows, cols = img.shape[:2]
            if max_y >= rows:
                max_y = rows - 1
            if max_x >= cols:
                max_x = cols - 1

            # cut out one line image per word block
            crop = img[min_y:max_y+1, min_x:max_x+1]

            # cv2.imshow("crop", crop)
            # cv2.waitKey()

            is_train_img = random.choices([0, 1], weights=[0.15, 0.85])[0]
            # 85% probability of landing in the training split

            img_name = "img_{:08d}.jpg".format(g_count)
            gt_name = "gt_img_{:08d}.txt".format(g_count)


            gt_txt_list = []

            # NOTE(review): img_train_path is re-bound here, shadowing the
            # base64 source path read above — harmless but confusing.
            img_train_path = os.path.join(out_dir, 'train_images', img_name)
            img_train_gt_path = os.path.join(out_dir, 'train_gts', gt_name)
            img_test_path = os.path.join(out_dir, 'test_images', img_name)
            img_test_gt_path = os.path.join(out_dir, 'test_gts', gt_name)

            dir1 = os.path.dirname(img_train_path)
            dir2 = os.path.dirname(img_train_gt_path)
            dir3 = os.path.dirname(img_test_path)
            dir4 = os.path.dirname(img_test_gt_path)

            if not os.path.exists(dir1):
                os.makedirs(dir1)
            if not os.path.exists(dir2):
                os.makedirs(dir2)
            if not os.path.exists(dir3):
                os.makedirs(dir3)
            if not os.path.exists(dir4):
                os.makedirs(dir4)

            if is_train_img:
                train_list.append(img_name)
                cv2.imwrite(img_train_path, crop)
            else:
                test_list.append(img_name)
                cv2.imwrite(img_test_path, crop)


            # character-level boxes, re-expressed in the crop's local frame
            for info in charInfo:
                wd = info["word"]
                wd_x = info["x"]
                wd_y = info["y"]
                wd_w = info["w"]
                wd_h = info["h"]

                wd_crop = img[wd_y:wd_y+wd_h, wd_x:wd_x+wd_w]

                # shift from full-image to crop-local coordinates
                wd_x_local = wd_x - min_x
                wd_y_local = wd_y - min_y

                wd_crop2 = crop[wd_y_local:wd_y_local+wd_h, wd_x_local:wd_x_local+wd_w]

                # cv2.imshow("wd_crop", wd_crop2)
                # cv2.waitKey()


                lu_wd = [wd_x_local, wd_y_local]
                ru_wd = [wd_x_local+wd_w, wd_y_local]
                rd_wd = [wd_x_local+wd_w, wd_y_local+wd_h]
                ld_wd = [wd_x_local, wd_y_local+wd_h]

                # emit one icdar2015-style gt line (for training official DBNet)
                gt_txt_list.append( "{},{},{},{},{},{},{},{},{}".format(lu_wd[0], lu_wd[1], ru_wd[0], ru_wd[1], rd_wd[0], rd_wd[1], ld_wd[0], ld_wd[1], wd) )


            gt_txt = "\n".join(gt_txt_list)

            if is_train_img:
                with open(img_train_gt_path, 'w', encoding='utf-8') as f:
                    f.write(gt_txt)
            else:
                with open(img_test_gt_path, 'w', encoding='utf-8') as f:
                    f.write(gt_txt)


            g_count += 1


        # NOTE(review): message probably meant '### one task done.'
        print(f'### one task one. {count} / {len(json_paths)}')

        count += 1

    train_list_txt = "\n".join(train_list)
    test_list_txt = "\n".join(test_list)

    with open(os.path.join(out_dir, "train_list.txt"), 'w', encoding='utf-8') as f:
        f.write(train_list_txt)

    with open(os.path.join(out_dir, "test_list.txt"), 'w', encoding='utf-8') as f:
        f.write(test_list_txt)

    print('### all task done.')
384
+
385
+
readme.txt DELETED
@@ -1,29 +0,0 @@
1
-
2
- see doc\lang\programming\pytorch\文本检测\DBNET 论文代码都有
3
-
4
- see 深入理解神经网络:从逻辑回归到CNN.md -> DBNet 可微分二值化
5
-
6
- see https://docs.opencv.org/4.x/d4/d43/tutorial_dnn_text_spotting.html
7
-
8
- - DB_IC15_resnet50.onnx:
9
- url: https://drive.google.com/uc?export=dowload&id=17_ABp79PlFt9yPCxSaarVc_DKTmrSGGf
10
- sha: bef233c28947ef6ec8c663d20a2b326302421fa3
11
- recommended parameter setting: -inputHeight=736, -inputWidth=1280;
12
- description: This model is trained on ICDAR2015, so it can only detect English text instances.
13
-
14
- - DB_IC15_resnet18.onnx:
15
- url: https://drive.google.com/uc?export=dowload&id=1vY_KsDZZZb_svd5RT6pjyI8BS1nPbBSX
16
- sha: 19543ce09b2efd35f49705c235cc46d0e22df30b
17
- recommended parameter setting: -inputHeight=736, -inputWidth=1280;
18
- description: This model is trained on ICDAR2015, so it can only detect English text instances.
19
-
20
- see huggingface/ColorTextEditorV2
21
- /imradv3
22
- /iWeChatOcr
23
-
24
-
25
- // 验证
26
- CUDA_VISIBLE_DEVICES=0 python demo.py experiments/seg_detector/ic15_resnet18_deform_thre.yaml --image_path datasets/icdar2015/test_images/img_97.jpg --resume /root/final --polygon --box_thresh 0.7 --visualize
27
-
28
-
29
- opencv-python==4.6.0.66