# Comic OCR HTTP service.
#
# Loads a comic text detector (CTD) and a 48px OCR model once at import time,
# then serves a single /comicocr endpoint that accepts a base64-encoded image
# and returns detected text in a "prism_wordsInfo"-style JSON structure (see
# the schema note inside ocr()).
#
# see huggingface/BallonsTranslator/main.py
# see huggingface/project/flask_auto_selection.py

import base64
import io
import json
import math
import os
import os.path as osp
import re
import sys
import time
import uuid

import cv2
import numpy as np
import PIL
from PIL import Image
from flask import Flask, request, jsonify

from modules.textdetector.ctd.inference import TextDetector as CTDModel
from modules.ocr.mit48px import Model48pxOCR

# --- model setup: runs once at import time (models are heavyweight) ---
CTD_ONNX_PATH = 'data/models/comictextdetector.pt.onnx'
device = 'cpu'
detect_size = 1280
ctd_model = CTDModel(CTD_ONNX_PATH, detect_size=detect_size, device=device)

OCR48PXMODEL_PATH = 'data/models/ocr_ar_48px.ckpt'
ocr_model = Model48pxOCR(OCR48PXMODEL_PATH, device)

is_debug = True
dic_cache = {}

app = Flask(__name__)


def save_json(filename, dics):
    """Write *dics* to *filename* as pretty-printed UTF-8 JSON."""
    with open(filename, 'w', encoding='utf-8') as fp:
        json.dump(dics, fp, indent=4, ensure_ascii=False)


def load_json(filename):
    """Load and return the JSON content of *filename*."""
    with open(filename, encoding='utf-8') as fp:
        return json.load(fp)


def jsonparse(s):
    """Parse a JSON string, allowing embedded control characters (strict=False)."""
    return json.loads(s, strict=False)


def jsonstring(d):
    """Serialize *d* to a JSON string without escaping non-ASCII characters."""
    return json.dumps(d, ensure_ascii=False)


def show_img(image, target_width=400):
    """Display *image* scaled to *target_width* (blocks on a keypress).

    Returns the resized copy that was displayed.
    """
    original_height, original_width = image.shape[:2]
    # Scale proportionally so the aspect ratio is preserved.
    scale = target_width / original_width
    target_height = int(original_height * scale)
    resized_image = cv2.resize(image, (target_width, target_height),
                               interpolation=cv2.INTER_AREA)
    cv2.imshow("green", resized_image)
    cv2.waitKey(0)
    return resized_image


# see utils\io_utils.py
def imread(imgpath, read_type=cv2.IMREAD_COLOR, max_retry_limit=5, retry_interval=0.1):
    """Read an image with PIL, retrying while a writer may still be flushing it.

    Returns a numpy array (PIL gives RGB channel order — note cv2 consumers
    elsewhere conventionally assume BGR; TODO confirm intent), a 2-D array for
    IMREAD_GRAYSCALE, or None if the path is missing or retries are exhausted.
    """
    if not osp.exists(imgpath):
        return None
    num_tries = 0
    while True:
        try:
            img = Image.open(imgpath)
            if read_type == cv2.IMREAD_GRAYSCALE:
                img = img.convert('L')
            img = np.array(img)
            if read_type != cv2.IMREAD_GRAYSCALE:
                if img.ndim == 3 and img.shape[-1] == 1:
                    # BUGFIX: was `img[..., :2]`, a no-op on a (H, W, 1) array
                    # that left it 3-D so the GRAY2RGB branch below never ran.
                    img = img[..., 0]
                if img.ndim == 2:
                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
                if img.ndim == 3 and img.shape[-1] == 4:
                    # Drop the alpha channel only when it is fully opaque.
                    if np.all(img[..., -1] == 255):
                        img = np.ascontiguousarray(img[..., :3])
            break
        except PIL.UnidentifiedImageError:
            # The IMG I/O thread might not be finished yet — wait and retry.
            num_tries += 1
            if max_retry_limit is not None and num_tries >= max_retry_limit:
                return None
            time.sleep(retry_interval)
    return img


def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]


def ocr(img):
    """Detect and OCR text blocks in *img*.

    Returns (jsn, img_draw) where jsn is a dict shaped like:
        {"width": W, "height": H, "prism_wordsInfo": [
            {"word", "x", "y", "width", "height",
             "pos": [4 corner {"x","y"} dicts],
             "charInfo": [{"word","x","y","w","h","guid","isDeleted"}, ...]},
            ...]}
    and img_draw is a copy of *img* with detected line boxes drawn in red.
    """
    # All text detectors only support 3-channel input.
    if img.ndim == 3 and img.shape[2] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)

    _, mask, blk_list = ctd_model(img)

    # Rescale/clamp detected font sizes. With fnt_rsz=1.0 and no bounds set
    # (fnt_max/fnt_min <= 0) this is effectively a pass-through, kept so the
    # knobs are in place.
    fnt_rsz = 1.0
    fnt_max = -1
    fnt_min = -1
    for blk in blk_list:
        sz = blk._detected_font_size * fnt_rsz
        if fnt_max > 0:
            sz = min(fnt_max, sz)
        if fnt_min > 0:
            sz = max(fnt_min, sz)
        blk.font_size = sz
        blk._detected_font_size = sz

    # Dilate the text mask with an elliptical structuring element so the
    # white (text) regions grow: small holes close and broken regions join.
    ksize = 2
    if ksize > 0:
        element = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (2 * ksize + 1, 2 * ksize + 1),  # kernel size
            (ksize, ksize))                  # anchor (center) of the ellipse
        mask = cv2.dilate(mask, element)

    for blk in blk_list:
        blk.det_model = 'ctd'
        blk.text = []

    # Crop text regions and run batched OCR (fills blk.text in place).
    split_textblk = False
    seg_func = None
    model_text_height = 48
    model_maxwidth = 8100
    from utils.textblock import collect_textblock_regions
    chunk_size = 16
    regions, textblk_lst_indices = collect_textblock_regions(
        img, blk_list, model_text_height, model_maxwidth,
        split_textblk, seg_func)
    ocr_model(blk_list, regions, textblk_lst_indices, chunk_size=chunk_size)

    img_draw = img.copy()
    results = []
    # ui\mainwindow.py
    for blk in blk_list:
        results.append({"texts": blk.text, "lines": blk.lines})
        for line in blk.lines:
            # line corners appear ordered [lu, ru, rd, ld]; draw the lu->rd
            # rectangle for each detected line.
            img_draw = cv2.rectangle(img_draw, line[0], line[2], (0, 0, 255), 2)

    jsn = {
        "width": img.shape[1],
        "height": img.shape[0],
        "prism_wordsInfo": []
    }
    for result in results:
        texts, lines = result["texts"], result["lines"]
        if not lines:
            # A block with no detected lines has no geometry to report;
            # previously this produced garbage 999/-1 coordinates.
            continue
        word = ''.join(texts)
        charInfo = []
        # BUGFIX: the accumulators were seeded with 999/-1, which silently
        # produced wrong boxes for coordinates beyond 999 pixels.
        min_x = math.inf
        min_y = math.inf
        max_x = -math.inf
        max_y = -math.inf
        for text, line in zip(texts, lines):
            lu, ru, rd, ld = line[0], line[1], line[2], line[3]
            minx = min(lu[0], ld[0])
            maxx = max(ru[0], rd[0])
            miny = min(lu[1], ru[1])
            maxy = max(rd[1], ld[1])
            min_x = min(min_x, minx)
            max_x = max(max_x, maxx)
            min_y = min(min_y, miny)
            max_y = max(max_y, maxy)
            # NOTE(review): every character of this line shares the whole
            # line's box — per-character geometry is not computed.
            for c in text:
                charInfo.append({
                    "word": c,
                    "x": minx,
                    "y": miny,
                    "w": maxx - minx,
                    "h": maxy - miny,
                    "guid": str(uuid.uuid4()),
                    "isDeleted": 0
                })
        pos = [
            {"x": min_x, "y": min_y},
            {"x": max_x, "y": min_y},
            {"x": max_x, "y": max_y},
            {"x": min_x, "y": max_y}
        ]
        jsn["prism_wordsInfo"].append({
            "word": word,
            "x": min_x,
            "y": min_y,
            "width": max_x - min_x,
            "height": max_y - min_y,
            "pos": pos,
            "charInfo": charInfo
        })
    return jsn, img_draw


@app.route('/comicocr', methods=['post'])
def comicocr():
    """POST endpoint: body {"img": <base64 image>} -> OCR result JSON.

    request.json requires the body to be raw JSON with the header
    Content-Type: application/json.
    """
    global dic_cache
    img_b64_str = request.json['img']
    img_bytes = base64.b64decode(img_b64_str)
    imgData = np.frombuffer(img_bytes, dtype=np.uint8)
    img = cv2.imdecode(imgData, -1)
    # All text detectors only support 3-channel input.
    if img.ndim == 3 and img.shape[2] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
    jsn, img_draw = ocr(img)
    return jsonify(jsn)


def main():
    """Debug mode: run OCR once on a fixed image; otherwise serve HTTP."""
    if is_debug:
        img = imread('E:/huggingface/BallonsTranslator/assets/kcc-0010.jpg')
        jsn, img_draw = ocr(img)
        cv2.imwrite("E:/xxxxxxxxxxxxxxxx.jpg", img_draw)
    else:
        app.run(host="0.0.0.0", port=2393, debug=True)
    # NOTE(review): a large block of duplicated, unreachable pipeline code
    # (re-imports, second model instantiation, a full copy of ocr()'s body)
    # followed an early `return` here; it has been removed as dead code.


if __name__ == '__main__':
    main()