| import os |
| from paddleocr import PaddleOCR, draw_ocr |
| from PIL import Image |
| import gradio as gr |
| import paddle |
| import numpy as np |
|
|
# Background image used by the page CSS; Gradio must be given its
# absolute path at launch time so the browser may fetch it.
image_path = '/home/user/app/ocr_cover.png'
absolute_path = os.path.abspath(image_path)

# Locations of the custom-trained recognition / detection models.
rec_model_dir = "/home/user/app/custom_rec_model"
det_model_dir = "/home/user/app/custom_det_model"

# Build the OCR engine once at import time: English models, CPU-only,
# with text-angle classification enabled so rotated text is handled.
ocr = PaddleOCR(
    use_angle_cls=True,
    rec_model_dir=rec_model_dir,
    det_model_dir=det_model_dir,
    lang="en",
    use_gpu=False,
)
|
|
|
|
def inference(img_pil):
    """Run OCR on a PIL image and return a visualization of the result.

    The input is downscaled to half its original size before inference
    (cheaper detection at a small accuracy cost), then passed through
    detection + recognition + angle classification.

    Returns:
        The path of the rendered result image ('result_ocr.jpg'), or a
        placeholder PIL image when no text was detected.  (Gradio's
        Image output component accepts either form.)
    """
    original_width, original_height = img_pil.size

    # Halve both dimensions to speed up inference.
    img_pil = img_pil.resize((original_width // 2, original_height // 2))

    img = np.array(img_pil)

    result = ocr.ocr(img, det=True, rec=True, cls=True)

    # PaddleOCR signals "nothing detected" either as an empty list or as
    # a list whose first element is None — handle both with one guard
    # (the original duplicated this branch).
    if not result or result[0] is None:
        return Image.open("/home/user/app/no_text.png")

    result = result[0]

    # Each line is [box, (text, confidence)].
    boxes = [line[0] for line in result]
    txts = [line[1][0] for line in result]
    scores = [line[1][1] for line in result]

    im_show = draw_ocr(img_pil, boxes, txts, scores,
                       font_path='/home/user/app/simfang.ttf')
    im_show = Image.fromarray(im_show)
    im_show.save('result_ocr.jpg')
    return 'result_ocr.jpg'
|
|
|
|
# Static UI copy rendered on the Gradio page (title, blurb, footer HTML).
title = 'OCR (PaddleOCR)'
description = 'Demo for PaddleOCR. To use it, simply upload your image. (Only English and Arabic numbers are supported.)'
article = "<p style='text-align: center'><a href='https://www.linkedin.com/in/suesarn-wilainuch-7a1528113/' style='color: white'>Implemented by Suesarn Wilainuch, LinkedIn</a></p>"
|
|
|
|
| css_code = """ |
| .gradio-container { |
| background: url("file=ocr_cover.png"); |
| background-size: cover; /* Set background size to cover the entire container */ |
| background-repeat: no-repeat; /* Prevent background from repeating */ |
| background-position: center; /* Center the background image */ |
| width: 100%; /* Set container width to 100% */ |
| height: 100%; /* Set container height to 100% */ |
| } |
| |
| h1, p { |
| color: white; /* Set font color to white */ |
| } |
| |
| h1 { |
| font-size: 36px; /* Adjust font size for title */ |
| } |
| |
| p { |
| font-size: 18px; /* Adjust font size for description */ |
| } |
| """ |
|
|
| |
# Assemble the UI and start the server.  The background image's absolute
# path is whitelisted so the browser is allowed to request it.
demo = gr.Interface(
    fn=inference,
    inputs=gr.Image(type='pil', label='Input'),
    outputs=gr.Image(type='pil', label='Output'),
    title=title,
    description=description,
    article=article,
    css=css_code,
)
demo.launch(debug=False, allowed_paths=[absolute_path])