Update app.py
app.py
CHANGED
@@ -1,13 +1,11 @@
 from paddleocr import PaddleOCR
 from gliner import GLiNER
-import json
 from PIL import Image
 import gradio as gr
 import numpy as np
 import cv2
 import logging
 import os
-from pathlib import Path
 import tempfile
 import pandas as pd
 import io
@@ -29,16 +27,8 @@ except Exception as e:
     logger.error("Failed to load GLiNER model")
     raise e

-# Helper functions
-# Get a random color (used for drawing bounding boxes, if needed)
 def get_random_color():
-    return tuple(np.random.randint(0, 256, 3).tolist()
-
-def draw_ocr_bbox(image, boxes, colors):
-    for i in range(len(boxes)):
-        box = np.reshape(np.array(boxes[i]), [-1, 1, 2]).astype(np.int64)
-        image = cv2.polylines(np.array(image), [box], True, colors[i], 2)
-    return image
+    return tuple(np.random.randint(0, 256, 3).tolist())

 def scan_qr_code(image):
     try:
@@ -55,32 +45,45 @@ def extract_emails(text):
     return re.findall(email_regex, text)

 def extract_websites(text):
-    website_regex = r"(?:https?://)?(?:www\.)?[A-Za-z0-9-]+\.[A-Za-z]{2,}(?:/\S*)
+    website_regex = r"\b(?:https?://)?(?:www\.)?([A-Za-z0-9-]+\.[A-Za-z]{2,})(?:/\S*)?\b"
     matches = re.findall(website_regex, text)
     return [m for m in matches if '@' not in m]

 def clean_phone_number(phone):
-
+    cleaned = re.sub(r"(?!^\+)[^\d]", "", phone)
+    if len(cleaned) < 9 or (len(cleaned) == 9 and cleaned.startswith("+")):
+        return None
+    return cleaned
+
+def normalize_website(url):
+    url = url.lower().replace("www.", "").split('/')[0]
+    if not re.match(r"^[a-z0-9-]+\.[a-z]{2,}$", url):
+        return None
+    return f"www.{url}"
+
+def extract_address(ocr_texts):
+    address_keywords = ["block", "street", "ave", "area", "industrial", "road"]
+    address_parts = []
+    for text in ocr_texts:
+        if any(kw in text.lower() for kw in address_keywords):
+            address_parts.append(text)
+    return " ".join(address_parts) if address_parts else None

-# Main inference function
 def inference(img: Image.Image, confidence):
     try:
-        # Initialize PaddleOCR
         ocr = PaddleOCR(use_angle_cls=True, lang='en', use_gpu=False,
                         det_model_dir='./models/det/en',
                         cls_model_dir='./models/cls/en',
                         rec_model_dir='./models/rec/en')

-        # OCR Processing
         img_np = np.array(img)
         result = ocr.ocr(img_np, cls=True)[0]
         ocr_texts = [line[1][0] for line in result]
         ocr_text = " ".join(ocr_texts)

-        # Entity Extraction
         labels = ["person name", "company name", "job title",
-                  "phone number", "email address", "
-                  "website
+                  "phone number", "email address", "address",
+                  "website"]
         entities = gliner_model.predict_entities(ocr_text, labels, threshold=confidence, flat_ner=True)

         results = {
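As a quick sanity check on the helpers added in this hunk, the calls below show what the new validation logic returns for made-up inputs (illustrative values only; importing app.py also loads the OCR and GLiNER models, so treat this as a sketch rather than a test to run as-is):

    clean_phone_number("+1 (555) 123-4567")    # leading "+" kept, other non-digits stripped -> "+15551234567"
    clean_phone_number("555-1234")             # only 7 digits, under the 9-digit minimum -> None
    normalize_website("www.Example.COM/about") # lowercased, "www." and path stripped, then re-prefixed -> "www.example.com"
    normalize_website("not a url")             # fails the domain pattern -> None
    extract_address(["Block 4, Industrial Area 2", "CEO"])  # keyword hit on "block"/"area" -> "Block 4, Industrial Area 2"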
@@ -89,70 +92,94 @@ def inference(img: Image.Image, confidence):
             "Job Title": [],
             "Phone Number": [],
             "Email Address": [],
-            "
-            "Website
+            "Address": [],
+            "Website": [],
             "QR Code": []
         }

-        # Process
+        # Process entities with validation
         for entity in entities:
-
-
-
-
-
-
-            elif label == "
-                results["
-            elif label
-
+            text = entity["text"].strip()
+            label = entity["label"].lower()
+
+            if label == "phone number":
+                if (cleaned := clean_phone_number(text)):
+                    results["Phone Number"].append(cleaned)
+            elif label == "email address" and "@" in text:
+                results["Email Address"].append(text.lower())
+            elif label == "website":
+                if (normalized := normalize_website(text)):
+                    results["Website"].append(normalized)
+            elif label == "address":
+                results["Address"].append(text)
+            elif label == "company name":
+                results["Company Name"].append(text)
+            elif label == "person name":
+                results["Person Name"].append(text)
+            elif label == "job title":
+                results["Job Title"].append(text.title())

         # Regex fallbacks
-
-
+        results["Email Address"] += extract_emails(ocr_text)
+        results["Website"] += [normalize_website(w) for w in extract_websites(ocr_text)]

-        if not results["Website Url"]:
-            results["Website Url"] = extract_websites(ocr_text)
-
         # Phone number validation
-
-        for
-
-
-
-
-
-        if
-
+        seen_phones = set()
+        for phone in results["Phone Number"] + re.findall(r'\+\d{8,}|\d{9,}', ocr_text):
+            if (cleaned := clean_phone_number(phone)) and cleaned not in seen_phones:
+                results["Phone Number"].append(cleaned)
+                seen_phones.add(cleaned)
+        results["Phone Number"] = list(seen_phones)
+
+        # Address processing
+        if not results["Address"]:
+            if (address := extract_address(ocr_texts)):
+                results["Address"].append(address)
+
+        # Website normalization
+        seen_websites = set()
+        final_websites = []
+        for web in results["Website"]:
+            if web and web not in seen_websites:
+                final_websites.append(web)
+                seen_websites.add(web)
+        results["Website"] = final_websites
+
+        # Company name fallback
+        if not results["Company Name"]:
+            if results["Email Address"]:
+                domain = results["Email Address"][0].split('@')[-1].split('.')[0]
+                results["Company Name"].append(domain.title())
+            elif results["Website"]:
+                domain = results["Website"][0].split('.')[1]
+                results["Company Name"].append(domain.title())
+
+        # Name fallback
+        if not results["Person Name"]:
+            for text in ocr_texts:
+                if re.match(r"^(?:[A-Z][a-z]+\s?){2,}$", text):
+                    results["Person Name"].append(text)
+                    break
+
+        # QR Code
+        if (qr_data := scan_qr_code(img)):
+            results["QR Code"].append(qr_data)

         # Create CSV
         csv_data = {k: "; ".join(v) for k, v in results.items() if v}
-
-
-        csv_io.seek(0)
-
-        with tempfile.NamedTemporaryFile(suffix=".csv", delete=False, mode="wb") as tmp_file:
-            tmp_file.write(csv_io.getvalue())
+        with tempfile.NamedTemporaryFile(suffix=".csv", delete=False, mode="w") as tmp_file:
+            pd.DataFrame([csv_data]).to_csv(tmp_file, index=False)
             csv_path = tmp_file.name

         return ocr_text, csv_data, csv_path, ""
-
+
     except Exception as e:
         logger.error(f"Processing failed: {traceback.format_exc()}")
         return "", {}, None, f"Error: {str(e)}\n{traceback.format_exc()}"

 # Gradio Interface
 title = 'Enhanced Business Card Parser'
-description = '
-
-examples = [
-    ['example_imgs/example.jpg', 0.4],
-    ['example_imgs/demo003.jpeg', 0.5],
-]
-
-css = """.output_image, .input_image {height: 40rem !important; width: 100% !important;}
-.gr-interface {max-width: 800px !important;}"""
+description = 'Accurate entity extraction with combined AI and regex validation'

 if __name__ == '__main__':
     demo = gr.Interface(
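The CSV step above now builds a one-row table with pandas instead of hand-assembling a CSV buffer; a minimal standalone sketch of the same pattern, with illustrative field values:

    import tempfile
    import pandas as pd

    csv_data = {"Person Name": "Jane Doe", "Phone Number": "+15551234567"}  # illustrative values
    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False, mode="w") as tmp_file:
        pd.DataFrame([csv_data]).to_csv(tmp_file, index=False)  # dict of scalars -> single CSV row
        csv_path = tmp_file.name
    print(csv_path)  # path later handed to the Gradio file output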
@@ -165,9 +192,6 @@ if __name__ == '__main__':
             gr.Textbox(label="Error Log")],
         title=title,
         description=description,
-
-        css=css,
-        cache_examples=True
+        css=".gr-interface {max-width: 800px !important;}"
     )
-    demo.queue(max_size=20)
     demo.launch()
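For reference, the updated inference function can also be exercised outside Gradio; the image path below is a placeholder, and importing app.py initializes PaddleOCR and GLiNER:

    from PIL import Image
    from app import inference  # loads the OCR and GLiNER models at import time

    ocr_text, csv_data, csv_path, error = inference(Image.open("card.jpg"), confidence=0.4)
    if error:
        print(error)
    else:
        print(csv_data)   # e.g. {"Person Name": "...", "Phone Number": "..."}
        print(csv_path)   # temporary CSV containing the same fields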