triflix committed on
Commit
1597dd9
·
verified ·
1 Parent(s): 558ed65

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +319 -0
app.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import io
3
+ import uuid
4
+ import json
5
+ import time
6
+ import tempfile
7
+ import unicodedata
8
+ import re
9
+ from dataclasses import dataclass
10
+ from typing import List, Dict, Tuple
11
+
12
+ import cv2
13
+ import numpy as np
14
+ import torch
15
+ from paddleocr import TextDetection
16
+ from easyocr import Reader
17
+ from rapidfuzz import fuzz
18
+
19
+ import gradio as gr
20
+
21
+ # ============ CORE VALIDATORS (UNCHANGED) ============
22
class VerhoeffValidator:
    """Validate 12-digit Aadhaar numbers with the Verhoeff checksum.

    An Aadhaar number is exactly 12 digits, never begins with 0 or 1, and
    embeds a Verhoeff check digit (dihedral group D5 arithmetic).
    """

    # Dihedral group D5 multiplication table.
    d_table = [
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
        [2, 3, 4, 0, 1, 7, 8, 9, 5, 6],
        [3, 4, 0, 1, 2, 8, 9, 5, 6, 7],
        [4, 0, 1, 2, 3, 9, 5, 6, 7, 8],
        [5, 9, 8, 7, 6, 0, 4, 3, 2, 1],
        [6, 5, 9, 8, 7, 1, 0, 4, 3, 2],
        [7, 6, 5, 9, 8, 2, 1, 0, 4, 3],
        [8, 7, 6, 5, 9, 3, 2, 1, 0, 4],
        [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
    ]
    # Position-dependent permutation table (cycles with period 8).
    p_table = [
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        [1, 5, 7, 6, 2, 8, 3, 0, 9, 4],
        [5, 8, 0, 3, 7, 9, 6, 1, 4, 2],
        [8, 9, 1, 6, 0, 4, 3, 5, 2, 7],
        [9, 4, 5, 3, 1, 2, 6, 8, 7, 0],
        [4, 2, 8, 6, 5, 7, 3, 9, 0, 1],
        [2, 7, 9, 3, 8, 0, 6, 4, 1, 5],
        [7, 0, 4, 6, 9, 1, 3, 2, 5, 8],
    ]

    @classmethod
    def validate(cls, n: str) -> bool:
        """Return True iff *n* is a structurally valid Aadhaar number."""
        # Structural gate: non-empty, exactly 12 digits, leading digit 2-9.
        if not n or len(n) != 12 or not n.isdigit():
            return False
        if n[0] in '01':
            return False
        # Fold digits right-to-left through the D5 tables; valid iff we land on 0.
        checksum = 0
        for position, digit in enumerate(n[::-1]):
            checksum = cls.d_table[checksum][cls.p_table[position % 8][int(digit)]]
        return checksum == 0
31
+
32
class PatternValidator:
    """Regex-based extractors for Indian ID numbers found in OCR text."""

    @staticmethod
    def find_aadhaar(t: str) -> List[str]:
        """Return checksum-valid Aadhaar numbers found in *t*, de-duplicated.

        Two patterns are scanned: grouped "XXXX XXXX XXXX" and a solid
        12-digit run. A solid run matches BOTH patterns, so results are
        de-duplicated (preserving discovery order) — the original returned
        such numbers twice, inconsistently with find_pan's dedupe.
        Every candidate must pass the Verhoeff checksum.
        """
        patterns = [r'\b[2-9]\d{3}\s?\d{4}\s?\d{4}\b', r'\b[2-9]\d{11}\b']
        found: List[str] = []
        for p in patterns:
            for m in re.findall(p, t):
                digits = re.sub(r'\s', '', m)
                if VerhoeffValidator.validate(digits) and digits not in found:
                    found.append(digits)
        return found

    @staticmethod
    def find_pan(t: str) -> List[str]:
        """Return unique PAN numbers (e.g. ABCPD1234E) found in *t*, case-insensitively."""
        # 4th char encodes holder type (P=person, C=company, ...).
        return list(set(re.findall(r'\b[A-Z]{3}[PCHFATBLJG][A-Z]\d{4}[A-Z]\b', t.upper())))
40
+
41
class TextNormalizer:
    """Unicode clean-up plus optional letter→digit repair for OCR output."""

    # Characters OCR engines commonly mistake for digits, mapped to the intended digit.
    OCR_CORRECTIONS = {'O':'0','o':'0','l':'1','I':'1','Z':'2','z':'2','S':'5','G':'6','b':'6','T':'7','B':'8','g':'9','q':'9'}

    @staticmethod
    def normalize(text: str, aggressive: bool = False) -> str:
        """Return *text* NFKC-folded, stripped of control chars and stray symbols.

        With aggressive=True, digit-looking tokens of 4+ characters also get
        common OCR letter/digit confusions corrected.
        """
        if not text:
            return ""
        # NFKC fold, then drop every control/format character (Unicode category "C").
        cleaned = unicodedata.normalize('NFKC', text)
        cleaned = ''.join(ch for ch in cleaned if unicodedata.category(ch)[0] != 'C')
        if aggressive:
            corrections = TextNormalizer.OCR_CORRECTIONS

            def repair(match):
                token = match.group(0)
                for wrong, right in corrections.items():
                    token = token.replace(wrong, right)
                return token

            cleaned = re.sub(r'\b[0-9OolIZzSGbTBgq]{4,}\b', repair, cleaned)
        # Keep word chars, whitespace, Devanagari, and . , / - ; then collapse whitespace runs.
        cleaned = re.sub(r'[^\w\s\u0900-\u097F.,/-]', '', cleaned)
        return re.sub(r'\s+', ' ', cleaned).strip()
54
+
55
+ # ============ CONFIGURATION ============
56
@dataclass
class Config:
    """Tunable knobs for OCR, classification, and keyword verification."""

    fuzzy_threshold: int = 80   # minimum rapidfuzz ratio to count a keyword as matched
    min_keywords: int = 1       # matches required before a document type is accepted
    max_image_dim: int = 2000   # longest image side after resizing
    languages: List[str] = None                 # EasyOCR language codes; real default set below
    doc_keywords: Dict[str, List[str]] = None   # doc-type -> trigger keywords; real default set below

    def __post_init__(self):
        # Dataclass fields cannot carry mutable defaults directly, so None is
        # the sentinel and the real defaults are filled in here.
        if self.languages is None:
            self.languages = ['en', 'hi']
        if self.doc_keywords is None:
            self.doc_keywords = {
                "Aadhaar": ["uidai","aadhaar","aadhar","government","india","mera","naam","pehchaan","यूआईडीएआई","आधार","भारत","सरकार","जन्म","तिथि"],
                "PAN": ["permanent","account","number","income","tax","incometaxindia","pan","स्थायी","खाता","आयकर","पिता","नाम"],
                "Driving_License": ["driving","licence","motor","vehicles","rto","mcwg","lmv","ड्राइविंग","वाहन","परिवहन","चालविण्याचा","परवाना"],
                "Passport": ["passport","republic","india","ministry","external","affairs","पासपोर्ट","गणराज्य","विदेश","मंत्रालय"],
                "Ration_Card": ["ration","card","food","civil","supplies","apl","bpl","राशन","कार्ड","खाद्य","नागरी","पुरवठा"],
            }
73
+
74
+ # ============ MAIN PIPELINE ============
75
class DocumentOCRVerifier:
    """End-to-end pipeline: preprocess -> detect regions -> OCR -> classify -> verify keywords."""

    def __init__(self, config: Config = None):
        self.cfg = config or Config()
        # PaddleOCR detector is optional: fall back to whole-image EasyOCR if
        # the detection model cannot be loaded (e.g. offline environment).
        try:
            self.detector = TextDetection(model_name="PP-OCRv5_mobile_det")
        except Exception:
            self.detector = None
        self.reader = Reader(self.cfg.languages, gpu=torch.cuda.is_available())

    def _preprocess(self, img: np.ndarray) -> np.ndarray:
        """Resize, deskew and enhance a BGR image for OCR."""
        img = self._resize(img)
        img = self._deskew(img)
        return self._enhance(img)

    def _resize(self, img: np.ndarray) -> np.ndarray:
        """Downscale so the longest side is at most cfg.max_image_dim (never upscale)."""
        h, w = img.shape[:2]
        if max(h, w) > self.cfg.max_image_dim:
            scale = self.cfg.max_image_dim / max(h, w)
            img = cv2.resize(img, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA)
        return img

    def _deskew(self, img: np.ndarray) -> np.ndarray:
        """Rotate the image so the dominant foreground contour is axis-aligned."""
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            rect = cv2.minAreaRect(max(contours, key=cv2.contourArea))
            angle = rect[-1]
            # minAreaRect angles live in (-90, 90]; map to the smallest correction.
            if angle < -45:
                angle = 90 + angle
            elif angle > 45:
                angle -= 90
            if abs(angle) > 0.5:  # ignore sub-half-degree noise
                h, w = img.shape[:2]
                M = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
                img = cv2.warpAffine(img, M, (w, h), borderValue=(255, 255, 255))
        return img

    def _enhance(self, img: np.ndarray) -> np.ndarray:
        """Denoise, CLAHE-equalize the luminance channel, then sharpen."""
        denoised = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
        lab = cv2.cvtColor(denoised, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)
        l = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(l)
        enhanced = cv2.cvtColor(cv2.merge([l, a, b]), cv2.COLOR_LAB2BGR)
        # Unsharp-style kernel, blended at 60/40 to avoid over-sharpening halos.
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
        return cv2.addWeighted(cv2.filter2D(enhanced, -1, kernel), 0.6, enhanced, 0.4, 0)

    def _extract_keywords(self, text: str) -> List[str]:
        """Split OCR text into non-empty whitespace-separated tokens."""
        if not text:
            return []
        return [t for t in re.split(r'\s+', text.strip()) if t]

    def _classify(self, text: str) -> Tuple[str, float, List[str]]:
        """Classify the document type from OCR text.

        Returns (doc_type, confidence, matched_keywords), or
        ("UNCLASSIFIED", 0.0, []) when no type reaches cfg.min_keywords matches.
        A checksum-valid Aadhaar/PAN number in the raw text is decisive (score 100).
        """
        norm_text = TextNormalizer.normalize(text, aggressive=True)
        haystack = norm_text.lower()
        words = haystack.split()  # hoisted: identical for every keyword (was recomputed per kw)
        scores = {}
        for doc_type, keywords in self.cfg.doc_keywords.items():
            matched = []
            for kw in keywords:
                kw_l = kw.lower()
                # Cheap substring check first; fuzzy matching only if that fails.
                if kw_l in haystack:
                    matched.append(kw)
                    continue
                for i, w in enumerate(words):
                    # Single-word fuzzy match, then a 5-word phrase window for
                    # multi-word keywords.
                    if fuzz.ratio(kw_l, w) >= self.cfg.fuzzy_threshold:
                        matched.append(kw)
                        break
                    phrase = " ".join(words[i:min(i + 5, len(words))])
                    if fuzz.ratio(kw_l, phrase) >= self.cfg.fuzzy_threshold:
                        matched.append(kw)
                        break
            score = len(matched)
            # Pattern + checksum hits override keyword evidence entirely.
            if doc_type == "Aadhaar" and PatternValidator.find_aadhaar(text):
                score = 100
            elif doc_type == "PAN" and PatternValidator.find_pan(text):
                score = 100
            scores[doc_type] = {"score": score, "matched": matched}
        winner_type, winner = max(scores.items(), key=lambda x: x[1]["score"])
        if winner["score"] >= self.cfg.min_keywords:
            # NOTE: original had a bracket typo here (`winner[1]["matched")]`)
            # that was a syntax error; the intended confidence formula is kept.
            total = len(self.cfg.doc_keywords[winner_type])
            if winner["score"] == 100:
                conf = 0.95
            elif total > 0:
                conf = min(0.90, len(winner["matched"]) / total + 0.3)
            else:
                conf = 0.0
            return winner_type, conf, winner["matched"]
        return "UNCLASSIFIED", 0.0, []

    def verify(self, image_path: str, user_keywords: List[str]) -> Dict:
        """Run the full pipeline on *image_path* and return a JSON-serializable dict.

        user_keywords are matched case-insensitively as exact substrings of the
        combined OCR text; any match yields verificationStatus == "verified".
        """
        img = cv2.imread(image_path)
        if img is None:
            return {"error": "Image not found", "imagePath": image_path}

        img = self._preprocess(img)

        ocr_keywords: List[str] = []
        all_text = ""

        regions = []
        if self.detector:
            try:
                # Detect on the *preprocessed* array so region polygons line up
                # with the image we crop from. (The original detected on the raw
                # file while cropping the resized/deskewed image, misaligning
                # every box whenever _resize or _deskew changed geometry.)
                regions = self.detector.predict(input=img, batch_size=1)
            except Exception:
                regions = []

        if regions:
            img_h, img_w = img.shape[:2]
            for res in regions:
                for poly in res.get("dt_polys", []):
                    pts = np.array(poly, dtype=np.int32)
                    x, y, w, h = cv2.boundingRect(pts)
                    # Clamp to image bounds and skip degenerate crops.
                    x, y = max(x, 0), max(y, 0)
                    cropped = img[y:min(y + h, img_h), x:min(x + w, img_w)]
                    if cropped.size == 0:
                        continue
                    texts = self.reader.readtext(cropped, detail=0)
                    if texts:
                        # Keep every recognized line (the original dropped all
                        # but texts[0]).
                        text = " ".join(texts)
                        ocr_keywords.extend(self._extract_keywords(text))
                        all_text += " " + text
        else:
            # Fallback: whole-image recognition without region detection.
            texts = self.reader.readtext(img, detail=0)
            for t in texts or []:
                ocr_keywords.extend(self._extract_keywords(t))
                all_text += " " + t

        # Classification
        doc_type, accuracy, matched_keywords = self._classify(all_text)

        # Verification — exact (case-insensitive) substring match against the
        # combined text, so multi-word keywords are supported.
        raw_input_keywords = user_keywords
        minimal_norm_user_keywords = [kw.strip() for kw in raw_input_keywords if kw is not None]
        # Skip empty keywords: "" is a substring of everything and would force
        # a spurious "verified" status.
        exact_matches = list({kw for kw in minimal_norm_user_keywords
                              if kw and kw.lower() in all_text.lower()})
        status = "verified" if exact_matches else "not_verified"

        return {
            "documentType": doc_type,
            "documentTypeAccuracy": round(accuracy, 4),
            "ocrKeywords": ocr_keywords,
            "inputUserKeywords": minimal_norm_user_keywords,
            "rawInputUserKeywords": raw_input_keywords,
            "exactMatchingKeywords": exact_matches,
            "verificationStatus": status,
            "imagePath": image_path
        }
205
+
206
# ============ APP ============

# Module-level singleton: detection/recognition models are loaded once at
# import time and shared across all Gradio requests.
verifier = DocumentOCRVerifier()
209
+
210
+
211
def save_upload_to_tmp(uploaded_file) -> str:
    """
    Persist an uploaded file to a scratch directory with a unique name and
    return the absolute path.

    Accepts, in order of preference:
      * an existing filesystem path (returned unchanged),
      * raw bytes,
      * a file-like object exposing .read() (e.g. Gradio's NamedTemporaryFile),
      * a path-like value, with a PIL re-encode as the last resort.
    """
    if isinstance(uploaded_file, str) and os.path.exists(uploaded_file):
        return uploaded_file

    # tempfile.gettempdir() instead of a hard-coded "/tmp" keeps this portable
    # (Windows, containers with TMPDIR set, ...).
    tmp_dir = os.path.join(tempfile.gettempdir(), "ocr_app")
    os.makedirs(tmp_dir, exist_ok=True)

    # Preserve the original extension when one is available.
    ext = ".jpg"
    name_hint = getattr(uploaded_file, "name", None)
    if name_hint:
        _, e = os.path.splitext(name_hint)
        if e:
            ext = e

    out_path = os.path.join(tmp_dir, f"{int(time.time())}_{uuid.uuid4().hex}{ext}")

    if isinstance(uploaded_file, bytes):
        with open(out_path, "wb") as f:
            f.write(uploaded_file)
    elif hasattr(uploaded_file, "read"):
        # File-like object: read it directly. (The original fed such objects to
        # open(), which raises TypeError and forced the PIL fallback.)
        with open(out_path, "wb") as f:
            f.write(uploaded_file.read())
    else:
        try:
            # Gradio sometimes gives a path.
            with open(uploaded_file, "rb") as src, open(out_path, "wb") as dst:
                dst.write(src.read())
        except Exception:
            # Last resort: let PIL interpret whatever we received.
            import PIL.Image as Image
            im = Image.open(uploaded_file).convert("RGB")
            im.save(out_path)
    return out_path
246
+
247
+
248
def run_ocr(image, keywords_raw: str):
    """
    Run the OCR + verification pipeline for the Gradio UI.

    image: uploaded file path or bytes (Gradio File component).
    keywords_raw: raw user string. Split on commas ONLY so spacing inside a
        keyword is preserved; surrounding whitespace is trimmed per keyword.
        Blank entries (e.g. from a trailing or doubled comma) are dropped —
        an empty keyword is a substring of any text and would force a
        spurious "verified" status downstream.
    Returns: (image preview path, HTML summary, parsed result dict, raw JSON string).
    """
    # Parse user keywords, preserving internal spaces.
    if keywords_raw is None:
        user_keywords = []
    else:
        user_keywords = [s.strip(" \t\r\n") for s in keywords_raw.split(",")]
        user_keywords = [s for s in user_keywords if s]  # drop empties (bug fix)

    image_path = save_upload_to_tmp(image)
    result = verifier.verify(image_path=image_path, user_keywords=user_keywords)

    # Build a simple card-style HTML summary, color-coded by verification status.
    status = result.get("verificationStatus", "not_verified")
    doc_type = result.get("documentType", "UNCLASSIFIED")
    accuracy = result.get("documentTypeAccuracy", 0.0)
    input_keys = result.get("inputUserKeywords", [])

    status_color = "#1a7f37" if status == "verified" else "#b22222"
    card_html = f"""
    <div style='border-radius:8px;padding:14px;box-shadow:0 4px 12px rgba(0,0,0,0.08);max-width:640px;font-family:system-ui, -apple-system, Segoe UI, Roboto, "Helvetica Neue", Arial;'>
      <div style='display:flex;align-items:center;justify-content:space-between;margin-bottom:8px;'>
        <div style='font-size:16px;font-weight:600'>Document Summary</div>
        <div style='padding:6px 10px;border-radius:14px;background:{status_color};color:white;font-weight:700'>{status.upper()}</div>
      </div>
      <div style='display:grid;grid-template-columns:1fr 1fr;gap:8px;'>
        <div style='background:#fafafa;padding:10px;border-radius:6px;'>
          <div style='font-size:12px;color:#555'>Document Type</div>
          <div style='font-size:15px;font-weight:600'>{doc_type}</div>
        </div>
        <div style='background:#fafafa;padding:10px;border-radius:6px;'>
          <div style='font-size:12px;color:#555'>Document Accuracy</div>
          <div style='font-size:15px;font-weight:600'>{accuracy*100:.2f}%</div>
        </div>
        <div style='grid-column:1 / -1;background:#fff8e6;padding:10px;border-radius:6px;'>
          <div style='font-size:12px;color:#555'>Input Keywords</div>
          <div style='font-size:14px;font-weight:600'>{', '.join(input_keys) if input_keys else '—'}</div>
        </div>
      </div>
    </div>
    """

    raw_json_str = json.dumps(result, indent=2, ensure_ascii=False)

    # Preview path (Gradio Image accepts a file path), summary HTML, parsed JSON, raw JSON string.
    return image_path, card_html, result, raw_json_str
298
+
299
+
300
# ---- Gradio UI wiring ----
with gr.Blocks(title="Document OCR Verifier") as demo:
    gr.Markdown("Upload an image and provide comma-separated keywords (preserve spacing inside keywords).")
    with gr.Row():
        with gr.Column(scale=1):
            # Inputs: the document image plus the comma-separated keyword list.
            img_in = gr.File(label="Image (JPEG/PNG). The file will be stored in /tmp.")
            kws = gr.Textbox(label="User keywords (comma-separated). Example: ROHIT, KUMAR, SINGH")
            run_btn = gr.Button("Run OCR & Verify")
        with gr.Column(scale=1):
            # Outputs: echoed preview of the upload and the HTML summary card.
            img_preview = gr.Image(label="Uploaded Image Preview")
            summary_card = gr.HTML(label="Summary")

    # Raw JSON output area (both parsed JSON and raw string)
    with gr.Row():
        json_view = gr.JSON(label="Result (parsed JSON)")
        raw_out = gr.Textbox(label="Result (raw JSON)", lines=20)

    # Single click handler drives the whole pipeline (run_ocr).
    run_btn.click(fn=run_ocr, inputs=[img_in, kws], outputs=[img_preview, summary_card, json_view, raw_out])

if __name__ == "__main__":
    # Bind on all interfaces so the app is reachable from inside a container.
    demo.launch(server_name="0.0.0.0", server_port=7860)