ConiferousYogi committed on
Commit
43b62cd
·
verified ·
1 Parent(s): a882ac6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +556 -0
app.py ADDED
@@ -0,0 +1,556 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, render_template, jsonify, flash, redirect, url_for
2
+ import cv2
3
+ import pytesseract
4
+ from PIL import Image
5
+ from PIL.ExifTags import TAGS
6
+ from ultralytics import YOLO
7
+ import os
8
+ import re
9
+ import numpy as np
10
+ from werkzeug.utils import secure_filename
11
+ import tempfile
12
+ import numpy as np
13
+ from huggingface_hub import hf_hub_download
14
+ from supervision import Detections
15
+ import requests
16
+ from skimage.metrics import structural_similarity as ssim
17
+ import xml.etree.ElementTree as ET
18
+ from pyzbar.pyzbar import decode
19
+ import supervision as sv
20
+
21
+
22
+ import firebase_admin
23
+ from firebase_admin import credentials, firestore
24
+
25
# Initializing Firebase.  Analysis results are persisted to Firestore when a
# connection is available; otherwise `db` stays None and saving is skipped.
try:
    cred = credentials.Certificate("firebase-credentials.json")
    firebase_admin.initialize_app(cred)
    db = firestore.client()
    print("Successfully connected to Firebase.")
except Exception as e:
    print(f"Error connecting to Firebase: {e}")
    db = None

# Tesseract binary location: overridable via TESSERACT_PATH so the app can run
# on Linux/Docker as well as the Windows default install path.
tesseract_path = os.getenv("TESSERACT_PATH", r"C:\Program Files\Tesseract-OCR\tesseract.exe")
pytesseract.pytesseract.tesseract_cmd = tesseract_path

app = Flask(__name__)
# NOTE(review): a hard-coded secret key is unsafe for production.  Prefer the
# SECRET_KEY environment variable; the literal is kept only as a fallback so
# existing deployments keep working.
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'secret-key')
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16MB max file size

os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

# General-purpose YOLOv8 model (COCO classes).
try:
    model = YOLO("yolov8n.pt")
except Exception:  # narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt
    model = None
    print("Warning: YOLO model not loaded. Install ultralytics and download yolov8n.pt")

# Loading the pre-trained YOLO Object Detection model (custom tamper detector).
try:
    OBJECT_DETECTION_MODEL_PATH = "./models/best.pt"
    object_detection_model = YOLO(OBJECT_DETECTION_MODEL_PATH)
    print("Object detection model loaded successfully.")
    print(f"Object Detection Model classes: {object_detection_model.names}")
except Exception as e:
    object_detection_model = None
    # Fixed: the old handler also printed the unrelated yolov8n warning here,
    # which was misleading when only the custom model failed to load.
    print(f"Warning: Custom Object Detection model not loaded. Error: {e}")
63
+
64
# Allowed file extensions
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'bmp'}

def allowed_file(filename):
    """Return True when *filename* carries an extension we accept for upload."""
    _, dot, ext = filename.rpartition('.')
    return bool(dot) and ext.lower() in ALLOWED_EXTENSIONS
69
+
70
# Cayley Table for Verhoeff Checksum (multiplication in the dihedral group D5)
_d = (
    (0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
    (1, 2, 3, 4, 0, 6, 7, 8, 9, 5),
    (2, 3, 4, 0, 1, 7, 8, 9, 5, 6),
    (3, 4, 0, 1, 2, 8, 9, 5, 6, 7),
    (4, 0, 1, 2, 3, 9, 5, 6, 7, 8),
    (5, 9, 8, 7, 6, 0, 4, 3, 2, 1),
    (6, 5, 9, 8, 7, 1, 0, 4, 3, 2),
    (7, 6, 5, 9, 8, 2, 1, 0, 4, 3),
    (8, 7, 6, 5, 9, 3, 2, 1, 0, 4),
    (9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
)

# permutation table for Verhoeff Checksum (applied cyclically by digit position)
_p = (
    (0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
    (1, 5, 7, 6, 2, 8, 3, 0, 9, 4),
    (5, 8, 0, 3, 7, 9, 6, 1, 4, 2),
    (8, 9, 1, 6, 0, 4, 3, 5, 2, 7),
    (9, 4, 5, 3, 1, 2, 6, 8, 7, 0),
    (4, 2, 8, 6, 5, 7, 3, 9, 0, 1),
    (2, 7, 9, 3, 8, 0, 6, 4, 1, 5),
    (7, 0, 4, 6, 9, 1, 3, 2, 5, 8)
)

# inverse table: _inv[c] is the digit that multiplies c to the identity in _d
_inv = (0, 4, 3, 2, 1, 5, 6, 7, 8, 9)

def generate_checksum(num_str):
    """Return the Verhoeff check digit for *num_str* (digits only, no checksum).

    Fixed: generation must offset the position index by one — ``(i + 1) % 8``
    — because the check digit will occupy position 0 of the final number.
    The old code used the validation formula (``i % 8``), producing check
    digits that fail ``validate_checksum``.
    """
    c = 0
    num_digits = [int(d) for d in num_str]
    for i, digit in enumerate(reversed(num_digits)):
        c = _d[c][_p[(i + 1) % 8][digit]]
    return _inv[c]

def validate_checksum(num_str_with_checksum):
    """Return True when the full digit string (checksum included) is valid."""
    c = 0
    num_digits = [int(d) for d in num_str_with_checksum]
    for i, digit in enumerate(reversed(num_digits)):
        c = _d[c][_p[(i % 8)][digit]]
    return c == 0
111
+
112
def get_exif_data(image_path):
    """Extract EXIF metadata from an image.

    Returns a dict of human-readable tag name -> value, an empty dict when
    the image carries no EXIF block, or ``{"error": ...}`` when the file
    cannot be opened.
    """
    try:
        img = Image.open(image_path)
        tags = {}
        getexif = getattr(img, '_getexif', None)
        raw = getexif() if getexif is not None else None
        if raw:
            for tag_id, value in raw.items():
                tags[TAGS.get(tag_id, tag_id)] = value
        return tags
    except Exception as e:
        return {"error": str(e)}
126
+
127
+
128
# Loading general object detection model (YOLO v8, COCO classes).
try:
    general_model = YOLO("yolov8n.pt")
except Exception:  # narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt
    general_model = None
    print("Warning: General YOLO model not loaded. Install ultralytics and download yolov8n.pt")

# Hugging Face Hub location of the pre-trained Aadhaar-specific YOLO model.
repo_config = dict(
    repo_id="arnabdhar/YOLOv8-nano-aadhar-card",
    filename="model.pt",
    local_dir="./models",
)
141
+
142
def detect_objects_yolo(image_path):
    """Detect objects in *image_path* with the general YOLO model.

    Returns the detected class labels, whether a person was found, and a
    simple fraud indicator (detections present but no person among them).
    """
    try:
        if general_model is None:
            return {"error": "YOLO model not available"}

        frame = cv2.imread(image_path)
        predictions = general_model(frame)
        class_ids = predictions[0].boxes.cls
        labels = [general_model.names[int(c)] for c in class_ids]
        person_found = "person" in labels

        return {
            "detected_objects": labels,
            "human_detected": person_found,
            "fraud_indicator": bool(labels) and not person_found,
        }
    except Exception as e:
        return {"error": str(e)}
160
+
161
+
162
# Loading the pre-trained YOLO Aadhaar model (weights fetched from the HF Hub).
_aadhaar_weights_path = hf_hub_download(**repo_config)
aadhaar_model = YOLO(_aadhaar_weights_path)
id2label = aadhaar_model.names  # class-id -> field-name mapping
print(id2label)
166
+
167
+
168
# Verifying whether the image shows a fraudulent Aadhaar card via object detection.
def run_object_verification(image_path, object_model_raw_results):
    """Summarise the custom tamper-detection model's raw output.

    Returns the unique detected class names and whether any 'Tampered'
    region was flagged, or ``{"error": ...}`` on failure.
    """
    if object_detection_model is None:
        return {"error": "Object detection model not available."}

    try:
        seen = set()
        for box in object_model_raw_results.boxes:
            seen.add(object_detection_model.names[int(box.cls[0])])

        return {
            "detected_objects": list(seen),
            "is_tampered": 'Tampered' in seen,
        }
    except Exception as e:
        return {"error": f"Object verification failed: {str(e)}"}
189
+
190
+
191
+
192
def decode_aadhaar_qr(image_path):
    """Decode the QR code printed on the back of an Aadhaar card.

    The legacy Aadhaar QR payload is an XML element whose attributes carry
    the holder's details; those attributes are returned.  Returns an
    ``error`` dict when no QR is found or the payload is not XML.
    """
    try:
        decoded = decode(Image.open(image_path))
        if not decoded:
            return {"error": "QR Code not found or could not be read."}

        payload = decoded[0].data.decode('utf-8', errors='ignore')
        try:
            attrs = ET.fromstring(payload).attrib
        except ET.ParseError:
            return {"error": "QR data is not valid XML.", "raw_data": payload}

        return {
            "name": attrs.get("name"),
            "dob": attrs.get("dob"),
            "gender": attrs.get("gender"),
            "uid": attrs.get("uid"),
        }
    except Exception as e:
        return {"error": f"QR Code processing failed: {str(e)}"}
216
+
217
+
218
def extract_aadhaar_data(image_path, text_model_raw_results):
    """OCR each field region located by the Aadhaar text-detection model.

    Returns a dict keyed by friendly field names ('Name', 'Aadhaar Number',
    'Gender', 'Date of Birth', 'Address'), or ``{"error": ...}`` on failure.
    """
    try:
        detections = Detections.from_ultralytics(text_model_raw_results)
        image = np.array(Image.open(image_path))

        # Model class name -> friendly output key.
        key_mapping = {
            'NAME': 'Name',
            'AADHAR_NUMBER': 'Aadhaar Number',
            'GENDER': 'Gender',
            'DATE_OF_BIRTH': 'Date of Birth',
            'ADDRESS': 'Address',
        }

        aadhaar_data = {}
        for bbox, cls_name in zip(detections.xyxy, detections.data['class_name']):
            x1, y1, x2, y2 = (int(v) for v in bbox)
            roi = image[y1:y2, x1:x2]
            if roi.size == 0:
                continue

            label = str(cls_name)
            if label == 'AADHAR_NUMBER':
                # Stricter Tesseract config for the number: single text line,
                # digits only.
                config = '--psm 7 -c tessedit_char_whitelist=0123456789 '
            else:
                # Reasonable default segmentation mode for free-form fields.
                config = '--psm 6'

            text = pytesseract.image_to_string(
                roi,
                lang="eng+hin",
                config=config,
            ).strip()

            aadhaar_data[key_mapping.get(label, label)] = text

        print(f"Final Aadhaar Data: {aadhaar_data}")
        return aadhaar_data
    except Exception as e:
        return {"error": f"OCR failed: {str(e)}"}
259
+
260
def validate_aadhaar_number(aadhaar_data):
    """Validate the OCR'd Aadhaar number using the Verhoeff checksum.

    Returns a dict with ``valid``, a human-readable ``reason`` and, when the
    number is well-formed, its space-stripped ``clean_number``.
    """
    try:
        number = aadhaar_data.get("Aadhaar Number")
        if not number:
            result = {"valid": False, "reason": "No Aadhaar number found"}
            print(f"Validation result: {result}")
            return result

        clean_number = number.replace(" ", "")

        if len(clean_number) != 12:
            return {"valid": False, "reason": f"Invalid length: {len(clean_number)} (should be 12)"}

        if not clean_number.isdigit():
            return {"valid": False, "reason": "Contains non-digit characters"}

        is_valid = validate_checksum(clean_number)
        return {
            "valid": is_valid,
            "reason": "Valid Aadhaar number" if is_valid else "Invalid checksum",
            "clean_number": clean_number,
        }
    except Exception as e:
        result = {"valid": False, "reason": f"Error: {str(e)}"}
        print(f"Validation error: {result}")
        return result
289
+
290
+
291
def create_annotated_image(image_path, text_model_results, object_model_results):
    """Draw both models' detections onto the image and save it under static/.

    Text-field detections are drawn in blue, tamper detections in red.
    Returns the saved filename, or None when annotation fails.
    """
    try:
        scene = cv2.imread(image_path).copy()

        def _draw(raw_results, color):
            # Overlay boxes + labels for one model's detections onto `scene`.
            nonlocal scene
            detections = Detections.from_ultralytics(raw_results)
            box_annotator = sv.BoxAnnotator(color=color, thickness=2)
            label_annotator = sv.LabelAnnotator(color=color, text_color=sv.Color.WHITE, text_scale=0.5)
            scene = box_annotator.annotate(scene=scene, detections=detections)
            scene = label_annotator.annotate(scene=scene, detections=detections)

        _draw(text_model_results, sv.Color.BLUE)   # text-extraction boxes
        _draw(object_model_results, sv.Color.RED)  # tamper-verification boxes

        annotated_filename = "annotated_" + os.path.basename(image_path)
        cv2.imwrite(os.path.join('static', annotated_filename), scene)
        return annotated_filename
    except Exception as e:
        print(f"Error creating annotated image: {e}")
        return None
320
+
321
+
322
def analyze_aadhar_pair(front_path, back_path):
    """Run the complete fraud analysis on a front/back Aadhaar image pair.

    Pipeline: field OCR (both sides), tamper detection (both sides), EXIF
    inspection, QR decoding (back), person detection (front), then cross-checks
    of printed values against the QR payload.  Returns a dict with per-side
    results, a fraud_indicators list, a numeric fraud_score, an overall
    assessment, and the raw model outputs under 'raw_results' (used for
    annotation).  A copy minus the raw results is saved to Firestore when a
    connection exists.
    """
    # Text-extraction model on both sides.
    text_model_raw_results_front = aadhaar_model.predict(front_path, verbose=False)[0]
    text_model_raw_results_back = aadhaar_model.predict(back_path, verbose=False)[0]

    front_ocr_results = extract_aadhaar_data(front_path, text_model_raw_results_front)
    back_ocr_results = extract_aadhaar_data(back_path, text_model_raw_results_back)

    # Tamper detection on both sides.
    object_model_raw_results_front = object_detection_model(front_path, verbose=False)[0]
    object_results_front = run_object_verification(front_path, object_model_raw_results_front)

    object_model_raw_results_back = object_detection_model(back_path, verbose=False)[0]
    object_results_back = run_object_verification(back_path, object_model_raw_results_back)

    # EXIF metadata on both sides.
    exif_results_front = get_exif_data(front_path)
    exif_results_back = get_exif_data(back_path)

    # QR code analysis (the QR sits on the back of the card).
    qr_results = decode_aadhaar_qr(back_path)

    # Person detection on the front (the photo area should contain a person).
    general_model_results_front = general_model(front_path, verbose=False)[0]
    general_labels = [general_model.names[int(cls)] for cls in general_model_results_front.boxes.cls]
    human_detected = "person" in general_labels

    results = {
        "front": {
            "object_verification": object_results_front,
            "exif_analysis": exif_results_front,
            "ocr_analysis": front_ocr_results,
            "face_detection": {"human_detected": human_detected, "detected_objects": general_labels},
        },
        "back": {
            "object_verification": object_results_back,
            "exif_analysis": exif_results_back,
            "ocr_analysis": back_ocr_results,
            "qr_analysis": qr_results,
            "general_detection": {"human_detected": human_detected, "detected_objects": general_labels},
        },
        "fraud_indicators": [],
        "raw_results": {
            "text_front": text_model_raw_results_front,
            "text_back": text_model_raw_results_back,
            "object_front": object_model_raw_results_front,
            "object_back": object_model_raw_results_back,
        },
    }

    # Front OCR plus the address, which is printed on the back.
    combined_ocr_results = front_ocr_results.copy()
    if "Address" in back_ocr_results:
        combined_ocr_results["Address"] = back_ocr_results["Address"]
    results['combined_ocr'] = combined_ocr_results

    fraud_score = 0

    if "error" not in object_results_front and object_results_front.get("is_tampered"):
        results["fraud_indicators"].append("Tampered region detected on the front of the card.")
        fraud_score += 3

    # (The old follow-up check of object_results_front["fraud_indicator"] was
    # dead code — run_object_verification never sets that key — and is removed.)
    if not human_detected:
        results["fraud_indicators"].append("No human detected in the photo area (possible fake document).")
        fraud_score += 1

    if "error" not in combined_ocr_results and "error" not in qr_results:
        def _qr_value(key):
            # QR attribute values may be None when absent — never .strip() None.
            return (qr_results.get(key) or "").strip().lower()

        def _ocr_value(key):
            # FIX: extract_aadhaar_data normalizes keys via key_mapping
            # ('Name', 'Gender', 'Date of Birth', 'Aadhaar Number', 'Address').
            # The old code looked up raw model class names (e.g. DATE_OF_BIRTH)
            # and therefore always compared empty strings.
            return (combined_ocr_results.get(key) or "").strip().lower()

        ocr_name = _ocr_value("Name")
        qr_name = _qr_value("name")
        if ocr_name and qr_name and ocr_name not in qr_name:
            results["fraud_indicators"].append("Mismatch: Printed Name vs. QR Code Name.")
            fraud_score += 3

        # Gender.  FIX: the original called `.strip.lower()` (missing call
        # parentheses), raising AttributeError on every request; its message
        # also wrongly said "name".
        ocr_gender = _ocr_value("Gender")
        qr_gender = _qr_value("gender")
        if ocr_gender and qr_gender and ocr_gender not in qr_gender and qr_gender not in ocr_gender:
            results["fraud_indicators"].append("Mismatch between printed gender and QR code gender.")
            fraud_score += 1

        # Date of birth: normalize separators before comparing.
        ocr_dob = _ocr_value("Date of Birth").replace("-", "/")
        qr_dob = _qr_value("dob").replace("-", "/")
        if ocr_dob and qr_dob and ocr_dob != qr_dob:
            results["fraud_indicators"].append("Mismatch between printed date of birth (DOB) and QR date of birth (DOB).")
            fraud_score += 1

        # Aadhaar number.  FIX: decode_aadhaar_qr exposes the number as 'uid'
        # (there is no 'aadhar_num' key); spaces are stripped so
        # '1234 5678 9012' matches '123456789012'.  The old message wrongly
        # mentioned the DOB.
        ocr_num = _ocr_value("Aadhaar Number").replace(" ", "")
        qr_num = _qr_value("uid").replace(" ", "")
        if ocr_num and qr_num and ocr_num != qr_num:
            results["fraud_indicators"].append("Mismatch between printed Aadhaar number and QR code Aadhaar number.")
            fraud_score += 1

        # Address.  NOTE(review): decode_aadhaar_qr currently returns no
        # 'address' key, so this check is inert; kept so it activates if the
        # QR decoder gains that field.
        ocr_address = _ocr_value("Address")
        qr_address = _qr_value("address")
        if ocr_address and qr_address and ocr_address not in qr_address and qr_address not in ocr_address:
            results["fraud_indicators"].append("Mismatch between printed address and QR code address.")
            fraud_score += 1

    if "error" not in results["front"]["ocr_analysis"]:
        results["aadhaar_validation"] = validate_aadhaar_number(results["front"]["ocr_analysis"])

    # Missing EXIF metadata is only informational (score weight 0).
    # FIX: the original indexed results["exif_analysis_front"] — a key that
    # never exists (KeyError) — and the "error" test was inverted.
    front_exif_missing = "error" in exif_results_front or not exif_results_front
    back_exif_missing = "error" in exif_results_back or not exif_results_back
    if front_exif_missing or back_exif_missing:
        results["fraud_indicators"].append("No EXIF metadata found")
        fraud_score += 0

    if "aadhaar_validation" in results and not results["aadhaar_validation"]["valid"]:
        results["fraud_indicators"].append(
            f"Invalid Aadhaar number: {results['aadhaar_validation']['reason']}"
        )
        fraud_score += 2

    results["fraud_score"] = fraud_score
    results["assessment"] = (
        "HIGH FRAUD RISK" if fraud_score >= 3 else
        "MODERATE FRAUD RISK" if fraud_score >= 1 else
        "LOW FRAUD RISK"
    )

    # Saving the results to the Firebase db (raw YOLO objects are not
    # serializable, so they are dropped from the stored copy).
    if db:
        try:
            results_for_firestore = results.copy()
            results_for_firestore.pop('raw_results', None)
            results_for_firestore['timestamp'] = firestore.SERVER_TIMESTAMP
            db.collection('analyses').add(results_for_firestore)
            print("Analysis results saved to Firestore.")
        except Exception as e:
            print(f"Error saving to Firestore: {e}")

    return results
471
+
472
+
473
@app.route('/')
def home():
    """Render the landing page with the upload form."""
    return render_template('index.html')
476
+
477
@app.route('/upload', methods=['POST'])
def upload_file():
    """Handle the two-image (front/back) upload form and render the results page.

    Saves both uploads to temp files, runs the analysis pipeline, writes
    annotated copies into static/, and always removes the temp files.
    Re-renders the form with a flash message on any validation failure.
    """
    if 'front_image' not in request.files or 'back_image' not in request.files:
        flash('Please upload both front and back of the Aadhar card')
        return redirect(request.url)

    front_file = request.files['front_image']
    back_file = request.files['back_image']

    if front_file.filename == '' or back_file.filename == '':
        flash('Either one or both images are missing')
        return redirect(request.url)

    # Guard clause replaces the original nested if/else; also removed the
    # unused `filename = secure_filename(...)` local.
    if not (allowed_file(front_file.filename) and allowed_file(back_file.filename)):
        flash('Invalid file type. Please upload an image file.')
        return redirect(request.url)

    # Persist both uploads to temp files so the CV libraries can read paths.
    with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(front_file.filename)[1]) as tmp_front:
        front_file.save(tmp_front.name)
        front_path = tmp_front.name

    with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(back_file.filename)[1]) as tmp_back:
        back_file.save(tmp_back.name)
        back_path = tmp_back.name

    try:
        # Analyzing the image pair.
        analysis_results = analyze_aadhar_pair(front_path, back_path)

        # Annotated previews built from the raw model outputs of that run.
        raw = analysis_results['raw_results']
        annotated_image_filename_front = create_annotated_image(front_path, raw['text_front'], raw['object_front'])
        annotated_image_filename_back = create_annotated_image(back_path, raw['text_back'], raw['object_back'])

        return render_template('results.html',
                               results=analysis_results,
                               filename=f"{front_file.filename} & {back_file.filename}",
                               annotated_image_filename_front=annotated_image_filename_front,
                               annotated_image_filename_back=annotated_image_filename_back)
    finally:
        # Best-effort cleanup of both temp files.
        for path in (front_path, back_path):
            try:
                os.unlink(path)
            except OSError:
                pass
527
+
528
@app.route('/api/analyze', methods=['POST'])
def api_analyze():
    """API endpoint for programmatic access (single image under 'file').

    FIX: the original called analyze_aadhar_pair() with one argument even
    though it requires front and back paths, so every request raised
    TypeError.  We now pass the same image for both sides; for full
    front/back cross-checks use the /upload flow.  Also strips the raw YOLO
    result objects before jsonify, since they are not JSON-serializable.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file provided"}), 400

    file = request.files['file']

    if file.filename == '' or not allowed_file(file.filename):
        return jsonify({"error": "Invalid file"}), 400

    filename = secure_filename(file.filename)

    tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(filename)[1])
    tmp_file_path = tmp_file.name
    tmp_file.close()

    try:
        file.save(tmp_file_path)
        # Single-image mode: the same file serves as both sides of the card.
        analysis_results = analyze_aadhar_pair(tmp_file_path, tmp_file_path)
        analysis_results.pop('raw_results', None)
        return jsonify(analysis_results)
    finally:
        try:
            os.unlink(tmp_file_path)
        except OSError:
            pass
554
+
555
if __name__ == '__main__':
    # NOTE(review): debug=True on host 0.0.0.0 exposes the Werkzeug debugger
    # to the whole network — disable debug for any non-local deployment.
    app.run(debug=True, host='0.0.0.0', port=5000)