# NOTE: the lines "Spaces: Runtime error / Runtime error" below this header were
# Hugging Face Spaces page chrome captured by the scrape, not part of the script.
"""
Deepfake-Proof eKYC System (DLIB-Free)
--------------------------------------
This script:
  - Installs required dependencies
  - Downloads ONNX & auxiliary models via gdown
  - Initializes FaceAnalysis (buffalo_l)
  - Runs a Gradio web interface for identity + liveness verification
"""
import os
import warnings
import subprocess
import numpy as np
import cv2
import onnxruntime as ort
from insightface.app import FaceAnalysis
from PIL import Image
import gradio as gr
import time
import sys
from typing import Optional, Tuple, Any
# ==============================================================================
# 1. INSTALLATION (for Colab / Local Run)
# ==============================================================================
print("--- 1. Installing Required Libraries ---")
try:
    # Invoke pip through the current interpreter: a bare "pip" executable may
    # belong to a different Python environment than the one running this script.
    subprocess.run([
        sys.executable, "-m", "pip", "install",
        "insightface==0.7.3", "numpy", "onnxruntime",
        "opencv-python", "matplotlib", "tqdm", "gdown", "gradio"
    ], check=True)
except Exception as e:
    # Best-effort: continue, since an already-provisioned environment can still run.
    print(f"WARNING: Installation failed: {e}")
# Suppress warnings for a cleaner output
warnings.filterwarnings("ignore")
# ==============================================================================
# 2. MODEL DOWNLOAD SETUP
# ==============================================================================
# All downloaded model artifacts are stored under this local directory.
TARGET_DIR = './models'
os.makedirs(TARGET_DIR, exist_ok=True)
# On-disk paths of the deepfake-classifier ONNX models, keyed by the short
# model name offered in the Gradio dropdown.
MODEL_PATHS = {
    "mobilenetv3": os.path.join(TARGET_DIR, "mobilenetv3_small_100_final.onnx"),
    "efficientnet_b0": os.path.join(TARGET_DIR, "efficientnet_b0_final.onnx"),
    "edgenext": os.path.join(TARGET_DIR, "edgenext_small_final.onnx"),
}
# Google Drive file IDs, keyed by the filename each download is saved under.
MODEL_FILES = {
    # Deepfake Detector Components (for compatibility; not used in DLIB-free flow)
    "deploy.prototxt": "1V02QA7eOnrkKixTdnP6cvIBx4Qxqwhmw",
    "res10_300x300_ssd_iter_140000_fp16.caffemodel": "14n7DryxHqwqac9z0HzpIqtipBp5EfRvA",
    "shape_predictor_81_face_landmarks.dat": "1sixwbA4oOn7Ijmm85sAODL8AtwjCq6a9",
    # Deepfake Classification Models
    "mobilenetv3_small_100_final.onnx": "1spFbTIL8nRmIBG_F6j6-aF01fWGVGo_f",
    "efficientnet_b0_final.onnx": "1TsHUbx0cd-55XDygQIAmEbXFUGHxBT_x",
    "edgenext_small_final.onnx": "15hnhznZVyASYhSOYOFSsgMGEfsyh1MBY"
}
def download_models_from_drive():
    """Download every file listed in MODEL_FILES from Google Drive.

    Files that already exist locally with a non-zero size are skipped, so
    repeated runs are cheap. A failed download is logged and does not abort
    the loop; any still-missing model is reported again during model
    initialization.
    """
    print(f"\n--- 2. Downloading Deepfake Models to {TARGET_DIR} ---")
    import gdown  # imported lazily: only available after the install step
    for filename, file_id in MODEL_FILES.items():
        local_path = os.path.join(TARGET_DIR, filename)
        # Skip files that are already present and non-empty.
        if os.path.exists(local_path) and os.path.getsize(local_path) > 0:
            continue
        try:
            # Original printed a literal "(unknown)" here; report the real filename.
            print(f"Downloading {filename} ...")
            gdown.download(id=file_id, output=local_path, quiet=True, fuzzy=True)
        except Exception as e:
            print(f"WARNING: Failed to download {filename}: {e}")
download_models_from_drive()
print("Model files are ready.")
# ==============================================================================
# 3. MODEL INITIALIZATION
# ==============================================================================
SIM_MODEL_NAME = 'buffalo_l'
CTX_ID = -1  # CPU
ID_MATCH_THRESHOLD = 0.50    # cosine similarity above this => same identity
FAKE_SCORE_THRESHOLD = 0.50  # fake probability above this => deepfake
ONNX_SESSIONS = {}           # short model name -> onnxruntime.InferenceSession
app: Optional[FaceAnalysis] = None
print("\n--- 3. Initializing Face and Deepfake Models ---")
try:
    app = FaceAnalysis(name=SIM_MODEL_NAME, providers=['CPUExecutionProvider'])
    app.prepare(ctx_id=CTX_ID, det_size=(640, 640), det_thresh=0.5,
                allowed_modules=['detection', 'landmark', 'recognition'])
    for model_name, path in MODEL_PATHS.items():
        if os.path.exists(path):
            ONNX_SESSIONS[model_name] = ort.InferenceSession(path, providers=['CPUExecutionProvider'])
            print(f"Loaded {model_name.upper()} model.")
        else:
            print(f"WARNING: Missing {model_name.upper()} at {path}")
    if not ONNX_SESSIONS:
        raise FileNotFoundError("No ONNX deepfake models could be loaded.")
except Exception as e:
    print(f"ERROR: Model initialization failed: {e}")
    app = None  # signals downstream code that the pipeline is unusable
else:
    # Only announce success when everything above actually succeeded; the
    # original printed this unconditionally, even right after a failure.
    print("Model initialization complete.")
# ==============================================================================
# 4. HELPER FUNCTIONS
# ==============================================================================
def get_largest_face(faces: list) -> Optional[Any]:
    """Return the detected face with the largest bounding-box area, or None
    when the list is empty."""
    if not faces:
        return None

    def _bbox_area(f):
        b = f.bbox.astype(np.int32)
        return (b[2] - b[0]) * (b[3] - b[1])

    return max(faces, key=_bbox_area)
def get_face_data(img_array_rgb: np.ndarray):
    """Detect the dominant face in an RGB image.

    Returns a 4-tuple ``(embedding, landmarks, bgr_image, bbox)``. Elements
    are ``None`` when the face model is unavailable or no face was found;
    the BGR image is still returned when detection ran but found nothing.
    """
    if app is None:
        return None, None, None, None
    bgr = cv2.cvtColor(img_array_rgb, cv2.COLOR_RGB2BGR)
    detections = app.get(bgr)
    if not detections:
        return None, None, bgr, None
    primary = get_largest_face(detections)
    return primary.embedding, primary.lmk, bgr, primary.bbox
def calculate_similarity(e1, e2) -> float:
    """Return the cosine similarity of two embedding vectors.

    Returns 0.0 when either embedding is missing or degenerate (zero norm),
    so "no signal" and "no match" are handled uniformly by callers. The
    zero-norm guard prevents a division by zero that would produce NaN and
    silently break the downstream threshold comparison.
    """
    if e1 is None or e2 is None:
        return 0.0
    n1 = np.linalg.norm(e1)
    n2 = np.linalg.norm(e2)
    if n1 == 0 or n2 == 0:
        return 0.0
    return float(np.dot(e1 / n1, e2 / n2))
def align_face_insightface(img_bgr, landmarks_5pt, output_size=160):
    """Warp a face to a canonical square crop using a 5-point template.

    The reference coordinates (eyes, nose tip, mouth corners) are the
    standard template defined on a 96-pixel face, scaled to ``output_size``.
    """
    template = np.array([
        [30.2946, 51.6963],
        [65.5318, 51.6963],
        [48.0252, 71.7366],
        [33.5493, 92.3655],
        [62.7299, 92.3655],
    ], dtype=np.float32) * (output_size / 96)
    points = landmarks_5pt.astype(np.float32)
    # LMedS estimation is robust to a single badly-placed landmark.
    transform, _ = cv2.estimateAffinePartial2D(points, template, method=cv2.LMEDS)
    return cv2.warpAffine(img_bgr, transform, (output_size, output_size),
                          flags=cv2.INTER_CUBIC)
def get_liveness_score(img_rgb, landmarks_5pt, model_choice) -> float:
    """Return the fake probability in [0, 1] for the face in ``img_rgb``.

    Higher means more likely a deepfake. This check fails CLOSED: when the
    requested model is unavailable or inference raises, 1.0 is returned so
    the caller rejects the sample. (The original returned 0.0 on failure,
    which counted as "real" and let errors pass the liveness gate.)
    """
    if model_choice not in ONNX_SESSIONS:
        return 1.0
    try:
        session = ONNX_SESSIONS[model_choice]
        img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
        face_crop = align_face_insightface(img_bgr, landmarks_5pt, 160)
        face_rgb = cv2.cvtColor(face_crop, cv2.COLOR_BGR2RGB)
        # Normalize to [-1, 1], HWC -> NCHW float32, batch of one.
        normalized = (face_rgb / 255.0 - 0.5) / 0.5
        input_tensor = np.transpose(normalized, (2, 0, 1))[None, ...].astype("float32")
        input_name = session.get_inputs()[0].name
        output_name = session.get_outputs()[0].name
        logit = session.run([output_name], {input_name: input_tensor})[0]
        # Single-logit output; sigmoid converts it to a fake probability.
        probability = 1 / (1 + np.exp(-logit))
        return float(np.ravel(probability)[0])
    except Exception as e:
        print(f"Liveness check failed: {e}")
        return 1.0  # fail closed: an unverifiable sample is treated as fake
# ==============================================================================
# 5. UNIFIED eKYC LOGIC
# ==============================================================================
def unified_ekyc_analysis(model_choice: str, img_A_pil: Image.Image, img_B_pil: Image.Image):
    """Run the full eKYC pipeline on a live selfie (A) and a document photo (B).

    Steps: detect a face in each image, match identities via cosine
    similarity, then score each image for deepfake artifacts (liveness is
    only evaluated when the identities match). Returns
    ``(annotated_image_A, annotated_image_B, markdown_report)``.
    """
    if app is None or not ONNX_SESSIONS or model_choice not in ONNX_SESSIONS:
        err = "# ERROR: Models not initialized. Please restart or check logs."
        return None, None, err
    # Gradio passes None for an image input the user left empty; the original
    # crashed on .convert() here.
    if img_A_pil is None or img_B_pil is None:
        return img_A_pil, img_B_pil, "**Please provide both images before submitting.**"
    start = time.time()
    img_A, img_B = np.array(img_A_pil.convert('RGB')), np.array(img_B_pil.convert('RGB'))
    e1, lmk_A, visA, bboxA = get_face_data(img_A)
    e2, lmk_B, visB, bboxB = get_face_data(img_B)
    if e1 is None or e2 is None or lmk_A is None or lmk_B is None:
        return img_A_pil, img_B_pil, "**Face not detected properly in one/both images.**"
    match_score = calculate_similarity(e1, e2)
    match_ok = match_score > ID_MATCH_THRESHOLD
    # Skip the (comparatively expensive) deepfake check when identities differ.
    liveness_A = get_liveness_score(img_A, lmk_A, model_choice) if match_ok else 0.0
    liveness_B = get_liveness_score(img_B, lmk_B, model_choice) if match_ok else 0.0
    is_real_A, is_real_B = liveness_A <= FAKE_SCORE_THRESHOLD, liveness_B <= FAKE_SCORE_THRESHOLD
    accept = match_ok and is_real_A and is_real_B
    # Annotate the detected faces: green = accepted, red = rejected (BGR order).
    bbox_color = (0, 255, 0) if accept else (0, 0, 255)
    for vis, bbox in [(visA, bboxA), (visB, bboxB)]:
        b = bbox.astype(int)
        cv2.rectangle(vis, (b[0], b[1]), (b[2], b[3]), bbox_color, 3)
    # When liveness was skipped (identity mismatch), report SKIPPED instead of
    # a misleading "REAL" derived from the placeholder 0.0 scores.
    status_A = ('REAL' if is_real_A else 'FAKE') if match_ok else 'SKIPPED'
    status_B = ('REAL' if is_real_B else 'FAKE') if match_ok else 'SKIPPED'
    report = f"""
## ZenTej eKYC Report
**Final Decision:** {'ACCEPT' if accept else 'REJECT'}
**Reason:** {'All checks passed' if accept else 'Mismatch or Forgery detected'}
| Check | Value | Status |
|:--|:--|:--|
| Cosine Similarity | `{match_score:.4f}` | {'MATCH' if match_ok else 'MISMATCH'} |
| Live Image Fake Score | `{liveness_A:.4f}` | {status_A} |
| Doc Image Fake Score | `{liveness_B:.4f}` | {status_B} |
| Thresholds | ID>{ID_MATCH_THRESHOLD}, FAKE<={FAKE_SCORE_THRESHOLD} | |
Time: {time.time()-start:.3f}s | Model: **{model_choice.upper()}**
"""
    return Image.fromarray(cv2.cvtColor(visA, cv2.COLOR_BGR2RGB)), Image.fromarray(cv2.cvtColor(visB, cv2.COLOR_BGR2RGB)), report
# ==============================================================================
# 6. GRADIO INTERFACE
# ==============================================================================
if __name__ == "__main__":
    print("\n--- 4. Launching Gradio App ---")
    # Offer every successfully loaded deepfake model; prefer edgenext.
    available = list(ONNX_SESSIONS.keys())
    if "edgenext" in available:
        default = "edgenext"
    elif available:
        default = available[0]
    else:
        default = None
    if not default:
        print("β No deepfake models available.")
        sys.exit(1)
    demo = gr.Interface(
        fn=unified_ekyc_analysis,
        inputs=[
            gr.Dropdown(label="Select Deepfake Model", choices=available, value=default),
            gr.Image(label="Input 1: Live Selfie", type="pil", sources=["upload", "webcam"]),
            gr.Image(label="Input 2: Document Photo", type="pil"),
        ],
        outputs=[
            gr.Image(label="Processed Input 1"),
            gr.Image(label="Processed Input 2"),
            gr.Markdown(label="Verification Report"),
        ],
        title="DLIB-Free eKYC Deepfake-Proof Verification",
        description="Performs two-step verification: Identity Match + Deepfake/Liveness Detection.",
    )
    # Bind to all interfaces on the conventional Spaces/Gradio port.
    demo.launch(server_name="0.0.0.0", server_port=7860)