# Hugging Face Space header (page-scrape residue): Space status "Sleeping".
| import numpy as np | |
| import gradio as gr | |
| import cv2 | |
| import warnings | |
| import traceback | |
| import os | |
| from insightface.app import FaceAnalysis | |
| from transformers import pipeline as hf_pipeline | |
# Suppress minor warnings for a cleaner console output
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)
# ==============================================================================
# 1. CONFIGURATION AND GLOBAL INITIALIZATION
# ==============================================================================
# InsightFace model pack used for face detection + embedding extraction.
SIM_MODEL_NAME = 'buffalo_s'
# Cosine-similarity cutoff separating "same person" from "different person".
SIMILARITY_THRESHOLD = 0.5
# Passed to FaceAnalysis.prepare() as ctx_id (presumably -1 = CPU in the
# InsightFace convention — TODO confirm against the installed version).
CTX_ID = -1
# Hugging Face model id for the liveness (anti-spoofing) classifier.
LIVE_MODEL_ID = "nguyenkhoa/mobilevitv2_Liveness_detection_v1.0"
# Globals filled in by the try/except init blocks below; they stay None on
# load failure, and global_init_error accumulates human-readable diagnostics
# that combined_face_check surfaces before doing any work.
sim_app = None
live_pipe = None
global_init_error = ""
# --- Stage 1 model: InsightFace detector + embedding extractor ---
try:
    print(f"1. Initializing InsightFace model: {SIM_MODEL_NAME}...")
    sim_app = FaceAnalysis(name=SIM_MODEL_NAME)
    sim_app.prepare(ctx_id=CTX_ID, det_size=(640, 640))
    print("β InsightFace model prepared.")
except Exception as e:
    # Record the failure for combined_face_check to report; keep the app up.
    detail = f"{type(e).__name__}: {e}"
    global_init_error += f"\nβ SIMILARITY ERROR: InsightFace failed to load. \n Detail: {detail}"
    sim_app = None
# --- Stage 2 model: Hugging Face image-classification pipeline for liveness ---
try:
    print(f"2. Loading Hugging Face pipeline: {LIVE_MODEL_ID}...")
    live_pipe = hf_pipeline("image-classification", model=LIVE_MODEL_ID)
    print("β Liveness pipeline loaded.")
except Exception as e:
    # Keep only the final traceback line — the most informative one-liner.
    last_line = traceback.format_exc().strip().splitlines()[-1]
    global_init_error += (
        f"\nβ LIVENESS ERROR: HF pipeline failed to load.\n"
        f" CRITICAL DETAIL: {last_line}\n"
        f" Suggestion: Dependency conflict. Try running: !pip install --upgrade transformers accelerate huggingface-hub"
    )
    live_pipe = None
| # ============================================================================== | |
| # 2. HELPER FUNCTIONS | |
| # ============================================================================== | |
def get_largest_face(faces):
    """Return the face whose bounding box covers the largest area.

    Returns None when *faces* is empty or falsy (e.g. None or []).
    """
    if not faces:
        return None

    def _bbox_area(candidate):
        # bbox is [x1, y1, x2, y2]; cast to int32 before computing the area.
        x1, y1, x2, y2 = candidate.bbox.astype(np.int32)
        return (x2 - x1) * (y2 - y1)

    return max(faces, key=_bbox_area)
def extract_frame_from_video(video_path):
    """Read the first frame of a video file and return it as an RGB array.

    Returns a (frame, message) pair; frame is None whenever extraction fails.
    """
    print(f" [LOG] Extracting frame from video: {video_path}")
    if not os.path.exists(video_path):
        return None, "Error: Video file not found."
    capture = cv2.VideoCapture(video_path)
    if not capture.isOpened():
        return None, "Error: Could not open video file."
    success, first_frame = capture.read()
    capture.release()
    if not success:
        return None, "Error: Could not read a frame from the video."
    # OpenCV decodes to BGR; the rest of the pipeline works in RGB.
    return cv2.cvtColor(first_frame, cv2.COLOR_BGR2RGB), "Frame extracted successfully."
| # ============================================================================== | |
| # 3. CORE SEQUENTIAL FUNCTION | |
| # ============================================================================== | |
def combined_face_check(input1, input2):
    """Sequentially verify identity (similarity) and liveness for two inputs.

    Args:
        input1, input2: Either a filepath string (image or video — for video
            the first frame is used) as delivered by gr.File, or a numpy RGB
            image array.

    Returns:
        An HTML string reporting the similarity check, the liveness check,
        and a combined final status.
    """
    print("\n--- NEW REQUEST ---")
    # If either model failed to load at import time, surface that instead of
    # attempting any processing.
    if global_init_error:
        print(" [LOG] Returning Fatal Initialization Error.")
        return f"<h3 style='color:red;'>FATAL INITIALIZATION ERROR</h3><pre>{global_init_error}</pre>"
    # --- Pre-Step: Normalize Inputs (Image/Video) ---
    img1_numpy = None
    img2_numpy = None

    def normalize_input(input_data, name):
        """Coerce a filepath or numpy image into (rgb_array, message, kind)."""
        print(f" [LOG] Normalizing Input {name}...")
        if isinstance(input_data, str) and os.path.isfile(input_data):
            if input_data.lower().endswith(('.mp4', '.avi', '.mov', '.mkv')):
                frame, error_msg = extract_frame_from_video(input_data)
                return frame, error_msg, 'VIDEO'
            else:
                img_bgr = cv2.imread(input_data)
                if img_bgr is not None:
                    # cv2 reads BGR; convert so the pipeline works in RGB.
                    return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB), "Input is an image file.", 'IMAGE'
                return None, "Error: Could not read image file.", 'UNKNOWN'
        elif isinstance(input_data, np.ndarray):
            return input_data, "Input is a direct numpy image.", 'IMAGE'
        else:
            return None, "Invalid input type or missing file path.", 'UNKNOWN'

    img1_numpy, norm1_msg, norm1_type = normalize_input(input1, '1')
    img2_numpy, norm2_msg, norm2_type = normalize_input(input2, '2')
    if img1_numpy is None or img2_numpy is None:
        print(" [LOG] Input normalization failed.")
        return (f"<h3 style='color:red;'>INPUT ERROR</h3>"
                f"Input 1 Status ({norm1_type}): {norm1_msg}<br>"
                f"Input 2 Status ({norm2_type}): {norm2_msg}")
    # --- Step 1: Face Similarity Check ---
    print(" [LOG] Starting Similarity Check...")
    similarity_result_str = ""
    is_same_person = False
    try:
        # InsightFace expects BGR frames (OpenCV convention).
        img1_bgr = cv2.cvtColor(img1_numpy, cv2.COLOR_RGB2BGR)
        img2_bgr = cv2.cvtColor(img2_numpy, cv2.COLOR_RGB2BGR)
        all_faces1 = sim_app.get(img1_bgr)
        all_faces2 = sim_app.get(img2_bgr)
        face1 = get_largest_face(all_faces1)
        face2 = get_largest_face(all_faces2)
        face_count_msg = (
            f"Input 1 ({norm1_type}): {len(all_faces1)} face(s) detected, largest used. "
            f"Input 2 ({norm2_type}): {len(all_faces2)} face(s) detected, largest used."
        )
        if face1 is None or face2 is None:
            similarity_result_str = f"π Similarity Check Failed: No face detected in one or both inputs.\nDetail: {face_count_msg}"
            print(" [LOG] Similarity check failed: No face detected.")
        else:
            e1 = face1.embedding
            e2 = face2.embedding
            # Cosine similarity between the two face embeddings.
            similarity = np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2))
            # BUG FIX: a cosine score AT OR ABOVE the threshold means the two
            # faces match. The original used '<', which inverted every
            # verdict (identical faces reported MISMATCH and vice versa).
            is_same_person = similarity >= SIMILARITY_THRESHOLD
            similarity_status = "β MATCH" if is_same_person else "β MISMATCH"
            similarity_result_str = (
                f"**1. IDENTITY CHECK (InsightFace)**\n"
                f" Score: {similarity:.4f} (Threshold: {SIMILARITY_THRESHOLD})\n"
                f" Status: {similarity_status}\n"
                f" Detail: {face_count_msg}\n"
            )
            print(f" [LOG] Similarity calculated: {similarity:.4f}. Status: {similarity_status}")
    except Exception as e:
        # NOTE(review): on a runtime error is_same_person stays False, so the
        # final status reads IDENTITY MISMATCH rather than a distinct error
        # state — preserved as-is to avoid changing caller-visible behavior.
        similarity_result_str = f"**1. IDENTITY CHECK FAILED:** Runtime Error ({e.__class__.__name__}).\n"
        print(f" [LOG] Similarity check failed with runtime error: {e.__class__.__name__}")
    # --- Step 2: Liveness Check ---
    print(" [LOG] Starting Liveness Check...")
    liveness_result_str = ""
    is_live = False
    try:
        # Batch both full frames through the classifier in one call.
        live_predictions = live_pipe([img1_numpy, img2_numpy])
        results = []
        is_all_live = True
        for i, pred_list in enumerate(live_predictions):
            input_type = norm1_type if i == 0 else norm2_type
            # Predictions are sorted by score; take the top label.
            top_pred = pred_list[0]
            label = top_pred['label']
            score = top_pred['score']
            status = "β LIVE" if label.lower() == 'live' else "β SPOOF"
            results.append(f" - Input {i+1} ({input_type}{' FRAME' if input_type == 'VIDEO' else ''}): **{status}** (Pred: {label}, Score: {score:.4f})")
            if label.lower() != 'live':
                is_all_live = False
        is_live = is_all_live
        liveness_status = "β SUCCESS: Both inputs are LIVE." if is_all_live else "β FAILURE: One or both inputs are SPOOF."
        results_str = "\n".join(results)
        liveness_result_str = (
            f"**2. LIVENESS CHECK (MobileViTV2)**\n"
            f"{liveness_status}\n"
            f"{results_str}\n"
        )
        print(f" [LOG] Liveness check complete. All Live: {is_live}")
    except Exception as e:
        liveness_result_str = f"**2. LIVENESS CHECK FAILED:** Runtime Error ({e.__class__.__name__}).\n"
        print(f" [LOG] Liveness check failed with runtime error: {e.__class__.__name__}")
    # --- Step 3: Final Combined Output ---
    # The two booleans cover all four combinations below; the trailing else
    # (PARTIAL FAILURE) is defensive and unreachable in practice.
    final_status = "π UNCERTAIN"
    if is_same_person and is_live:
        final_status = "β IDENTITY & LIVENESS VERIFIED"
        final_color = "color: green;"
    elif not is_same_person:
        final_status = "β IDENTITY MISMATCH"
        final_color = "color: red;"
    elif not is_live:
        final_status = "β LIVENESS FAILURE"
        final_color = "color: red;"
    else:
        final_status = "β οΈ PARTIAL FAILURE"
        final_color = "color: orange;"
    final_output = (
        f"--- Sequential Verification Result ---\n"
        f"<pre>{similarity_result_str}\n"
        f"{liveness_result_str}</pre>\n"
        f"<h2 style='{final_color}'>FINAL STATUS: {final_status}</h2>\n"
    )
    print(f" [LOG] Final Status: {final_status}")
    print("--- REQUEST END ---\n")
    return final_output
# ==============================================================================
# 4. GRADIO INTERFACE
# ==============================================================================
# Two file pickers (image or video) wired to the sequential checker; the
# report is returned and rendered as raw HTML.
iface = gr.Interface(
    fn=combined_face_check,
    inputs=[
        # type="filepath" delivers a path string, which combined_face_check
        # normalizes itself (video -> first frame, image -> RGB array).
        gr.File(type="filepath", label="Input 1 (Image or Short Video)", file_types=["image", "video"]),
        gr.File(type="filepath", label="Input 2 (Image or Short Video)", file_types=["image", "video"])
    ],
    outputs=gr.HTML(label="Verification Report"),
    title="Sequential Face Verification (Identity & Liveness)",
    description=(
        "Upload two inputs (Image or Short Video). For videos, the first frame is used. "
        "The system performs two checks sequentially, using the **largest detected face** from each input:\n"
        "1. **Identity (Similarity)**: Are Input 1 and 2 the same person?\n"
        "2. **Liveness**: Are both Input 1 and 2 'Live' (Anti-Spoofing Check)?\n"
        "Both checks must pass for final verification."
    ),
    # NOTE(review): allow_flagging is deprecated in Gradio 4+ (replaced by
    # flagging_mode) — confirm against the pinned Gradio version.
    allow_flagging='never')
# Script entry point: launch the Gradio app only when run directly,
# not when this module is imported.
if __name__ == "__main__":
    print("\nπ Launching Gradio interface...")
    iface.launch()