"""Face verification demo: loads an InsightFace similarity model and a
Hugging Face liveness-detection pipeline at import time.

Any initialization failure is recorded in `global_init_error` rather than
raised, so the request handler can report it to the user instead of the
whole app crashing at startup.
"""

import os
import traceback
import warnings

import cv2
import gradio as gr
import numpy as np
from insightface.app import FaceAnalysis
from transformers import pipeline as hf_pipeline

# Suppress minor warnings for a cleaner console output
warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings(action='ignore', category=FutureWarning)

# ==============================================================================
# 1. CONFIGURATION AND GLOBAL INITIALIZATION
# ==============================================================================
SIM_MODEL_NAME = 'buffalo_s'        # InsightFace model pack for face embeddings
SIMILARITY_THRESHOLD = 0.5          # cosine-similarity cutoff for a match
CTX_ID = -1                         # -1 selects CPU execution in InsightFace
LIVE_MODEL_ID = "nguyenkhoa/mobilevitv2_Liveness_detection_v1.0"

# Populated by the try/except sections below; left as None on failure so the
# handler can distinguish "model unavailable" from "model loaded".
sim_app = None
live_pipe = None
global_init_error = ""

try:
    print(f"1. Initializing InsightFace model: {SIM_MODEL_NAME}...")
    sim_app = FaceAnalysis(name=SIM_MODEL_NAME)
    sim_app.prepare(ctx_id=CTX_ID, det_size=(640, 640))
    print("ā InsightFace model prepared.")
except Exception as e:
    # Keep the app importable: record the failure for later display.
    error_message = f"{e.__class__.__name__}: {e}"
    global_init_error += f"\nā SIMILARITY ERROR: InsightFace failed to load. \n Detail: {error_message}"
    sim_app = None

try:
    print(f"2. Loading Hugging Face pipeline: {LIVE_MODEL_ID}...")
    live_pipe = hf_pipeline("image-classification", model=LIVE_MODEL_ID)
    print("ā Liveness pipeline loaded.")
except Exception as e:
    # Surface only the last traceback line — usually the most actionable part.
    full_trace = traceback.format_exc()
    critical_error = full_trace.strip().split('\n')[-1]
    global_init_error += (
        f"\nā LIVENESS ERROR: HF pipeline failed to load.\n"
        f" CRITICAL DETAIL: {critical_error}\n"
        f" Suggestion: Dependency conflict. Try running: !pip install --upgrade transformers accelerate huggingface-hub"
    )
    live_pipe = None

# ==============================================================================
# 2.
# HELPER FUNCTIONS
# ==============================================================================
def get_largest_face(faces):
    """Return the detected face with the largest bounding-box area.

    Args:
        faces: list of InsightFace face objects, each with a `bbox` array
               laid out as [x1, y1, x2, y2].

    Returns:
        The face object with the largest area, or None if `faces` is empty.
    """
    if not faces:
        return None

    def get_area(face):
        # Area = width * height of the integer-truncated bounding box.
        bbox = face.bbox.astype(np.int32)
        return (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

    return max(faces, key=get_area)


def extract_frame_from_video(video_path):
    """Read the first frame of a video file and return it in RGB order.

    Args:
        video_path: filesystem path to the video.

    Returns:
        (frame, message) — `frame` is an RGB numpy array on success, or None
        on failure with `message` describing the error.
    """
    print(f" [LOG] Extracting frame from video: {video_path}")
    if not os.path.exists(video_path):
        return None, "Error: Video file not found."
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None, "Error: Could not open video file."
    ret, frame = cap.read()
    cap.release()  # release the capture handle even though we read one frame
    if ret:
        # OpenCV decodes to BGR; downstream models expect RGB.
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame_rgb, "Frame extracted successfully."
    else:
        return None, "Error: Could not read a frame from the video."


# ==============================================================================
# 3. CORE SEQUENTIAL FUNCTION
# ==============================================================================
def combined_face_check(input1, input2):
    # NOTE(review): this function continues past the end of this chunk; only
    # the fatal-init-error guard is visible here.
    print("\n--- NEW REQUEST ---")
    if global_init_error:
        print(" [LOG] Returning Fatal Initialization Error.")
        return f"{global_init_error}"
# --- Pre-Step: Normalize Inputs (Image/Video) ---
img1_numpy = None
img2_numpy = None
def normalize_input(input_data, name):
print(f" [LOG] Normalizing Input {name}...")
if isinstance(input_data, str) and os.path.isfile(input_data):
if input_data.lower().endswith(('.mp4', '.avi', '.mov', '.mkv')):
frame, error_msg = extract_frame_from_video(input_data)
return frame, error_msg, 'VIDEO'
else:
img_bgr = cv2.imread(input_data)
if img_bgr is not None:
return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB), "Input is an image file.", 'IMAGE'
return None, "Error: Could not read image file.", 'UNKNOWN'
elif isinstance(input_data, np.ndarray):
return input_data, "Input is a direct numpy image.", 'IMAGE'
else:
return None, "Invalid input type or missing file path.", 'UNKNOWN'
img1_numpy, norm1_msg, norm1_type = normalize_input(input1, '1')
img2_numpy, norm2_msg, norm2_type = normalize_input(input2, '2')
if img1_numpy is None or img2_numpy is None:
print(" [LOG] Input normalization failed.")
return (f"{similarity_result_str}\n"
f"{liveness_result_str}\n"
f"