# facematch-pro / app.py
# Author: wanchoi (commit f0f598b, "Update app.py")
import gradio as gr
import numpy as np
import cv2
import insightface
from insightface.app import FaceAnalysis
# Module-level InsightFace FaceAnalysis instance shared by all requests.
# CPU-only execution provider; det_size is the face detector's input size.
app = FaceAnalysis(providers=['CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
def liveness_check_passive(image):
    """Passive liveness check stub.

    A production implementation would score *image* with an anti-spoofing
    model and return True only for a live face. This placeholder treats
    every input as live.
    """
    # TODO: integrate a real passive anti-spoofing model here.
    return True
def liveness_check_active(video_path):
    """Active liveness check stub.

    A real implementation would analyze the video at *video_path* for
    user-performed challenges such as blinks or head turns. This
    placeholder always reports the check as passed.
    """
    # TODO: implement blink/head-turn challenge detection.
    return True
def run_face_verification(webcam_video, id_card_image):
    """Compare a live webcam video with an ID card image for face verification.

    Args:
        webcam_video: Filepath of the recorded webcam video (from gr.Video).
        id_card_image: PIL image of the ID card photo (from gr.Image).

    Returns:
        Tuple of (result_text, similarity_score, match_found). The score and
        match flag are None whenever verification could not be attempted.
    """
    # Guard: both inputs are required.
    if webcam_video is None or id_card_image is None:
        return "Please provide both a live video and an ID card image.", None, None

    # Step 1: Liveness detection (placeholder implementation always passes).
    if not liveness_check_active(webcam_video):
        return "Active liveness check failed. This appears to be a spoof attempt.", None, None

    # Use the first frame from the video for face matching. Release the
    # capture on every path — the original leaked it when no frame was read.
    cap = cv2.VideoCapture(webcam_video)
    try:
        ret, frame = cap.read()
    finally:
        cap.release()
    if not ret:
        return "Could not capture a frame from the video.", None, None
    webcam_image_np = frame  # OpenCV frames are already BGR as the detector expects.
    liveness_result = "Active liveness check: Passed βœ…"

    # Step 2: Face detection & embedding for the selfie frame.
    faces_selfie = app.get(webcam_image_np)
    if not faces_selfie:
        return "No face detected in the video.", None, None
    embedding_selfie = faces_selfie[0].embedding

    # Step 3: Gradio hands us a PIL image in RGB; convert to BGR so the ID
    # photo is analyzed in the same color space as the OpenCV selfie frame.
    id_card_image_np = cv2.cvtColor(np.array(id_card_image), cv2.COLOR_RGB2BGR)

    # Step 4: Face detection & embedding for the ID card photo.
    faces_id = app.get(id_card_image_np)
    if not faces_id:
        return "No face detected in the ID card image.", None, None
    embedding_id = faces_id[0].embedding

    # Step 5: Facial matching. A raw dot product of unnormalized embeddings
    # depends on vector magnitude, so normalize both vectors to obtain a
    # scale-invariant cosine similarity in [-1, 1].
    norm_product = np.linalg.norm(embedding_selfie) * np.linalg.norm(embedding_id)
    similarity_score = float(np.dot(embedding_selfie, embedding_id) / norm_product)

    # Conventional cosine-similarity threshold for InsightFace ArcFace
    # embeddings; tune on validation data for production use.
    confidence_threshold = 0.35

    # Step 6: Verification verdict.
    result_text = f"{liveness_result}\n"
    result_text += f"Similarity Score: {similarity_score:.4f}\n"
    if similarity_score >= confidence_threshold:
        result_text += "Verdict: Faces match! βœ…"
        match_found = True
    else:
        result_text += "Verdict: Faces do NOT match! ❌"
        match_found = False
    result_text += "\n\nNote: ID text verification is not yet implemented in this demo."
    return result_text, similarity_score, match_found
# Define the Gradio interface: a webcam video plus an ID card photo are fed
# to run_face_verification, whose three return values map onto the three
# output components below (text verdict, numeric score, match flag).
interface = gr.Interface(
    fn=run_face_verification,
    inputs=[
        gr.Video(label="Live Selfie (Webcam)", sources="webcam"),
        gr.Image(label="ID Card Photo", type="pil")
    ],
    outputs=[
        gr.Textbox(label="Verification Result"),
        gr.Number(label="Confidence Score"),
        gr.Checkbox(label="Match Found")
    ],
    title="Open-Source Face and ID Verification",
    description="Use your webcam for a live check to match your face with an ID photo."
)

# Launch the web app only when run as a script, not when imported.
if __name__ == "__main__":
    interface.launch()