# NOTE: the three lines that were here ("Spaces: / Sleeping / Sleeping") were
# Hugging Face Spaces page-status residue from a copy-paste, not source code.
| # SecureFace ID – FIXED VERSION | |
| import os | |
| import cv2 | |
| import numpy as np | |
| import gradio as gr | |
| from ultralytics import YOLO | |
| from huggingface_hub import hf_hub_download | |
| import insightface | |
| from insightface.app import FaceAnalysis | |
| import faiss | |
# ==================== PATHS ====================
# On-disk enrollment database: an (N, 512) embedding matrix and a parallel
# array of N display names, kept in lockstep by enroll_person().
KNOWN_EMBS_PATH = "known_embeddings.npy"
KNOWN_NAMES_PATH = "known_names.npy"

# ==================== MODELS ====================
# Load YOLO for fast detection (weights fetched from the HF Hub on first run).
print("Loading YOLOv8...")
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
detector = YOLO(model_path)

# Load InsightFace for embedding extraction (CPU-only provider).
print("Loading InsightFace...")
recognizer = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
recognizer.prepare(ctx_id=0, det_size=(640,640))

# FAISS index setup: 512-dim vectors, 32 HNSW links per node; efSearch=16
# trades a little recall for low per-frame query latency.
# NOTE(review): process_frame compares search distances against 0.8, which
# presumes an L2-style metric here — confirm if the index type ever changes.
index = faiss.IndexHNSWFlat(512, 32)
index.hnsw.efSearch = 16
known_names = []

# Load database at startup; any failure is logged and the app starts with an
# empty index rather than crashing.
if os.path.exists(KNOWN_EMBS_PATH) and os.path.exists(KNOWN_NAMES_PATH):
    try:
        embs = np.load(KNOWN_EMBS_PATH)
        known_names = np.load(KNOWN_NAMES_PATH, allow_pickle=True).tolist()
        if embs.shape[0] > 0:
            index.add(embs.astype('float32'))
        print(f"✅ Loaded {len(known_names)} identities from disk.")
    except Exception as e:
        print(f"⚠️ Database Error: {e}")
# ==================== PROCESS FRAME ====================
def process_frame(frame, blur_type="gaussian", intensity=50, expand=1.4, show_labels=True):
    """Detect, recognize and privacy-filter every face in one RGB frame.

    Args:
        frame: RGB image as a numpy array (Gradio webcam convention), or None.
        blur_type: one of "gaussian", "pixelate", "solid", "none".
        intensity: 1-100 slider value controlling blur strength / block size.
        expand: factor (>= 1.0) by which the detection box is enlarged before
            recognition, to give InsightFace surrounding context.
        show_labels: when True, draw the box and identity label overlay.

    Returns:
        Annotated copy of the frame, or None when frame is None.
    """
    if frame is None:
        return None
    img = frame.copy()
    h, w = img.shape[:2]

    # 1. Detect faces with YOLO (confidence threshold 0.4).
    results = detector(img, conf=0.4, verbose=False)[0]
    for box in results.boxes:
        x1, y1, x2, y2 = map(int, box.xyxy[0])
        # FIX: YOLO boxes can spill past the image border. Unclamped, the numpy
        # slice img[y1:y2, x1:x2] silently shrinks, and writing the blur result
        # (sized from x2-x1 / y2-y1) back raises a shape-mismatch error.
        x1 = max(0, min(x1, w)); y1 = max(0, min(y1, h))
        x2 = max(0, min(x2, w)); y2 = max(0, min(y2, h))
        if x2 <= x1 or y2 <= y1:
            continue  # FIX: degenerate box — empty crops crash cvtColor/resize

        # Expanded crop gives the recognizer more context than the tight box.
        ew = int((x2 - x1) * (expand - 1) / 2)
        eh = int((y2 - y1) * (expand - 1) / 2)
        cx1 = max(0, x1 - ew); cy1 = max(0, y1 - eh)
        cx2 = min(w, x2 + ew); cy2 = min(h, y2 + eh)

        # 2. Recognition: crop and convert to BGR (InsightFace expects BGR).
        crop = cv2.cvtColor(img[cy1:cy2, cx1:cx2], cv2.COLOR_RGB2BGR)
        faces = recognizer.get(crop)

        name = "Unknown"
        match_found = False
        if faces and index.ntotal > 0:
            # Take the largest face in the crop (usually the correct one).
            main_face = max(faces, key=lambda f: (f.bbox[2] - f.bbox[0]) * (f.bbox[3] - f.bbox[1]))
            emb = main_face.normed_embedding.reshape(1, -1).astype('float32')
            D, I = index.search(emb, k=1)
            # Threshold: lower is better for L2 distance.
            # 0.8 is a safe balance; 0.6 is very strict.
            # FIX: bounds-check the lookup so a names/embeddings file desync
            # cannot raise IndexError mid-stream.
            if D[0][0] < 0.8 and 0 <= I[0][0] < len(known_names):
                name = known_names[I[0][0]]
                match_found = True

        # 3. Blur logic (privacy) — applied to the tight box, not the expanded one.
        if blur_type != "none":
            face_region = img[y1:y2, x1:x2]
            if blur_type == "gaussian":
                # Kernel must be odd: `| 1` forces oddness, 21 is the floor.
                k = max(21, int(min(x2 - x1, y2 - y1) * intensity / 100) | 1)
                img[y1:y2, x1:x2] = cv2.GaussianBlur(face_region, (k, k), 0)
            elif blur_type == "pixelate":
                # Map intensity 10-100 to pixel block size 20-3
                block_size = max(3, int(20 * (1 - intensity / 120)))
                small = cv2.resize(
                    face_region,
                    (max(1, (x2 - x1) // block_size), max(1, (y2 - y1) // block_size)),
                )
                img[y1:y2, x1:x2] = cv2.resize(
                    small, (x2 - x1, y2 - y1), interpolation=cv2.INTER_NEAREST
                )
            elif blur_type == "solid":
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 0), -1)

        # 4. Identity overlay: green box for a known match, red for unknown.
        if show_labels:
            color = (0, 255, 0) if match_found else (0, 0, 255)
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
            (tw, th), _ = cv2.getTextSize(name, cv2.FONT_HERSHEY_DUPLEX, 0.8, 2)
            cv2.rectangle(img, (x1, y1 - 30), (x1 + tw + 10, y1), color, -1)
            cv2.putText(img, name, (x1 + 5, y1 - 8), cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 255, 255), 2)

    return img
# ==================== ENROLL FUNCTION ====================
def enroll_person(name, face_image):
    """Add one identity to the on-disk database and rebuild the FAISS index.

    Args:
        name: display name / ID for the person (whitespace is stripped).
        face_image: RGB numpy array from Gradio (upload or webcam), or None.

    Returns:
        A Markdown status string describing success or the validation error.
    """
    global index, known_names

    # Proper None check for a numpy array (truthiness of arrays is ambiguous).
    if face_image is None or not name or not name.strip():
        return "⚠️ Error: Please provide both a name and a photo."
    # FIX: the guard above validates the *stripped* name, but the original code
    # stored the raw one — " Alice " and "Alice" became distinct identities.
    name = name.strip()

    # Convert RGB (Gradio) to BGR (OpenCV/InsightFace).
    bgr = cv2.cvtColor(face_image, cv2.COLOR_RGB2BGR)
    faces = recognizer.get(bgr)
    if not faces:
        return "⚠️ Error: No face detected. Please use a clear frontal photo."

    # Pick the largest face if multiple are found.
    main_face = max(faces, key=lambda f: (f.bbox[2] - f.bbox[0]) * (f.bbox[3] - f.bbox[1]))
    new_emb = main_face.normed_embedding.reshape(1, 512)

    # Reload the on-disk matrix so the file, not process memory, is the source
    # of truth for previously saved embeddings.
    if os.path.exists(KNOWN_EMBS_PATH):
        embs = np.load(KNOWN_EMBS_PATH)
        if len(embs.shape) == 1:  # tolerate a legacy single-vector save
            embs = embs.reshape(1, -1)
    else:
        embs = np.empty((0, 512))

    # Append the new identity and persist both arrays together.
    embs = np.vstack([embs, new_emb])
    known_names.append(name)
    np.save(KNOWN_EMBS_PATH, embs)
    np.save(KNOWN_NAMES_PATH, np.array(known_names))

    # Rebuild the index from scratch — FAISS HNSW has no vector removal, so a
    # full reset+add keeps index rows aligned with known_names positions.
    index.reset()
    index.add(embs.astype('float32'))
    return f"✅ Success: **{name}** has been enrolled!"
# ==================== GRADIO UI ====================
with gr.Blocks(title="SecureFace ID", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🛡️ SecureFace ID")

    with gr.Tab("📹 Live Surveillance"):
        with gr.Row():
            with gr.Column():
                cam = gr.Image(sources=["webcam"], streaming=True, label="Live Feed", height=450)
            with gr.Column():
                output = gr.Image(label="Protected Stream", height=450)
        with gr.Row():
            blur_type = gr.Radio(["gaussian", "pixelate", "solid", "none"], value="pixelate", label="Privacy Filter")
            intensity = gr.Slider(1, 100, 80, label="Blur Intensity")
            expand = gr.Slider(1.0, 2.0, 1.3, label="Context Area")
            show_names = gr.Checkbox(True, label="Show IDs Overlay")
        # Stream each webcam frame through the detect/recognize/blur pipeline.
        cam.stream(process_frame, [cam, blur_type, intensity, expand, show_names], output)

    with gr.Tab("👤 Enroll Person"):
        with gr.Row():
            with gr.Column():
                name_input = gr.Textbox(label="Full Name / ID", placeholder="e.g. Agent Smith")
                photo_input = gr.Image(label="Reference Photo", sources=["upload", "webcam"], height=300)
                enroll_btn = gr.Button("Add to Database", variant="primary")
            with gr.Column():
                enroll_status = gr.Markdown("### Status: Waiting...")
                db_view = gr.Markdown()

        def get_db_status():
            """Render the enrolled-identity list as a Markdown bullet list."""
            if not known_names:
                return "Database is empty."
            # FIX: the header previously counted raw entries (with duplicates)
            # while the bullets were de-duplicated via set(), so the number and
            # the list disagreed; sorting also makes the order deterministic.
            unique = sorted(set(known_names))
            return f"### 📂 Registered Users ({len(unique)}):\n" + "\n".join(f"- {n}" for n in unique)

        # Event wiring.
        # FIX: the original registered get_db_status as a *second* independent
        # click handler, which can run before enroll_person finishes and show a
        # stale database; .then() chains it to run after enrollment completes.
        enroll_btn.click(
            enroll_person, inputs=[name_input, photo_input], outputs=enroll_status
        ).then(get_db_status, outputs=db_view)
        demo.load(get_db_status, outputs=db_view)

if __name__ == "__main__":
    demo.launch()