Spaces:
No application file
No application file
| import gradio as gr | |
| import cv2 | |
| import face_recognition | |
| import numpy as np | |
| import pickle | |
# 1. Load the trained model (known face encodings + matching names).
# Ensure '03.face_model.pkl' is in the same folder as this script!
try:
    with open("03.face_model.pkl", "rb") as f:
        # NOTE: pickle.load must only be used on trusted files — a
        # malicious pickle can execute arbitrary code on load.
        data = pickle.load(f)
    known_encodings = data["encodings"]
    known_names = data["names"]
    print(f"✅ Model loaded. Found {len(known_names)} known faces.")
except FileNotFoundError:
    # Fall back to an empty database: the app still runs, but with no
    # known encodings every detected face is labeled "Unknown".
    print("⚠ Error: '03.face_model.pkl' not found. All faces will be labeled 'Unknown'.")
    known_encodings = []
    known_names = []
def recognize_faces(image):
    """Detect faces in an image, identify them, and draw labeled boxes.

    Args:
        image: RGB NumPy array as delivered by Gradio, or None when the
            user clicks Analyze without providing an image.

    Returns:
        A copy of the input image annotated with green rectangles and
        name labels, or None when no image was given.
    """
    if image is None:
        return None

    # Work on a copy so the caller's array is left untouched.
    annotated = image.copy()

    # 1. Locate and encode every face. face_recognition expects RGB,
    # which is exactly what Gradio provides.
    locations = face_recognition.face_locations(annotated)
    encodings = face_recognition.face_encodings(annotated, locations)

    # 2. Identify each face and draw the result.
    for (top, right, bottom, left), encoding in zip(locations, encodings):
        name = "Unknown"
        if known_encodings:
            # Pick the BEST match (smallest distance), not merely the
            # first encoding under the tolerance: with several similar
            # people in the database, first-match can mislabel faces.
            distances = face_recognition.face_distance(known_encodings, encoding)
            best = int(np.argmin(distances))
            # Still require the best candidate to pass the default
            # tolerance check before accepting it.
            if face_recognition.compare_faces([known_encodings[best]], encoding)[0]:
                name = known_names[best]

        # Draw rectangle + label in green; in RGB, (0, 255, 0) is green.
        cv2.rectangle(annotated, (left, top), (right, bottom), (0, 255, 0), 2)
        cv2.putText(annotated, name, (left, top - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    return annotated
# 2. Assemble the Gradio interface: one tab per input source, both
# routed through the same recognize_faces handler.
with gr.Blocks(title="Face Recognition System") as demo:
    gr.Markdown("# ✨ Face Recognition System")

    with gr.Tab("🖼 Upload Image"):
        gr.Markdown("Upload an image file from your computer.")
        image_in = gr.Image(sources=["upload"], type="numpy", label="Upload Image")
        image_out = gr.Image(label="Processed Image")
        analyze_upload = gr.Button("Analyze Upload")
        analyze_upload.click(fn=recognize_faces, inputs=image_in, outputs=image_out)

    with gr.Tab("🎥 Webcam"):
        gr.Markdown("Click on the camera icon to take a snapshot.")
        # 'sources=["webcam"]' renders the camera-capture widget.
        snap_in = gr.Image(sources=["webcam"], type="numpy", label="Webcam Snapshot")
        snap_out = gr.Image(label="Processed Snapshot")
        analyze_snap = gr.Button("Analyze Snapshot")
        analyze_snap.click(fn=recognize_faces, inputs=snap_in, outputs=snap_out)

# 3. Launch the app only when executed as a script.
if __name__ == "__main__":
    demo.launch()