# Face comparison service: a FastAPI REST endpoint and a Gradio UI sharing one
# InsightFace ("buffalo_l") model. (Hosting-page residue removed from this header.)
import base64
import binascii
import threading

import cv2
import gradio as gr
import insightface
import numpy as np
import requests
import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
# ---------- Load face detector + recognition model ----------
# "buffalo_l" bundles detection and recognition weights in one package.
model = insightface.app.FaceAnalysis(name="buffalo_l")
# ctx_id=0 selects device 0 — presumably GPU with CPU fallback; TODO confirm.
model.prepare(ctx_id=0, det_size=(640, 640))
# ---------- FastAPI application instance ----------
app = FastAPI()
# ---------- API request schema ----------
class CompareRequest(BaseModel):
    """Payload for the compare endpoint.

    Each image may arrive either inline (base64) or as a URL; the
    corresponding field is simply left as None when unused.
    """

    image1: str | None = None  # base64-encoded image bytes
    image2: str | None = None  # base64-encoded image bytes
    image1_url: str | None = None  # HTTP(S) URL of the image
    image2_url: str | None = None  # HTTP(S) URL of the image
| # ---------- Helper: Convert base64 to CV2 image ---------- | |
# ---------- Helper: decode a base64 string into a cv2 (BGR) image ----------
def b64_to_img(b64_string):
    """Decode *b64_string* into an OpenCV BGR image.

    Accepts plain base64 as well as data-URL payloads
    ("data:image/...;base64,<data>"). Returns None when the input is
    empty, not valid base64, or the decoded bytes are not an image.
    """
    if not b64_string:
        return None
    # Generalization: tolerate data-URL inputs by keeping only the payload.
    if b64_string.lstrip().startswith("data:") and "," in b64_string:
        b64_string = b64_string.split(",", 1)[1]
    try:
        img_data = base64.b64decode(b64_string)
    except (binascii.Error, ValueError, TypeError):
        # Narrow catch — the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
    np_arr = np.frombuffer(img_data, np.uint8)
    # imdecode returns None (instead of raising) for undecodable bytes.
    return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
| # ---------- Helper: Convert URL to CV2 image ---------- | |
# ---------- Helper: download a URL into a cv2 (BGR) image ----------
def url_to_img(url):
    """Fetch *url* and decode the response body as an OpenCV BGR image.

    Returns None on network errors, non-2xx responses, or when the body
    is not a decodable image.
    """
    try:
        resp = requests.get(url, timeout=5)
        # Treat 4xx/5xx as failure instead of feeding an error page to imdecode.
        resp.raise_for_status()
    except requests.RequestException:
        # Narrow catch — the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        return None
    np_arr = np.frombuffer(resp.content, np.uint8)
    # imdecode returns None (instead of raising) for undecodable bytes.
    return cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
| # ---------- Helper: Extract face embedding ---------- | |
| def get_embedding(img): | |
| faces = model.get(img) | |
| if len(faces) == 0: | |
| return None | |
| return faces[0].embedding # first detected face | |
| # ---------- POST /compare API ---------- | |
# ---------- POST /compare API ----------
@app.post("/compare")  # BUG FIX: the route was never registered with the app
async def compare_faces(req: CompareRequest):
    """Compare the faces in two images and report their cosine similarity.

    Each image may be supplied as base64 (``image1``/``image2``) or as a
    URL (``image1_url``/``image2_url``); base64 takes precedence when both
    are present. Returns ``{"error": ...}`` on bad input or when no face
    is found, otherwise ``{"similarity": float, "match": bool}``.
    """

    def _load(b64, url):
        # Prefer inline base64 over the URL, mirroring the field order.
        if b64:
            return b64_to_img(b64)
        if url:
            return url_to_img(url)
        return None

    img1 = _load(req.image1, req.image1_url)
    img2 = _load(req.image2, req.image2_url)
    if img1 is None or img2 is None:
        return {"error": "Invalid image data or URL."}

    emb1 = get_embedding(img1)
    emb2 = get_embedding(img2)
    if emb1 is None or emb2 is None:
        return {"error": "No face detected in one or both images."}

    # Cosine similarity between the two embeddings. Cast to builtin types:
    # numpy scalars (float32 / bool_) are not JSON-serializable by default.
    similarity = float(
        np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2))
    )
    # 0.55 is an empirically chosen decision threshold — TODO confirm/tune.
    return {"similarity": similarity, "match": bool(similarity > 0.55)}
| # ---------- Gradio UI ---------- | |
| def gradio_ui(img1_text, img2_text): | |
| def load_any(input_str): | |
| if input_str.startswith("http://") or input_str.startswith("https://"): | |
| return url_to_img(input_str) | |
| else: | |
| return b64_to_img(input_str) | |
| img1 = load_any(img1_text) | |
| img2 = load_any(img2_text) | |
| if img1 is None or img2 is None: | |
| return "Invalid image data or URL." | |
| emb1 = get_embedding(img1) | |
| emb2 = get_embedding(img2) | |
| if emb1 is None or emb2 is None: | |
| return "Face not detected." | |
| similarity = np.dot(emb1, emb2) / (np.linalg.norm(emb1) * np.linalg.norm(emb2)) | |
| matched = similarity > 0.55 | |
| return f"Similarity: {similarity:.3f} | Match: {matched}" | |
| # ---------- Run Gradio in Background Thread ---------- | |
| def launch_gradio(): | |
| gr.Interface( | |
| fn=gradio_ui, | |
| inputs=[ | |
| gr.Textbox(label="Image 1 (base64 or URL)"), | |
| gr.Textbox(label="Image 2 (base64 or URL)") | |
| ], | |
| outputs="text", | |
| title="Face Match API (Text Input)" | |
| ).launch(server_name="0.0.0.0", server_port=7860) | |
# ---------- MAIN ----------
if __name__ == "__main__":
    # Gradio serves on :7860 in a daemon thread so it exits with the process.
    ui_thread = threading.Thread(target=launch_gradio, daemon=True)
    ui_thread.start()
    # FastAPI (uvicorn) blocks the main thread, serving on :8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)