from fastapi import APIRouter, File, UploadFile
import numpy as np
import cv2
import faiss
import pickle
import os
import torch
import insightface
from insightface.utils import face_align
import sys

# The AdaFace repo is cloned in the Dockerfile; make its modules importable.
sys.path.append('/app/AdaFace')
import net

router = APIRouter()

# --- Configuration and paths ---
faiss_index_name = "face_faiss_index_v2.index"
faiss_label_name = "face_faiss_labels_v2.pkl"
# Adjust to wherever the FAISS files actually live.
load_path = os.path.abspath("embedding/person")
# Match scores (in %) below this are reported as "Unknown".
threshold = 45.0

# Force CPU — the free Hugging Face tier has no GPU.
device = torch.device('cpu')

# --- 1. Load InsightFace (detector only) ---
detector = insightface.app.FaceAnalysis(
    name='buffalo_l',
    providers=['CPUExecutionProvider'],
    allowed_modules=['detection'],
)
detector.prepare(ctx_id=0, det_size=(640, 640))

# --- 2. Load AdaFace (recognizer) ---
model_path = "/app/adaface_ir101_webface12m.ckpt"
adaface_model = net.build_model('ir_101')
# NOTE(review): torch.load unpickles arbitrary objects — the checkpoint file
# must come from a trusted source.
statedict = torch.load(model_path, map_location=device)["state_dict"]
# Strip the Lightning-style "model." prefix from checkpoint keys.
model_statedict = {key[6:]: val for key, val in statedict.items() if key.startswith("model.")}
adaface_model.load_state_dict(model_statedict)
adaface_model.to(device)
adaface_model.eval()
# --- 3.
# FAISS index load: vector index plus the parallel list of person labels.
index = faiss.read_index(os.path.join(load_path, faiss_index_name))
with open(os.path.join(load_path, faiss_label_name), "rb") as f:
    labels = pickle.load(f)


def extract_adaface_embedding(img_bgr, face_kps):
    """Align one face and return its AdaFace embedding.

    Args:
        img_bgr: full frame as a BGR uint8 array (OpenCV convention).
        face_kps: 5-point facial landmarks from the InsightFace detector.

    Returns:
        1-D numpy float array — the embedding of the aligned 112x112 crop.
    """
    aligned_face = face_align.norm_crop(img_bgr, landmark=face_kps, image_size=112)
    # Scale pixels to [-1, 1], the normalization AdaFace was trained with.
    img_norm = (aligned_face / 255.0 - 0.5) / 0.5
    # HWC -> CHW and add the batch dimension.
    img_tensor = torch.tensor(img_norm.transpose(2, 0, 1)).float().unsqueeze(0).to(device)
    with torch.no_grad():
        embedding, _ = adaface_model(img_tensor)
    return embedding.cpu().numpy()[0]


@router.post("/predict")
async def predict(file: UploadFile = File(...)):
    """Detect all faces in an uploaded image and identify each via FAISS.

    Returns a JSON payload with, per face: the predicted label ("Unknown"
    below the similarity threshold), the match score in percent, and the
    bounding box so the front-end can draw it.
    """
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if img is None:
        return {"success": False, "message": "❌ 이미지를 읽을 수 없습니다."}

    faces = detector.get(img)
    if not faces:
        return {"success": False, "message": "❌ 얼굴을 찾을 수 없습니다."}

    results = []
    for face in faces:
        # AdaFace embedding, L2-normalized so the inner-product search
        # behaves as cosine similarity.
        emb = extract_adaface_embedding(img, face.kps)
        emb = np.array([emb], dtype='float32')
        emb /= np.linalg.norm(emb, axis=1, keepdims=True)

        # Nearest-neighbor lookup in the FAISS index.
        distances, indices = index.search(emb, k=1)
        best_match_idx = int(indices[0][0])
        similarity_score = float(distances[0][0])

        # Convert similarity to a percentage and apply the Unknown threshold.
        # BUGFIX: FAISS returns index -1 when it has no neighbor to report
        # (e.g. an empty index); labels[-1] would silently pick the LAST
        # person, so treat -1 as Unknown explicitly.
        score_percent = max(0, similarity_score) * 100
        if best_match_idx >= 0 and score_percent >= threshold:
            predicted_name = labels[best_match_idx]
        else:
            predicted_name = "Unknown"

        box = face.bbox.astype(int).tolist()
        results.append({
            "label": predicted_name,
            "score": float(score_percent),
            "bbox": box,  # coordinates so the front-end can draw the box
        })

    return {"success": True, "results": results, "message": f"✅ 총 {len(faces)}명의 얼굴을 처리했습니다."}