import os

import cv2
import numpy as np
from numpy.linalg import norm
# Haar cascade for frontal-face detection, bundled with the OpenCV install.
_CASCADE_PATH = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
face_cascade = cv2.CascadeClassifier(_CASCADE_PATH)
def detect_and_extract_face(img):
    """Detect the largest frontal face in a BGR image and save the grayscale crop.

    Args:
        img: BGR image array (as returned by ``cv2.imread``).

    Returns:
        str: path of the saved face-crop JPEG.

    Raises:
        ValueError: if no face is detected or the crop cannot be written.
            (ValueError subclasses Exception, so callers that previously
            caught the bare ``Exception`` raised here still work.)
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    if len(faces) == 0:
        raise ValueError("No face detected")
    # detectMultiScale returns detections in arbitrary order; take the largest
    # box — most likely the actual subject rather than a small false positive.
    x, y, w, h = max(faces, key=lambda box: box[2] * box[3])
    face = gray[y:y + h, x:x + w]
    path = "data/intermediate/id_face.jpg"
    # cv2.imwrite silently returns False if the directory is missing or the
    # write fails — create the directory and check the result explicitly.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    if not cv2.imwrite(path, face):
        raise ValueError(f"Failed to write face crop to {path}")
    return path
def get_face_embeddings(image_path):
    """Load a face image and return its flattened 100x100 grayscale pixel vector.

    NOTE(review): despite the name, this is not a learned embedding — it is raw
    pixel intensities, so downstream comparisons are sensitive to brightness
    and alignment. Confirm whether a real embedding model was intended.

    Args:
        image_path: path to an image file readable by OpenCV.

    Returns:
        list[int]: 10,000 grayscale pixel values in row-major order.

    Raises:
        ValueError: if the image cannot be read.
    """
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # cv2.imread signals failure by returning None (no exception); without
        # this check the resize below fails with an opaque OpenCV error.
        raise ValueError(f"Could not read image: {image_path}")
    img = cv2.resize(img, (100, 100))
    return img.flatten().tolist()
def face_comparison(img1_path, img2_path, threshold=0.6):
    """Compare two face images by cosine similarity of their pixel vectors.

    Args:
        img1_path: path to the first face image.
        img2_path: path to the second face image.
        threshold: cosine-similarity cutoff; strictly above it is a match.

    Returns:
        bool: True when the cosine similarity exceeds ``threshold``.
    """
    emb1 = np.asarray(get_face_embeddings(img1_path), dtype=np.float64)
    emb2 = np.asarray(get_face_embeddings(img2_path), dtype=np.float64)
    denom = norm(emb1) * norm(emb2)
    if denom == 0:
        # An all-black image yields a zero vector; cosine similarity is then
        # undefined (0/0 -> NaN, and NaN > threshold is always False anyway),
        # so report a non-match explicitly instead of dividing by zero.
        return False
    similarity = float(np.dot(emb1, emb2) / denom)
    # Cast to plain bool (np.bool_ otherwise) for clean serialization by callers.
    return bool(similarity > threshold)