# face_processor.py — face verification utilities (MTCNN detection + FaceNet embeddings).
import cv2
import numpy as np
from mtcnn import MTCNN
from keras_facenet import FaceNet
class FaceVerification:
    """
    A class for performing face verification using MTCNN for detection
    and FaceNet for embedding extraction.

    Attributes
    ----------
    detector : MTCNN
        Face detector model.
    embedder : FaceNet
        Face embedding model.

    Methods
    -------
    extract_face(img, target_size=(160, 160)):
        Detects and crops the face from the input image.
    get_embedding(face):
        Extracts L2-normalized FaceNet embedding from the given face image.
    cosine_similarity(a, b):
        Computes cosine similarity between two embedding vectors.
    compare_faces(img1, img2, threshold=0.5):
        Compares two face images and determines if they belong to the same person.
    """

    def __init__(self):
        """Initialize the MTCNN detector and FaceNet embedder."""
        self.detector = MTCNN()
        self.embedder = FaceNet()
        print("βœ… MTCNN and FaceNet initialized successfully.")

    def extract_face(self, img, target_size=(160, 160)):
        """
        Detect and crop a single face from an image.

        Parameters
        ----------
        img : np.ndarray
            Input image in BGR format (as returned by ``cv2.imread``).
        target_size : tuple, optional
            Desired size for the cropped face (default is (160, 160),
            the FaceNet input size).

        Returns
        -------
        np.ndarray
            Cropped and resized face image in RGB format.

        Raises
        ------
        ValueError
            If the input is invalid, no face is detected, or the detected
            box has zero area.
        """
        if img is None:
            raise ValueError("Image not found or invalid input.")
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        detections = self.detector.detect_faces(img_rgb)
        if not detections:
            raise ValueError("❌ No face detected.")
        # Use first detected face. MTCNN may report a slightly negative box
        # origin for faces touching the image edge; clamp to 0 rather than
        # abs() so the crop is not shifted into the image.
        x, y, w, h = detections[0]['box']
        x, y = max(x, 0), max(y, 0)
        face = img_rgb[y:y + h, x:x + w]
        # A degenerate box (zero width/height) would make cv2.resize throw
        # an opaque cv2.error; surface it as the documented ValueError.
        if face.size == 0:
            raise ValueError("❌ Detected face region is empty.")
        # Resize to FaceNet input size
        face = cv2.resize(face, target_size)
        return face

    def get_embedding(self, face):
        """
        Extract an L2-normalized FaceNet embedding.

        Parameters
        ----------
        face : np.ndarray
            Cropped face image (RGB format, 160x160).

        Returns
        -------
        np.ndarray
            L2-normalized face embedding vector.
        """
        emb = self.embedder.embeddings([face])
        # Normalize so cosine similarity reduces to a plain dot product.
        emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
        return emb[0]

    def cosine_similarity(self, a, b):
        """
        Compute cosine similarity between two embeddings.

        Parameters
        ----------
        a, b : np.ndarray
            Face embeddings. Assumed L2-normalized (as produced by
            ``get_embedding``), so the dot product equals cosine similarity.

        Returns
        -------
        float
            Cosine similarity score in [-1, 1].
        """
        return np.dot(a, b)

    def compare_faces(self, img1, img2, threshold=0.5):
        """
        Compare two face images and print the similarity result.

        Parameters
        ----------
        img1, img2 : np.ndarray
            Two input images (BGR format).
        threshold : float, optional
            Similarity threshold (default is 0.5).

        Returns
        -------
        dict
            Keys ``STATUS`` (human-readable verdict) and ``ERROR``
            (``None`` when no error occurred, otherwise the error text).
        """
        try:
            face1 = self.extract_face(img1)
            face2 = self.extract_face(img2)
        except Exception as e:
            return {"STATUS": "Failed, try again", "ERROR": str(e)}
        emb1 = self.get_embedding(face1)
        emb2 = self.get_embedding(face2)
        similarity = self.cosine_similarity(emb1, emb2)
        # Map cosine similarity [-1, 1] onto a 0-100% "accuracy" scale.
        accuracy = ((similarity + 1) / 2) * 100
        print(f"Cosine similarity: {similarity:.4f}")
        if similarity > threshold:
            print("βœ… Same person (accept)")
            return {"STATUS": f"βœ… Same person (accept), accuracy {accuracy:.2f}%", "ERROR": None}
        else:
            print("❌ Different persons (reject)")
            return {"STATUS": f"❌ Different persons (reject), accuracy {accuracy:.2f}%", "ERROR": None}