Spaces:
Sleeping
Sleeping
File size: 4,138 Bytes
efa9bcb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 |
import cv2
import numpy as np
from mtcnn import MTCNN
from keras_facenet import FaceNet
class FaceVerification:
    """
    Face verification using MTCNN for detection and FaceNet for embeddings.

    Attributes
    ----------
    detector : MTCNN
        Face detector model.
    embedder : FaceNet
        Face embedding model.

    Methods
    -------
    extract_face(img, target_size=(160, 160)):
        Detects and crops the face from the input image.
    get_embedding(face):
        Extracts L2-normalized FaceNet embedding from the given face image.
    cosine_similarity(a, b):
        Computes cosine similarity between two embedding vectors.
    compare_faces(img1, img2, threshold=0.5):
        Compares two face images and determines if they belong to the same person.
    """

    def __init__(self):
        """Initialize the MTCNN detector and FaceNet embedder."""
        self.detector = MTCNN()
        self.embedder = FaceNet()
        print("✅ MTCNN and FaceNet initialized successfully.")

    def extract_face(self, img, target_size=(160, 160)):
        """
        Detects and crops a face from an image.

        Parameters
        ----------
        img : np.ndarray
            Input image in BGR format (as read by cv2.imread).
        target_size : tuple, optional
            Desired size for the cropped face (default is (160, 160)).

        Returns
        -------
        np.ndarray
            Cropped and resized face image in RGB format.

        Raises
        ------
        ValueError
            If the input image is invalid, no face is detected, or the
            detected bounding box yields an empty crop.
        """
        if img is None:
            raise ValueError("Image not found or invalid input.")
        # MTCNN expects RGB; OpenCV loads BGR.
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        detections = self.detector.detect_faces(img_rgb)
        if not detections:
            raise ValueError("❌ No face detected.")
        # Use the first detected face.
        x, y, w, h = detections[0]['box']
        # MTCNN can return slightly negative coordinates for faces near the
        # image border; clamp to 0 (abs() would mirror the box incorrectly).
        x, y = max(0, x), max(0, y)
        face = img_rgb[y:y + h, x:x + w]
        if face.size == 0:
            # Guard against a degenerate box; cv2.resize would fail opaquely.
            raise ValueError("❌ Detected face region is empty.")
        # Resize to the FaceNet input size.
        face = cv2.resize(face, target_size)
        return face

    def get_embedding(self, face):
        """
        Extracts an L2-normalized FaceNet embedding.

        Parameters
        ----------
        face : np.ndarray
            Cropped face image (RGB format, 160x160).

        Returns
        -------
        np.ndarray
            L2-normalized face embedding vector.
        """
        emb = self.embedder.embeddings([face])
        # Normalize so that the dot product equals cosine similarity.
        emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
        return emb[0]

    def cosine_similarity(self, a, b):
        """
        Computes cosine similarity between two embeddings.

        Parameters
        ----------
        a, b : np.ndarray
            Face embeddings (assumed L2-normalized, as produced by
            get_embedding).

        Returns
        -------
        float
            Cosine similarity score in [-1, 1].
        """
        # For unit-norm vectors the dot product is the cosine similarity.
        return np.dot(a, b)

    def compare_faces(self, img1, img2, threshold=0.5):
        """
        Compares two face images and reports whether they match.

        Parameters
        ----------
        img1, img2 : np.ndarray
            Two input images (BGR format).
        threshold : float, optional
            Cosine-similarity threshold for accepting a match
            (default is 0.5).

        Returns
        -------
        dict
            Keys: "STATUS" (human-readable result) and "ERROR"
            (None if no error, otherwise the error message).
        """
        try:
            face1 = self.extract_face(img1)
            face2 = self.extract_face(img2)
        except Exception as e:
            # Detection failed for at least one image; report instead of raising.
            return {"STATUS": "Failed, try again", "ERROR": str(e)}
        emb1 = self.get_embedding(face1)
        emb2 = self.get_embedding(face2)
        similarity = self.cosine_similarity(emb1, emb2)
        # Map similarity from [-1, 1] to a 0-100% scale for display.
        accuracy = ((similarity + 1) / 2) * 100
        print(f"Cosine similarity: {similarity:.4f}")
        if similarity > threshold:
            print("✅ Same person (accept)")
            return {
                "STATUS": f"✅ Same person (accept), accuracy {accuracy:.2f}%",
                "ERROR": None,
            }
        else:
            print("❌ Different persons (reject)")
            return {
                "STATUS": f"❌ Different persons (reject), accuracy {accuracy:.2f}%",
                "ERROR": None,
            }
|