mostaFAaa2 committed
Commit 0846293 · 1 Parent(s): 7e8f663

initial deploy

Files changed (3)
  1. Dockerfile +51 -0
  2. app.py +628 -0
  3. requirements.txt +9 -0
Dockerfile ADDED
@@ -0,0 +1,51 @@
+ FROM python:3.10-slim
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y \
+     libgl1 \
+     libglib2.0-0 \
+     libsm6 \
+     libxext6 \
+     libxrender-dev \
+     libgomp1 \
+     wget \
+     gcc \
+     g++ \
+     cmake \
+     build-essential \
+     python3-dev \
+     libssl-dev \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/*
+
+ WORKDIR /app
+
+ # Upgrade pip
+ RUN pip install --no-cache-dir --upgrade pip --root-user-action=ignore
+
+ # Install dependencies
+ RUN pip install --no-cache-dir --root-user-action=ignore "numpy==1.26.4"
+ RUN pip install --no-cache-dir --root-user-action=ignore "onnxruntime>=1.8.0"
+ RUN pip install --no-cache-dir --root-user-action=ignore "opencv-python-headless>=4.5.0"
+ RUN pip install --no-cache-dir --root-user-action=ignore "insightface>=0.7.0"
+ RUN pip install --no-cache-dir --root-user-action=ignore "huggingface-hub>=0.12.0"
+
+ COPY requirements.txt .
+ RUN pip install --no-cache-dir --root-user-action=ignore -r requirements.txt
+
+ # Download the AuraFace model at BUILD time so startup is instant
+ RUN python -c "\
+ from huggingface_hub import snapshot_download; \
+ snapshot_download('fal/AuraFace-v1', local_dir='models/auraface/models/auraface')"
+
+ # Copy app source
+ COPY app.py .
+
+ # Create uploads folder
+ RUN mkdir -p uploads
+
+ # HuggingFace Spaces uses port 7860
+ ENV PORT=7860
+ EXPOSE 7860
+
+ CMD ["python", "app.py", "--host", "0.0.0.0", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,628 @@
+ from flask import Flask, request, jsonify
+ import os
+ import cv2
+ import numpy as np
+ import pymongo
+ from bson.binary import Binary
+ import pickle
+ import time
+ import uuid
+ import logging
+ from huggingface_hub import snapshot_download
+ from insightface.app import FaceAnalysis
+ from werkzeug.utils import secure_filename
+
+
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger('FaceRecognitionAPI')
+
+ class FaceRecognitionAPI:
+     def __init__(self, mongodb_uri, db_name, collection_name):
+         self.mongodb_uri = mongodb_uri
+         self.db_name = db_name
+         self.collection_name = collection_name
+
+         self.client = pymongo.MongoClient(mongodb_uri)
+         self.db = self.client[db_name]
+         self.collection = self.db[collection_name]
+
+         self.initialize_model()
+
+         self.upload_folder = 'uploads'
+         os.makedirs(self.upload_folder, exist_ok=True)
+
+     def initialize_model(self):
+         logger.info("Downloading and initializing AuraFace model...")
+         try:
+             import os
+             model_path = "models/auraface/models/auraface"
+             logger.info(f"Model path exists: {os.path.exists(model_path)}")
+             if os.path.exists(model_path):
+                 logger.info(f"Files in model path: {os.listdir(model_path)}")
+
+             snapshot_download(
+                 "fal/AuraFace-v1",
+                 local_dir=model_path,
+             )
+
+             logger.info("Starting FaceAnalysis init...")
+             self.face_app = FaceAnalysis(
+                 name="auraface",
+                 providers=["CPUExecutionProvider"],
+                 root="models/auraface",
+             )
+             logger.info("FaceAnalysis created, calling prepare...")
+             self.face_app.prepare(ctx_id=0, det_size=(320, 320))
+             logger.info("Model initialized successfully")
+         except Exception as e:
+             import traceback
+             logger.error(f"Error initializing model: {e}")
+             logger.error(traceback.format_exc())
+             raise
+
+     def process_image(self, image_path):
+         """Process an image and detect faces"""
+         try:
+             image = cv2.imread(image_path)
+             if image is None:
+                 return None, "Failed to read image"
+
+             faces = self.face_app.get(image)
+
+             if not faces:
+                 return None, "No face detected in image"
+
+             if len(faces) > 1:
+                 return None, "Multiple faces detected, please provide an image with a single face"
+
+             return faces[0], "Success"
+
+         except Exception as e:
+             logger.error(f"Error processing image: {e}")
+             return None, f"Error processing image: {str(e)}"
+
+     def detect_face_covering(self, face, image):
+         """Detect if a face is covered with mask, sunglasses, etc."""
+         try:
+             # Get face bounding box
+             bbox = face.bbox.astype(np.int32)
+             x1, y1, x2, y2 = bbox
+
+             # Extract face region
+             face_region = image[y1:y2, x1:x2]
+
+             # Get facial landmarks
+             if not hasattr(face, 'kps') or face.kps.shape[0] < 5:
+                 return True, "Cannot detect facial landmarks clearly"
+
+             landmarks = face.kps
+             left_eye = landmarks[0]
+             right_eye = landmarks[1]
+             nose = landmarks[2]
+             left_mouth = landmarks[3]
+             right_mouth = landmarks[4]
+
+             # Calculate regions of interest
+             eye_region_height = int((y2 - y1) * 0.2)
+             mouth_region_height = int((y2 - y1) * 0.25)
+             nose_region_height = int((y2 - y1) * 0.15)
+
+             # Eye region detection
+             eye_y_center = ((left_eye[1] + right_eye[1]) / 2) - y1  # face-region coordinates, matching the nose/mouth regions below
+             eye_region_y1 = max(0, int(eye_y_center - eye_region_height/2))
+             eye_region_y2 = min(y2-y1, int(eye_y_center + eye_region_height/2))
+             eye_region = face_region[eye_region_y1:eye_region_y2, :]
+
+             # Nose region detection
+             nose_y = nose[1] - y1
+             nose_region_y1 = max(0, int(nose_y - nose_region_height/2))
+             nose_region_y2 = min(y2-y1, int(nose_y + nose_region_height/2))
+             nose_region = face_region[nose_region_y1:nose_region_y2, :]
+
+             # Mouth region detection
+             mouth_y_center = ((left_mouth[1] + right_mouth[1]) / 2) - y1
+             mouth_region_y1 = max(0, int(mouth_y_center - mouth_region_height/2))
+             mouth_region_y2 = min(y2-y1, int(mouth_y_center + mouth_region_height/2))
+             mouth_region = face_region[mouth_region_y1:mouth_region_y2, :]
+
+             # Convert regions to grayscale for analysis
+             if len(face_region.shape) == 3:
+                 gray_eye_region = cv2.cvtColor(eye_region, cv2.COLOR_BGR2GRAY)
+                 gray_nose_region = cv2.cvtColor(nose_region, cv2.COLOR_BGR2GRAY)
+                 gray_mouth_region = cv2.cvtColor(mouth_region, cv2.COLOR_BGR2GRAY)
+             else:
+                 gray_eye_region = eye_region
+                 gray_nose_region = nose_region
+                 gray_mouth_region = mouth_region
+
+             # Calculate edge density for each region
+             eye_edges = cv2.Canny(gray_eye_region, 50, 150)
+             nose_edges = cv2.Canny(gray_nose_region, 50, 150)
+             mouth_edges = cv2.Canny(gray_mouth_region, 50, 150)
+
+             eye_edge_density = np.sum(eye_edges > 0) / eye_edges.size if eye_edges.size > 0 else 0
+             nose_edge_density = np.sum(nose_edges > 0) / nose_edges.size if nose_edges.size > 0 else 0
+             mouth_edge_density = np.sum(mouth_edges > 0) / mouth_edges.size if mouth_edges.size > 0 else 0
+
+             # Calculate texture variance for each region
+             eye_variance = np.var(gray_eye_region) if gray_eye_region.size > 0 else 0
+             nose_variance = np.var(gray_nose_region) if gray_nose_region.size > 0 else 0
+             mouth_variance = np.var(gray_mouth_region) if gray_mouth_region.size > 0 else 0
+
+             # Calculate skin tone ratio for each region
+             if len(face_region.shape) == 3:
+                 hsv_eye_region = cv2.cvtColor(eye_region, cv2.COLOR_BGR2HSV)
+                 hsv_nose_region = cv2.cvtColor(nose_region, cv2.COLOR_BGR2HSV)
+                 hsv_mouth_region = cv2.cvtColor(mouth_region, cv2.COLOR_BGR2HSV)
+
+                 # Extended skin tone range
+                 lower_skin = np.array([0, 15, 60], dtype=np.uint8)
+                 upper_skin = np.array([25, 255, 255], dtype=np.uint8)
+
+                 eye_skin_mask = cv2.inRange(hsv_eye_region, lower_skin, upper_skin)
+                 nose_skin_mask = cv2.inRange(hsv_nose_region, lower_skin, upper_skin)
+                 mouth_skin_mask = cv2.inRange(hsv_mouth_region, lower_skin, upper_skin)
+
+                 eye_skin_ratio = np.sum(eye_skin_mask > 0) / eye_skin_mask.size if eye_skin_mask.size > 0 else 0
+                 nose_skin_ratio = np.sum(nose_skin_mask > 0) / nose_skin_mask.size if nose_skin_mask.size > 0 else 0
+                 mouth_skin_ratio = np.sum(mouth_skin_mask > 0) / mouth_skin_mask.size if mouth_skin_mask.size > 0 else 0
+             else:
+                 eye_skin_ratio = 0
+                 nose_skin_ratio = 0
+                 mouth_skin_ratio = 0
+
+             # Check for covered eyes (sunglasses detection)
+             if eye_edge_density < 0.03 and eye_variance < 100 and eye_skin_ratio < 0.3:
+                 return True, "Eyes appear to be covered, possibly wearing sunglasses"
+
+             # Check for covered mouth and nose (mask detection)
+             if mouth_edge_density < 0.04 and mouth_variance < 100 and mouth_skin_ratio < 0.3:
+                 return True, "Mouth appears to be covered, possibly wearing a mask"
+
+             if nose_edge_density < 0.04 and nose_variance < 100 and nose_skin_ratio < 0.3:
+                 return True, "Nose appears to be covered, possibly wearing a mask"
+
+             # Additional check for unnatural color patterns that might indicate face covering
+             if len(face_region.shape) == 3:
+                 # Calculate color histograms
+                 color_regions = [eye_region, nose_region, mouth_region]
+                 color_histograms = []
+
+                 for region in color_regions:
+                     if region.size == 0:
+                         continue
+                     hist_b = cv2.calcHist([region], [0], None, [32], [0, 256])
+                     hist_g = cv2.calcHist([region], [1], None, [32], [0, 256])
+                     hist_r = cv2.calcHist([region], [2], None, [32], [0, 256])
+
+                     # Normalize histograms
+                     if np.sum(hist_b) > 0:
+                         hist_b = hist_b / np.sum(hist_b)
+                     if np.sum(hist_g) > 0:
+                         hist_g = hist_g / np.sum(hist_g)
+                     if np.sum(hist_r) > 0:
+                         hist_r = hist_r / np.sum(hist_r)
+
+                     color_histograms.append((hist_b, hist_g, hist_r))
+
+                 # Check for unusual color distributions
+                 for hist_b, hist_g, hist_r in color_histograms:
+                     # Look for sharp peaks in color distribution that might indicate synthetic materials
+                     if np.max(hist_b) > 0.3 or np.max(hist_g) > 0.3 or np.max(hist_r) > 0.3:
+                         # Check if the peak is isolated (characteristic of uniform colored masks)
+                         sorted_b = np.sort(hist_b.flatten())
+                         sorted_g = np.sort(hist_g.flatten())
+                         sorted_r = np.sort(hist_r.flatten())
+
+                         if (sorted_b[-1] > 2.5 * sorted_b[-2] or
+                                 sorted_g[-1] > 2.5 * sorted_g[-2] or
+                                 sorted_r[-1] > 2.5 * sorted_r[-2]):
+                             return True, "Unusual color pattern detected, possibly face covering"
+
+             # Face appears uncovered
+             return False, "No face covering detected"
+
+         except Exception as e:
+             logger.error(f"Error in face covering detection: {e}")
+             # If there's an error, we'll be cautious and assume there might be an issue
+             return True, f"Error analyzing face covering: {str(e)}"
+
+     def check_face_quality(self, face, image):
+         """Check if the full face is visible and not occluded - with more lenient quality thresholds"""
+         try:
+             # Get face bounding box
+             bbox = face.bbox.astype(np.int32)
+             x1, y1, x2, y2 = bbox
+
+             # Basic check: ensure face is completely in frame
+             img_h, img_w = image.shape[:2]
+             if x1 < 0 or y1 < 0 or x2 >= img_w or y2 >= img_h:
+                 return False, "Face is partially out of frame"
+
+             # Reduced minimum size check for low-quality images (reduced from 60 to 40)
+             face_width = x2 - x1
+             face_height = y2 - y1
+             if face_width < 40 or face_height < 40:  # More lenient size requirement
+                 return False, "Face is too small in the image, please provide a clearer photo"
+
+             # Reduced confidence threshold for face detection (reduced from 0.7 to 0.5)
+             if hasattr(face, 'det_score') and face.det_score < 0.5:
+                 return False, "Face cannot be clearly detected, please try another photo"
+
+             # Extract face region for additional analysis
+             face_region = image[y1:y2, x1:x2]
+
+             # First check specifically for face covering
+             is_covered, covering_message = self.detect_face_covering(face, image)
+             if is_covered:
+                 return False, covering_message
+
+             # Check if key facial landmarks are present and within image
+             if hasattr(face, 'kps'):
+                 landmarks = face.kps
+                 # Check if any landmarks are outside the image
+                 for point in landmarks:
+                     x, y = point
+                     if x < 0 or y < 0 or x >= img_w or y >= img_h:
+                         return False, "Part of the face appears to be cut off"
+
+                 if len(landmarks) >= 5:
+                     left_eye = landmarks[0]
+                     right_eye = landmarks[1]
+                     nose = landmarks[2]
+                     left_mouth = landmarks[3]
+                     right_mouth = landmarks[4]
+
+                     # Check if both eyes and mouth are detected
+                     if not all([left_eye.any(), right_eye.any(), nose.any(), left_mouth.any(), right_mouth.any()]):
+                         return False, "Some parts of the face are not visible"
+
+                     # More lenient head rotation check (increased from 25 to 35 degrees)
+                     eye_angle = np.degrees(np.arctan2(right_eye[1] - left_eye[1], right_eye[0] - left_eye[0]))
+                     if abs(eye_angle) > 35:
+                         return False, "Face is too tilted, please provide a more straight-facing photo"
+
+                     # More lenient landmark visibility check
+                     def check_landmark_visibility(point, radius=15):
+                         x, y = point
+                         x, y = int(x), int(y)
+
+                         # Convert to face-region-relative coordinates
+                         x_rel = x - x1
+                         y_rel = y - y1
+
+                         # Ensure the point is within bounds
+                         if (x_rel - radius < 0 or y_rel - radius < 0 or
+                                 x_rel + radius >= face_width or y_rel + radius >= face_height):
+                             return False
+
+                         # Extract region around landmark
+                         landmark_region = face_region[max(0, y_rel-radius):min(face_height, y_rel+radius),
+                                                       max(0, x_rel-radius):min(face_width, x_rel+radius)]
+
+                         # More lenient variance check (reduced from 15 to 10)
+                         if landmark_region.size > 0:
+                             std_dev = np.std(landmark_region)
+                             if std_dev < 10:  # Lower threshold for variance
+                                 return False
+                         return True
+
+                     # Check visibility for key landmarks
+                     key_landmarks = [left_eye, right_eye, nose]  # Only check critical landmarks
+                     landmarks_visible = [check_landmark_visibility(lm) for lm in key_landmarks]
+
+                     if not all(landmarks_visible):
+                         return False, "Critical facial features appear to be covered or occluded"
+
+                     # More lenient face proportion check
+                     eye_distance = np.linalg.norm(right_eye - left_eye)
+                     nose_to_mouth = np.linalg.norm(nose - ((left_mouth + right_mouth) / 2))
+
+                     # Wider acceptable range for face proportions
+                     if nose_to_mouth < 0.2 * eye_distance or nose_to_mouth > 1.0 * eye_distance:
+                         return False, "Face proportions appear abnormal, possibly due to occlusion"
+
+             # Occlusion detection - still strict because we want to ensure face isn't covered
+             if len(face_region.shape) == 3:
+                 gray_face = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
+             else:
+                 gray_face = face_region
+
+             # More lenient edge detection for low quality images
+             edges = cv2.Canny(gray_face, 40, 120)  # Adjusted thresholds
+             edge_ratio = np.sum(edges > 0) / (face_width * face_height)
+
+             # More lenient edge ratio threshold (increased from 0.15 to 0.25)
+             if edge_ratio > 0.25:
+                 return False, "Something appears to be blocking the face"
+
+             # More lenient skin tone check
+             if len(face_region.shape) == 3:
+                 hsv_face = cv2.cvtColor(face_region, cv2.COLOR_BGR2HSV)
+
+                 # Expanded skin tone range to account for different lighting and ethnicities
+                 lower_skin = np.array([0, 15, 60], dtype=np.uint8)  # More lenient parameters
+                 upper_skin = np.array([25, 255, 255], dtype=np.uint8)  # Expanded hue range
+
+                 skin_mask = cv2.inRange(hsv_face, lower_skin, upper_skin)
+
+                 # Lower threshold for skin detection (reduced from 0.4 to 0.3)
+                 skin_ratio = np.sum(skin_mask > 0) / (face_width * face_height)
+
+                 if skin_ratio < 0.3:
+                     return False, "Face appears to be partially covered"
+
+             # If all checks pass, face is acceptable
+             return True, "Face check passed"
+
+         except Exception as e:
+             logger.error(f"Error checking face quality: {e}")
+             return False, f"Error checking face quality: {str(e)}"
+
+     def validate_face_image(self, image_path):
+         """Validate if the image contains a clear face"""
+         face, message = self.process_image(image_path)
+
+         if face is None:
+             return False, message
+
+         # Check face quality
+         image = cv2.imread(image_path)
+         is_quality_face, quality_message = self.check_face_quality(face, image)
+         if not is_quality_face:
+             return False, quality_message
+
+         # Check for duplicate face
+         embedding = face.normed_embedding
+         closest_match, distance = self.find_closest_match(embedding, threshold=0.4)
+
+         if closest_match:
+             return False, "This face already exists in the database"
+
+         return True, "Face image is valid and unique"
+
+     def find_closest_match(self, embedding, threshold=0.5):
+         """Find the closest face match in the database"""
+         try:
+             all_faces = list(self.collection.find())
+
+             if not all_faces:
+                 return None, float('inf')
+
+             closest_match = None
+             min_distance = float('inf')
+
+             for face_doc in all_faces:
+                 if 'embedding' in face_doc:
+                     stored_embedding = pickle.loads(face_doc['embedding'])
+
+                     distance = 1 - np.dot(embedding, stored_embedding)
+
+                     if distance < min_distance:
+                         min_distance = distance
+                         closest_match = face_doc
+
+             if min_distance <= threshold:
+                 return closest_match, min_distance
+             else:
+                 return None, min_distance
+
+         except Exception as e:
+             logger.error(f"Error finding closest match: {e}")
+             return None, float('inf')
+
+     def store_face(self, image_path):
+         """Store a face embedding in the database"""
+         face, message = self.process_image(image_path)
+
+         if face is None:
+             return False, message
+
+         # Check face quality before storing
+         image = cv2.imread(image_path)
+         is_quality_face, quality_message = self.check_face_quality(face, image)
+         if not is_quality_face:
+             return False, quality_message
+
+         embedding = face.normed_embedding
+
+         try:
+             existing_face, distance = self.find_closest_match(embedding, threshold=0.4)
+             if existing_face:
+                 return False, "This face appears to be already registered"
+
+             embedding_binary = Binary(pickle.dumps(embedding))
+
+             doc = {
+                 'user_id': str(uuid.uuid4()),
+                 'embedding': embedding_binary,
+                 'timestamp': time.time()
+             }
+
+             result = self.collection.insert_one(doc)
+             logger.info(f"Successfully stored face with ID: {result.inserted_id}")
+
+             return True, f"Face stored successfully with user_id: {doc['user_id']}"
+
+         except Exception as e:
+             logger.error(f"Error storing face: {e}")
+             return False, f"Error storing face: {str(e)}"
+
+     def verify_face(self, image_path, threshold=0.5):
+         """Verify a face against the database"""
+         face, message = self.process_image(image_path)
+
+         if face is None:
+             return False, message, None
+
+         # For verification, we still want basic quality checks but can be less strict
+         image = cv2.imread(image_path)
+         is_quality_face, quality_message = self.check_face_quality(face, image)
+         if not is_quality_face:
+             return False, quality_message, None
+
+         embedding = face.normed_embedding
+
+         closest_match, distance = self.find_closest_match(embedding, threshold)
+
+         if closest_match:
+             user_id = closest_match.get('user_id', '')
+             confidence = float(1 - distance)
+             return True, f"Face verified successfully with confidence: {confidence:.2f}", user_id
+         else:
+             return False, "No matching face found", None
+
+ app = Flask(__name__)
+
+ UPLOAD_FOLDER = 'uploads'
+ ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
+ app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
+ app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
+
+ os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+
+ MONGODB_URI = os.environ.get("MONGODB_URI")
+ DB_NAME = "taaweel"
+ COLLECTION_NAME = "face_id_images"
+
+ face_api = FaceRecognitionAPI(MONGODB_URI, DB_NAME, COLLECTION_NAME)
+
+ def allowed_file(filename):
+     return '.' in filename and \
+         filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
+
+ @app.route('/')
+ def index():
+     return jsonify({'status': 'success', 'message': 'Face Recognition API is running'})
+
+ @app.route('/signup', methods=['POST'])
+ def signup():
+     """Endpoint to store a face in the database for signup"""
+     if 'file' not in request.files:
+         return jsonify({'status': 'error', 'message': 'No file part'}), 400
+
+     file = request.files['file']
+
+     if file.filename == '':
+         return jsonify({'status': 'error', 'message': 'No selected file'}), 400
+
+     if file and allowed_file(file.filename):
+         filename = secure_filename(file.filename)
+         file_path = os.path.join(app.config['UPLOAD_FOLDER'], f"{time.time()}_{filename}")
+         file.save(file_path)
+
+         is_valid, message = face_api.validate_face_image(file_path)
+
+         if is_valid:
+             success, store_message = face_api.store_face(file_path)
+
+             try:
+                 os.remove(file_path)
+             except:
+                 pass
+
+             if success:
+                 return jsonify({
+                     'status': 'success',
+                     'message': store_message
+                 })
+             else:
+                 return jsonify({
+                     'status': 'error',
+                     'message': store_message
+                 }), 400
+         else:
+             try:
+                 os.remove(file_path)
+             except:
+                 pass
+
+             return jsonify({
+                 'status': 'error',
+                 'message': message
+             }), 400
+
+     return jsonify({'status': 'error', 'message': 'Invalid file format. Please use JPG, JPEG or PNG'}), 400
+
+ @app.route('/verify', methods=['POST'])
+ def verify():
+     """Endpoint to verify a face against the database"""
+     if 'file' not in request.files:
+         return jsonify({'status': 'error', 'message': 'No file part'}), 400
+
+     file = request.files['file']
+
+     if file.filename == '':
+         return jsonify({'status': 'error', 'message': 'No selected file'}), 400
+
+     threshold = request.form.get('threshold', 0.5)
+     try:
+         threshold = float(threshold)
+     except:
+         threshold = 0.5
+
+     if file and allowed_file(file.filename):
+         filename = secure_filename(file.filename)
+         file_path = os.path.join(app.config['UPLOAD_FOLDER'], f"{time.time()}_{filename}")
+         file.save(file_path)
+
+         verified, message, user_id = face_api.verify_face(file_path, threshold)
+
+         try:
+             os.remove(file_path)
+         except:
+             pass
+
+         if verified:
+             return jsonify({
+                 'status': 'success',
+                 'message': message,
+                 'verified': True,
+                 'user_id': user_id
+             })
+         else:
+             return jsonify({
+                 'status': 'error',
+                 'message': message,
+                 'verified': False
+             }), 401
+
+     return jsonify({'status': 'error', 'message': 'Invalid file format. Please use JPG, JPEG or PNG'}), 400
+
+ if __name__ == '__main__':
+     import argparse
+
+     parser = argparse.ArgumentParser(description='Face Recognition API')
+     parser.add_argument('--host', default='0.0.0.0', help='Host to run the server on')
+     parser.add_argument('--port', default=7000, type=int, help='Port to run the server on')
+     parser.add_argument('--mongodb-uri',
+                         default="mongodb+srv://projectDB:PEyHwQ2fF7e5saEf@cluster0.43hxo.mongodb.net/",
+                         help='MongoDB connection URI')
+     parser.add_argument('--db-name', default="ta7t-bety", help='Database name')
+     parser.add_argument('--collection', default="face_id_images", help='Collection name')
+     parser.add_argument('--debug', action='store_true', help='Run in debug mode')
+
+     args = parser.parse_args()
+
+     face_api = FaceRecognitionAPI(args.mongodb_uri, args.db_name, args.collection)
+
+     app.run(host=args.host, port=args.port, debug=args.debug)
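For reference, a minimal client sketch for the two endpoints defined above. It assumes the requests package (not listed in requirements.txt), a server reachable at http://localhost:7860, and a local file person.jpg, all of which are placeholders; both endpoints expect the image in a multipart field named file, and /verify also accepts an optional threshold form field.

import requests  # assumed to be installed separately; not part of requirements.txt

BASE_URL = "http://localhost:7860"  # placeholder; use the deployed Space URL in practice

# Enroll a face: /signup stores the embedding if the face is valid and not already registered
with open("person.jpg", "rb") as f:
    r = requests.post(f"{BASE_URL}/signup", files={"file": f})
print(r.status_code, r.json())

# Verify a face: the optional "threshold" form field tunes the cosine-distance cutoff (default 0.5)
with open("person.jpg", "rb") as f:
    r = requests.post(f"{BASE_URL}/verify", files={"file": f}, data={"threshold": "0.5"})
print(r.status_code, r.json())  # e.g. {'status': 'success', 'verified': True, 'user_id': '...'}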
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ Flask>=3.0.0
+ numpy>=1.17.0
+ opencv-python-headless>=4.5.0
+ pymongo>=4.0.0
+ Pillow>=9.0.0
+ huggingface-hub>=0.12.0
+ insightface>=0.7.0
+ onnxruntime>=1.8.0
+ Werkzeug>=3.0.0
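As a closing note on the matching logic in app.py: find_closest_match scores each stored face with a cosine distance, 1 minus the dot product of the unit-length normed_embedding vectors, keeps the minimum, and accepts it only if the distance is at or below the threshold; the confidence reported by /verify is simply 1 - distance. A minimal sketch with made-up unit vectors:

import numpy as np

# Hypothetical unit-length embeddings standing in for face.normed_embedding
probe = np.array([0.6, 0.8, 0.0])
stored = np.array([0.8, 0.6, 0.0])

distance = 1 - np.dot(probe, stored)  # same formula as find_closest_match: 0.04
confidence = 1 - distance             # what /verify reports: 0.96
print(distance, confidence)           # a match at the default threshold of 0.5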