Update app.py
Browse files
app.py
CHANGED
|
@@ -234,33 +234,70 @@ class AttendanceSystem:
|
|
| 234 |
|
| 235 |
# --- Video Processing ---
|
| 236 |
def process_frame(self, frame: np.ndarray) -> np.ndarray:
|
|
|
|
|
|
|
|
|
|
| 237 |
try:
|
| 238 |
face_objs = DeepFace.extract_faces(img_path=frame, detector_backend='opencv', enforce_detection=False)
|
| 239 |
-
|
| 240 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
x, y, w, h = face_obj['facial_area'].values()
|
| 242 |
face_image = frame[y:y+h, x:x+w]
|
|
|
|
| 243 |
if face_image.size == 0: continue
|
|
|
|
| 244 |
embedding = DeepFace.represent(img_path=face_image, model_name='Facenet', enforce_detection=False)[0]['embedding']
|
| 245 |
-
|
| 246 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 247 |
match_index = distances.index(min_dist) if min_dist < 10.0 else -1
|
|
|
|
|
|
|
|
|
|
| 248 |
color, worker_id, worker_name = (0, 0, 255), None, "Unknown"
|
|
|
|
| 249 |
if match_index != -1:
|
| 250 |
-
worker_id
|
| 251 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
else:
|
|
|
|
|
|
|
| 253 |
new_worker = self._register_worker_auto(face_image)
|
| 254 |
if new_worker:
|
| 255 |
-
worker_id, worker_name
|
| 256 |
-
if self.mark_attendance(worker_id, worker_name):
|
|
|
|
|
|
|
| 257 |
label = f"{worker_name}" + (f" ({worker_id})" if worker_id else "")
|
| 258 |
cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
|
| 259 |
cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)
|
|
|
|
| 260 |
return frame
|
| 261 |
-
except Exception:
|
|
|
|
|
|
|
| 262 |
return frame
|
| 263 |
|
|
|
|
| 264 |
def _processing_loop(self, source):
|
| 265 |
video_capture = cv2.VideoCapture(source)
|
| 266 |
if not video_capture.isOpened():
|
|
|
|
def process_frame(self, frame: np.ndarray) -> np.ndarray:
    """Detect, recognize, and annotate faces on a single video frame.

    Each detected face with sufficient confidence is embedded and compared
    against the known-embedding database. A match marks attendance; a
    non-match triggers automatic registration of a new worker. Debug
    information is printed at every step.

    Args:
        frame: BGR image as produced by ``cv2.VideoCapture.read()``.

    Returns:
        The same frame with bounding boxes and name labels drawn on it.
        On any processing error the frame is returned unannotated.
    """
    try:
        face_objs = DeepFace.extract_faces(
            img_path=frame, detector_backend='opencv', enforce_detection=False
        )

        # Print a summary for the current frame
        if face_objs:
            print(f"\n--- Frame Processed: Found {len(face_objs)} faces. ---")

        for i, face_obj in enumerate(face_objs):
            confidence = face_obj['confidence']
            print(f"  Face #{i+1}: Confidence Score = {confidence:.2f}")

            if confidence < 0.95:
                print("   -> Confidence too low, skipping.")
                continue

            # Index the region keys explicitly: unpacking `.values()` assumes
            # the dict holds exactly x/y/w/h in that insertion order, which
            # breaks on DeepFace versions that add eye-landmark keys to
            # `facial_area`.
            area = face_obj['facial_area']
            x, y, w, h = area['x'], area['y'], area['w'], area['h']
            face_image = frame[y:y+h, x:x+w]

            if face_image.size == 0:
                continue

            embedding = DeepFace.represent(
                img_path=face_image, model_name='Facenet', enforce_detection=False
            )[0]['embedding']

            # BUG FIX: previously an empty database hit `continue` here, so
            # `_register_worker_auto` was unreachable and the very first
            # worker could never be registered (the database stayed empty
            # forever). An empty database is now treated as "no match" so the
            # registration branch below runs.
            if self.known_face_embeddings:
                distances = [np.linalg.norm(np.array(embedding) - known)
                             for known in self.known_face_embeddings]
                min_dist = min(distances)
                match_index = distances.index(min_dist) if min_dist < 10.0 else -1
                print(f"   -> Comparing to DB... Minimum Distance Found: {min_dist:.4f}")
            else:
                print("   -> No known faces in database to compare against.")
                match_index = -1

            # Default annotation state: red box, unknown identity.
            color, worker_id, worker_name = (0, 0, 255), None, "Unknown"

            if match_index != -1:
                worker_id = self.known_face_ids[match_index]
                worker_name = self.known_face_names[match_index]
                color = (0, 255, 0)  # Green
                print(f"   ✓ MATCH! (Threshold: 10.0). Recognized as {worker_name}")
                if self.mark_attendance(worker_id, worker_name):
                    self.last_recognition_time[worker_id] = time.time()
            else:
                color = (0, 165, 255)  # Orange for potential new worker
                print(f"   ✗ NO MATCH (Threshold: 10.0). Registering as new worker...")
                new_worker = self._register_worker_auto(face_image)
                if new_worker:
                    worker_id, worker_name = new_worker[0], new_worker[1]
                    if self.mark_attendance(worker_id, worker_name):
                        self.last_recognition_time[worker_id] = time.time()

            label = f"{worker_name}" + (f" ({worker_id})" if worker_id else "")
            cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
            cv2.putText(frame, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)

        return frame
    except Exception as e:
        # Never let a recognition error kill the capture loop; return the
        # frame unannotated so the video stream keeps flowing.
        print(f"ERROR in process_frame: {e}")
        return frame
def _processing_loop(self, source):
|
| 302 |
video_capture = cv2.VideoCapture(source)
|
| 303 |
if not video_capture.isOpened():
|