VeuReu committed on
Commit
af928fa
·
verified ·
1 Parent(s): e3efd8b

Upload api.py

Browse files
Files changed (1) hide show
  1. api.py +31 -1
api.py CHANGED
@@ -220,16 +220,22 @@ def process_video_job(job_id: str):
220
  crops_meta: list[dict] = []
221
 
222
  saved_count = 0
 
 
223
  for frame_idx in frame_indices:
224
  cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_idx))
225
  ret2, frame = cap.read()
226
  if not ret2:
227
  continue
 
228
  rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
229
 
230
  if _use_fr and face_recognition is not None:
231
  boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
232
  encs = face_recognition.face_encodings(rgb, boxes)
 
 
 
233
  for (top, right, bottom, left), e in zip(boxes, encs):
234
  crop = frame[top:bottom, left:right]
235
  if crop.size == 0:
@@ -260,10 +266,32 @@ def process_video_job(job_id: str):
260
  face_cascade = None
261
  boxes_haar = []
262
  if face_cascade is not None and not face_cascade.empty():
263
- faces_haar = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40))
264
  for (x, y, w, h) in faces_haar:
265
  top, left, bottom, right = max(0, y), max(0, x), min(frame.shape[0], y+h), min(frame.shape[1], x+w)
266
  boxes_haar.append((top, right, bottom, left))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
  for (top, right, bottom, left) in boxes_haar:
268
  crop = frame[top:bottom, left:right]
269
  if crop.size == 0:
@@ -289,6 +317,8 @@ def process_video_job(job_id: str):
289
  print(f"[{job_id}] DeepFace fallback error: {_e_df}")
290
  cap.release()
291
 
 
 
292
  print(f"[{job_id}] ✓ Caras detectadas (embeddings): {len(embeddings)}")
293
 
294
  # Clustering DBSCAN de caras como en 'originales'
 
220
  crops_meta: list[dict] = []
221
 
222
  saved_count = 0
223
+ frames_processed = 0
224
+ frames_with_faces = 0
225
  for frame_idx in frame_indices:
226
  cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_idx))
227
  ret2, frame = cap.read()
228
  if not ret2:
229
  continue
230
+ frames_processed += 1
231
  rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
232
 
233
  if _use_fr and face_recognition is not None:
234
  boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
235
  encs = face_recognition.face_encodings(rgb, boxes)
236
+ if boxes:
237
+ frames_with_faces += 1
238
+ print(f"[{job_id}] Frame {frame_idx}: {len(boxes)} cara(s) detectada(s) con face_recognition")
239
  for (top, right, bottom, left), e in zip(boxes, encs):
240
  crop = frame[top:bottom, left:right]
241
  if crop.size == 0:
 
266
  face_cascade = None
267
  boxes_haar = []
268
  if face_cascade is not None and not face_cascade.empty():
269
+ faces_haar = face_cascade.detectMultiScale(gray, scaleFactor=1.05, minNeighbors=3, minSize=(30, 30))
270
  for (x, y, w, h) in faces_haar:
271
  top, left, bottom, right = max(0, y), max(0, x), min(frame.shape[0], y+h), min(frame.shape[1], x+w)
272
  boxes_haar.append((top, right, bottom, left))
273
+
274
+ # Si Haar no detecta nada, intentar con DeepFace directamente
275
+ if not boxes_haar:
276
+ try:
277
+ tmp_detect = faces_root / f"detect_{frame_idx:06d}.jpg"
278
+ cv2.imwrite(str(tmp_detect), frame)
279
+ detect_result = DeepFace.extract_faces(img_path=str(tmp_detect), detector_backend='opencv', enforce_detection=False)
280
+ for det in detect_result:
281
+ facial_area = det.get('facial_area', {})
282
+ if facial_area:
283
+ x, y, w, h = facial_area.get('x', 0), facial_area.get('y', 0), facial_area.get('w', 0), facial_area.get('h', 0)
284
+ if w > 20 and h > 20:
285
+ top, left, bottom, right = max(0, y), max(0, x), min(frame.shape[0], y+h), min(frame.shape[1], x+w)
286
+ boxes_haar.append((top, right, bottom, left))
287
+ tmp_detect.unlink(missing_ok=True)
288
+ except Exception as _e_detect:
289
+ print(f"[{job_id}] Frame {frame_idx}: DeepFace extract_faces error: {_e_detect}")
290
+
291
+ if boxes_haar:
292
+ frames_with_faces += 1
293
+ print(f"[{job_id}] Frame {frame_idx}: {len(boxes_haar)} cara(s) detectada(s) con Haar/DeepFace")
294
+
295
  for (top, right, bottom, left) in boxes_haar:
296
  crop = frame[top:bottom, left:right]
297
  if crop.size == 0:
 
317
  print(f"[{job_id}] DeepFace fallback error: {_e_df}")
318
  cap.release()
319
 
320
+ print(f"[{job_id}] ✓ Frames procesados: {frames_processed}/{len(frame_indices)}")
321
+ print(f"[{job_id}] ✓ Frames con caras: {frames_with_faces}")
322
  print(f"[{job_id}] ✓ Caras detectadas (embeddings): {len(embeddings)}")
323
 
324
  # Clustering DBSCAN de caras como en 'originales'