VeuReu committed on
Commit
e3efd8b
·
verified ·
1 Parent(s): be64954

Upload api.py

Browse files
Files changed (1) hide show
  1. api.py +72 -87
api.py CHANGED
@@ -205,8 +205,13 @@ def process_video_job(job_id: str):
205
  raise RuntimeError("No se pudo abrir el vídeo para extracción de caras")
206
  fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
207
  total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
208
- step = max(1, int(3)) # cada ~3 frames para CPU
209
- print(f"[{job_id}] Total frames: {total_frames}, FPS: {fps:.2f}, Procesando cada {step} frames")
 
 
 
 
 
210
 
211
  # Salidas
212
  faces_root = base / "faces_raw"
@@ -214,94 +219,74 @@ def process_video_job(job_id: str):
214
  embeddings: list[list[float]] = []
215
  crops_meta: list[dict] = []
216
 
217
- frame_idx = 0
218
  saved_count = 0
219
- while True:
220
- ret = cap.grab()
221
- if not ret:
222
- break
223
- if frame_idx % step == 0:
224
- ret2, frame = cap.retrieve()
225
- if not ret2:
226
- break
227
- rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
228
-
229
- if _use_fr and face_recognition is not None:
230
- boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
231
- encs = face_recognition.face_encodings(rgb, boxes)
232
- for (top, right, bottom, left), e in zip(boxes, encs):
233
- crop = frame[top:bottom, left:right]
234
- if crop.size == 0:
235
- continue
236
- fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
237
- cv2.imwrite(str(faces_root / fn), crop)
238
- # Normalizar embedding
239
- e = np.array(e, dtype=float)
240
- e = e / (np.linalg.norm(e) + 1e-9)
241
- embeddings.append(e.astype(float).tolist())
242
- crops_meta.append({
243
- "file": fn,
244
- "frame": frame_idx,
245
- "box": [int(top), int(right), int(bottom), int(left)],
246
- })
247
- saved_count += 1
 
248
  else:
249
- # DeepFace fallback con detección de bounding boxes vía Haar Cascade (OpenCV)
250
- if DeepFace is None:
251
- pass
252
- else:
253
  try:
254
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
255
- try:
256
- haar_path = getattr(cv2.data, 'haarcascades', None) or ''
257
- face_cascade = cv2.CascadeClassifier(os.path.join(haar_path, 'haarcascade_frontalface_default.xml'))
258
- except Exception:
259
- face_cascade = None
260
- boxes_haar = []
261
- if face_cascade is not None and not face_cascade.empty():
262
- faces_haar = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40))
263
- for (x, y, w, h) in faces_haar:
264
- top, left, bottom, right = max(0, y), max(0, x), min(frame.shape[0], y+h), min(frame.shape[1], x+w)
265
- boxes_haar.append((top, right, bottom, left))
266
- if boxes_haar:
267
- for (top, right, bottom, left) in boxes_haar:
268
- crop = frame[top:bottom, left:right]
269
- if crop.size == 0:
270
- continue
271
- fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
272
- crop_path = faces_root / fn
273
- cv2.imwrite(str(crop_path), crop)
274
- reps = DeepFace.represent(img_path=str(crop_path), model_name="Facenet512", enforce_detection=False)
275
- for r in (reps or []):
276
- emb = r.get("embedding") if isinstance(r, dict) else r
277
- if emb is None:
278
- continue
279
- emb = np.array(emb, dtype=float)
280
- emb = emb / (np.linalg.norm(emb) + 1e-9)
281
- embeddings.append(emb.astype(float).tolist())
282
- crops_meta.append({
283
- "file": fn,
284
- "frame": frame_idx,
285
- "box": [int(top), int(right), int(bottom), int(left)],
286
- })
287
- saved_count += 1
288
- else:
289
- # Fallback mínimo: emb del frame completo
290
- tmp_path = faces_root / f"frame_{frame_idx:06d}.jpg"
291
- cv2.imwrite(str(tmp_path), frame)
292
- reps = DeepFace.represent(img_path=str(tmp_path), model_name="Facenet512", enforce_detection=False)
293
- for r in (reps or []):
294
- emb = r.get("embedding") if isinstance(r, dict) else r
295
- if emb is None:
296
- continue
297
- emb = np.array(emb, dtype=float)
298
- emb = emb / (np.linalg.norm(emb) + 1e-9)
299
- embeddings.append(emb.astype(float).tolist())
300
- crops_meta.append({"file": tmp_path.name, "frame": frame_idx, "box": None})
301
- saved_count += 1
302
- except Exception as _e_df:
303
- print(f"[{job_id}] DeepFace fallback error: {_e_df}")
304
- frame_idx += 1
305
  cap.release()
306
 
307
  print(f"[{job_id}] ✓ Caras detectadas (embeddings): {len(embeddings)}")
 
205
  raise RuntimeError("No se pudo abrir el vídeo para extracción de caras")
206
  fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
207
  total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
208
+ max_samples = 100
209
+ # Índices de frames equiespaciados (hasta 100)
210
+ if total_frames > 0:
211
+ frame_indices = sorted(set(np.linspace(0, max(0, total_frames - 1), num=min(max_samples, max(1, total_frames)), dtype=int).tolist()))
212
+ else:
213
+ frame_indices = []
214
+ print(f"[{job_id}] Total frames: {total_frames}, FPS: {fps:.2f}, Muestreando {len(frame_indices)} frames equiespaciados (máx {max_samples})")
215
 
216
  # Salidas
217
  faces_root = base / "faces_raw"
 
219
  embeddings: list[list[float]] = []
220
  crops_meta: list[dict] = []
221
 
 
222
  saved_count = 0
223
+ for frame_idx in frame_indices:
224
+ cap.set(cv2.CAP_PROP_POS_FRAMES, int(frame_idx))
225
+ ret2, frame = cap.read()
226
+ if not ret2:
227
+ continue
228
+ rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
229
+
230
+ if _use_fr and face_recognition is not None:
231
+ boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
232
+ encs = face_recognition.face_encodings(rgb, boxes)
233
+ for (top, right, bottom, left), e in zip(boxes, encs):
234
+ crop = frame[top:bottom, left:right]
235
+ if crop.size == 0:
236
+ continue
237
+ fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
238
+ cv2.imwrite(str(faces_root / fn), crop)
239
+ # Normalizar embedding
240
+ e = np.array(e, dtype=float)
241
+ e = e / (np.linalg.norm(e) + 1e-9)
242
+ embeddings.append(e.astype(float).tolist())
243
+ crops_meta.append({
244
+ "file": fn,
245
+ "frame": frame_idx,
246
+ "box": [int(top), int(right), int(bottom), int(left)],
247
+ })
248
+ saved_count += 1
249
+ else:
250
+ # DeepFace fallback con detección de bounding boxes vía Haar Cascade (OpenCV)
251
+ if DeepFace is None:
252
+ pass
253
  else:
254
+ try:
255
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
 
256
  try:
257
+ haar_path = getattr(cv2.data, 'haarcascades', None) or ''
258
+ face_cascade = cv2.CascadeClassifier(os.path.join(haar_path, 'haarcascade_frontalface_default.xml'))
259
+ except Exception:
260
+ face_cascade = None
261
+ boxes_haar = []
262
+ if face_cascade is not None and not face_cascade.empty():
263
+ faces_haar = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(40, 40))
264
+ for (x, y, w, h) in faces_haar:
265
+ top, left, bottom, right = max(0, y), max(0, x), min(frame.shape[0], y+h), min(frame.shape[1], x+w)
266
+ boxes_haar.append((top, right, bottom, left))
267
+ for (top, right, bottom, left) in boxes_haar:
268
+ crop = frame[top:bottom, left:right]
269
+ if crop.size == 0:
270
+ continue
271
+ fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
272
+ crop_path = faces_root / fn
273
+ cv2.imwrite(str(crop_path), crop)
274
+ reps = DeepFace.represent(img_path=str(crop_path), model_name="Facenet512", enforce_detection=False)
275
+ for r in (reps or []):
276
+ emb = r.get("embedding") if isinstance(r, dict) else r
277
+ if emb is None:
278
+ continue
279
+ emb = np.array(emb, dtype=float)
280
+ emb = emb / (np.linalg.norm(emb) + 1e-9)
281
+ embeddings.append(emb.astype(float).tolist())
282
+ crops_meta.append({
283
+ "file": fn,
284
+ "frame": frame_idx,
285
+ "box": [int(top), int(right), int(bottom), int(left)],
286
+ })
287
+ saved_count += 1
288
+ except Exception as _e_df:
289
+ print(f"[{job_id}] DeepFace fallback error: {_e_df}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
290
  cap.release()
291
 
292
  print(f"[{job_id}] ✓ Caras detectadas (embeddings): {len(embeddings)}")