Upload 2 files
Browse files
api.py
CHANGED
|
@@ -49,6 +49,33 @@ class JobStatus(str, Enum):
|
|
| 49 |
|
| 50 |
jobs: Dict[str, dict] = {}
|
| 51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
def hierarchical_cluster_with_min_size(X, max_groups: int, min_cluster_size: int):
|
| 53 |
"""
|
| 54 |
Clustering jerárquico aglomerativo que produce hasta max_groups clusters.
|
|
@@ -231,6 +258,7 @@ def process_video_job(job_id: str):
|
|
| 231 |
# Detección de caras y embeddings (CPU), alineado con 'originales'
|
| 232 |
try:
|
| 233 |
print(f"[{job_id}] Iniciando detección de personajes (CPU, originales)...")
|
|
|
|
| 234 |
import cv2
|
| 235 |
import numpy as np
|
| 236 |
try:
|
|
@@ -274,7 +302,9 @@ def process_video_job(job_id: str):
|
|
| 274 |
if not ret2:
|
| 275 |
continue
|
| 276 |
frames_processed += 1
|
| 277 |
-
|
|
|
|
|
|
|
| 278 |
|
| 279 |
if _use_fr and face_recognition is not None:
|
| 280 |
boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
|
|
@@ -283,7 +313,7 @@ def process_video_job(job_id: str):
|
|
| 283 |
frames_with_faces += 1
|
| 284 |
print(f"[{job_id}] Frame {frame_idx}: {len(boxes)} cara(s) detectada(s) con face_recognition")
|
| 285 |
for (top, right, bottom, left), e in zip(boxes, encs):
|
| 286 |
-
crop =
|
| 287 |
if crop.size == 0:
|
| 288 |
continue
|
| 289 |
fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
|
|
@@ -304,7 +334,7 @@ def process_video_job(job_id: str):
|
|
| 304 |
pass
|
| 305 |
else:
|
| 306 |
try:
|
| 307 |
-
gray = cv2.cvtColor(
|
| 308 |
try:
|
| 309 |
haar_path = getattr(cv2.data, 'haarcascades', None) or ''
|
| 310 |
face_cascade = cv2.CascadeClassifier(os.path.join(haar_path, 'haarcascade_frontalface_default.xml'))
|
|
@@ -322,7 +352,7 @@ def process_video_job(job_id: str):
|
|
| 322 |
if not boxes_haar:
|
| 323 |
try:
|
| 324 |
tmp_detect = faces_root / f"detect_{frame_idx:06d}.jpg"
|
| 325 |
-
cv2.imwrite(str(tmp_detect),
|
| 326 |
detect_result = DeepFace.extract_faces(img_path=str(tmp_detect), detector_backend='opencv', enforce_detection=False)
|
| 327 |
for det in detect_result:
|
| 328 |
facial_area = det.get('facial_area', {})
|
|
@@ -344,7 +374,7 @@ def process_video_job(job_id: str):
|
|
| 344 |
print(f"[{job_id}] Frame {frame_idx}: {len(boxes_haar)} cara(s) detectada(s) con Haar/DeepFace")
|
| 345 |
|
| 346 |
for (top, right, bottom, left) in boxes_haar:
|
| 347 |
-
crop =
|
| 348 |
if crop.size == 0:
|
| 349 |
continue
|
| 350 |
fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
|
|
|
|
| 49 |
|
| 50 |
jobs: Dict[str, dict] = {}
|
| 51 |
|
| 52 |
def normalize_face_lighting(image):
    """Normalize the brightness of a face image using CLAHE.

    Reduces the impact of varying lighting conditions on face embeddings
    by equalizing the luminance channel in LAB color space.

    Args:
        image: BGR image (OpenCV format).

    Returns:
        The brightness-normalized image, still in BGR format.
    """
    import cv2

    # LAB separates luminance (L) from color (a, b), so brightness can be
    # equalized without distorting hue.
    luminance, chroma_a, chroma_b = cv2.split(
        cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    )

    # Contrast Limited Adaptive Histogram Equalization, applied to the
    # luminance channel only.
    equalizer = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    luminance = equalizer.apply(luminance)

    # Reassemble the channels and convert back to BGR.
    recombined = cv2.merge([luminance, chroma_a, chroma_b])
    return cv2.cvtColor(recombined, cv2.COLOR_LAB2BGR)
|
| 78 |
+
|
| 79 |
def hierarchical_cluster_with_min_size(X, max_groups: int, min_cluster_size: int):
|
| 80 |
"""
|
| 81 |
Clustering jerárquico aglomerativo que produce hasta max_groups clusters.
|
|
|
|
| 258 |
# Detección de caras y embeddings (CPU), alineado con 'originales'
|
| 259 |
try:
|
| 260 |
print(f"[{job_id}] Iniciando detección de personajes (CPU, originales)...")
|
| 261 |
+
print(f"[{job_id}] Normalización de brillo activada (CLAHE) para reducir impacto de iluminación")
|
| 262 |
import cv2
|
| 263 |
import numpy as np
|
| 264 |
try:
|
|
|
|
| 302 |
if not ret2:
|
| 303 |
continue
|
| 304 |
frames_processed += 1
|
| 305 |
+
# Normalizar iluminación antes de procesar
|
| 306 |
+
frame_normalized = normalize_face_lighting(frame)
|
| 307 |
+
rgb = cv2.cvtColor(frame_normalized, cv2.COLOR_BGR2RGB)
|
| 308 |
|
| 309 |
if _use_fr and face_recognition is not None:
|
| 310 |
boxes = face_recognition.face_locations(rgb, model="hog") # CPU HOG
|
|
|
|
| 313 |
frames_with_faces += 1
|
| 314 |
print(f"[{job_id}] Frame {frame_idx}: {len(boxes)} cara(s) detectada(s) con face_recognition")
|
| 315 |
for (top, right, bottom, left), e in zip(boxes, encs):
|
| 316 |
+
crop = frame_normalized[top:bottom, left:right]
|
| 317 |
if crop.size == 0:
|
| 318 |
continue
|
| 319 |
fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
|
|
|
|
| 334 |
pass
|
| 335 |
else:
|
| 336 |
try:
|
| 337 |
+
gray = cv2.cvtColor(frame_normalized, cv2.COLOR_BGR2GRAY)
|
| 338 |
try:
|
| 339 |
haar_path = getattr(cv2.data, 'haarcascades', None) or ''
|
| 340 |
face_cascade = cv2.CascadeClassifier(os.path.join(haar_path, 'haarcascade_frontalface_default.xml'))
|
|
|
|
| 352 |
if not boxes_haar:
|
| 353 |
try:
|
| 354 |
tmp_detect = faces_root / f"detect_{frame_idx:06d}.jpg"
|
| 355 |
+
cv2.imwrite(str(tmp_detect), frame_normalized)
|
| 356 |
detect_result = DeepFace.extract_faces(img_path=str(tmp_detect), detector_backend='opencv', enforce_detection=False)
|
| 357 |
for det in detect_result:
|
| 358 |
facial_area = det.get('facial_area', {})
|
|
|
|
| 374 |
print(f"[{job_id}] Frame {frame_idx}: {len(boxes_haar)} cara(s) detectada(s) con Haar/DeepFace")
|
| 375 |
|
| 376 |
for (top, right, bottom, left) in boxes_haar:
|
| 377 |
+
crop = frame_normalized[top:bottom, left:right]
|
| 378 |
if crop.size == 0:
|
| 379 |
continue
|
| 380 |
fn = f"face_{frame_idx:06d}_{saved_count:03d}.jpg"
|