Upload 5 files

- api.py +66 -26
- character_detection.py +34 -20
api.py
CHANGED
@@ -49,6 +49,50 @@ class JobStatus(str, Enum):
 
 jobs: Dict[str, dict] = {}
 
+def hierarchical_cluster_with_min_size(X, max_groups: int, min_cluster_size: int):
+    """
+    Agglomerative hierarchical clustering that produces up to max_groups clusters.
+    Filters out clusters with fewer than min_cluster_size samples (marked as -1/noise).
+
+    Args:
+        X: Embedding array (N, D)
+        max_groups: Maximum number of clusters to form
+        min_cluster_size: Minimum size of a valid cluster
+
+    Returns:
+        Label array (N,) where -1 indicates noise
+    """
+    import numpy as np
+    from scipy.cluster.hierarchy import linkage, fcluster
+    from collections import Counter
+
+    if len(X) == 0:
+        return np.array([])
+
+    if len(X) < min_cluster_size:
+        # If there are fewer samples than the minimum, everything is noise
+        return np.full(len(X), -1, dtype=int)
+
+    # Linkage using Euclidean distance with the 'ward' method
+    Z = linkage(X, method='ward', metric='euclidean')
+
+    # Cut the dendrogram into max_groups clusters
+    labels = fcluster(Z, t=max_groups, criterion='maxclust')
+
+    # fcluster returns 1-indexed labels, convert to 0-indexed
+    labels = labels - 1
+
+    # Filter out small clusters
+    label_counts = Counter(labels)
+    filtered_labels = []
+    for lbl in labels:
+        if label_counts[lbl] >= min_cluster_size:
+            filtered_labels.append(lbl)
+        else:
+            filtered_labels.append(-1)  # Noise
+
+    return np.array(filtered_labels, dtype=int)
+
 @app.get("/")
 def root():
     return {"ok": True, "service": "veureu-engine"}

@@ -70,14 +114,14 @@ async def process_video(
 async def create_initial_casting(
     background_tasks: BackgroundTasks,
     video: UploadFile = File(...),
-
-    min_cluster_size: int = Form(
-
-    voice_min_cluster_size: int = Form(
+    max_groups: int = Form(5),
+    min_cluster_size: int = Form(3),
+    voice_max_groups: int = Form(5),
+    voice_min_cluster_size: int = Form(3),
     max_frames: int = Form(100),
 ):
     """
-    Creates a job to process the video asynchronously.
+    Creates a job to process the video asynchronously using hierarchical clustering.
     Returns a job_id immediately.
     """
     # Save the video to the data folder

@@ -95,9 +139,9 @@ async def create_initial_casting(
         "status": JobStatus.QUEUED,
         "video_path": str(dst_video),
         "video_name": video_name,
-        "
+        "max_groups": int(max_groups),
         "min_cluster_size": int(min_cluster_size),
-        "
+        "voice_max_groups": int(voice_max_groups),
         "voice_min_cluster_size": int(voice_min_cluster_size),
         "max_frames": int(max_frames),
         "created_at": datetime.now().isoformat(),

@@ -173,10 +217,10 @@ def process_video_job(job_id: str):
 
     video_path = job["video_path"]
     video_name = job["video_name"]
-
-    min_cluster_size = job
-
-    v_min_cluster = int(job.get("voice_min_cluster_size",
+    max_groups = int(job.get("max_groups", 5))
+    min_cluster_size = int(job.get("min_cluster_size", 3))
+    v_max_groups = int(job.get("voice_max_groups", 5))
+    v_min_cluster = int(job.get("voice_min_cluster_size", 3))
 
     # Create the folder structure
     base = TEMP_ROOT / video_name

@@ -328,13 +372,11 @@ def process_video_job(job_id: str):
     print(f"[{job_id}] ✓ Frames with faces: {frames_with_faces}")
     print(f"[{job_id}] ✓ Faces detected (embeddings): {len(embeddings)}")
 
-    # Clustering
-    from sklearn.cluster import DBSCAN
+    # Hierarchical clustering of faces
     if embeddings:
         Xf = np.array(embeddings)
-
-
-        labels = DBSCAN(eps=f_eps, min_samples=f_min, metric='euclidean').fit(Xf).labels_.tolist()
+        labels = hierarchical_cluster_with_min_size(Xf, max_groups, min_cluster_size).tolist()
+        print(f"[{job_id}] Hierarchical face clustering: {len(set([l for l in labels if l >= 0]))} clusters")
     else:
         labels = []
 

@@ -472,16 +514,14 @@ def process_video_job(job_id: str):
         except Exception as _efb:
             print(f"[{job_id}] WARN - Audio minimal fallback failed: {_efb}")
 
-    # Voice clustering
-    from sklearn.cluster import DBSCAN
+    # Hierarchical clustering of voices over valid embeddings
     import numpy as np
     voice_embeddings = [seg.get("voice_embedding") for seg in audio_segments if seg.get("voice_embedding")]
     if voice_embeddings:
         try:
             Xv = np.array(voice_embeddings)
-
-
-            v_labels = DBSCAN(eps=v_eps, min_samples=v_min, metric='euclidean').fit(Xv).labels_.tolist()
+            v_labels = hierarchical_cluster_with_min_size(Xv, v_max_groups, v_min_cluster).tolist()
+            print(f"[{job_id}] Hierarchical voice clustering: {len(set([l for l in v_labels if l >= 0]))} clusters")
         except Exception as _e:
             print(f"[{job_id}] WARN - Voice clustering failed: {_e}")
             v_labels = []

@@ -686,17 +726,16 @@ def serve_scene_file(video_name: str, scene_id: str, filename: str):
 @app.post("/detect_scenes")
 async def detect_scenes(
     video: UploadFile = File(...),
-
-    min_cluster_size: int = Form(
+    max_groups: int = Form(5),
+    min_cluster_size: int = Form(3),
     frame_interval_sec: float = Form(0.5),
 ):
     """
-    Detects scene clusters via clustering
+    Detects scene clusters via hierarchical clustering of color histograms.
     Returns a list of scene_clusters structured similarly to characters.
     """
     import cv2
     import numpy as np
-    from sklearn.cluster import DBSCAN
 
     # Save the video temporarily
     video_name = Path(video.filename).stem

@@ -740,7 +779,8 @@ async def detect_scenes(
         return {"scene_clusters": []}
 
     X = np.array(frames)
-    labels =
+    labels = hierarchical_cluster_with_min_size(X, max_groups, min_cluster_size).tolist()
+    print(f"Hierarchical scene clustering: {len(set([l for l in labels if l >= 0]))} clusters")
 
     # Group by label (>=0)
     clusters = {}
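For reference, here is a minimal standalone sketch of the cut-and-filter strategy that the new hierarchical_cluster_with_min_size helper applies (SciPy Ward linkage cut with fcluster's 'maxclust' criterion, then relabelling undersized clusters as noise). The synthetic blobs and the demo() wrapper are illustrative only, not part of this commit:

import numpy as np
from collections import Counter
from scipy.cluster.hierarchy import linkage, fcluster

def demo(max_groups: int = 5, min_cluster_size: int = 3) -> None:
    # Three synthetic "embedding" blobs: two dense groups and one lone outlier
    rng = np.random.default_rng(0)
    X = np.vstack([
        rng.normal(0.0, 0.05, size=(10, 128)),  # dense group A
        rng.normal(1.0, 0.05, size=(8, 128)),   # dense group B
        rng.normal(5.0, 0.05, size=(1, 128)),   # isolated sample
    ])

    # Cut the Ward dendrogram into at most max_groups flat clusters (fcluster is 1-indexed)
    labels = fcluster(linkage(X, method='ward'), t=max_groups, criterion='maxclust') - 1

    # Relabel members of undersized clusters as noise (-1), as the API helper does
    counts = Counter(labels)
    labels = np.array([lbl if counts[lbl] >= min_cluster_size else -1 for lbl in labels])

    # The isolated sample ends up as -1; note that 'maxclust' may also split the
    # dense groups further, since it cuts the tree into up to max_groups clusters.
    print(sorted(Counter(labels.tolist()).items()))

if __name__ == '__main__':
    demo()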
character_detection.py
CHANGED
@@ -3,7 +3,7 @@ Character Detection Module
 Integrates Ana's work for character detection via:
 1. Face extraction and embeddings
 2. Voice extraction and embeddings
-3. Clustering
+3. Agglomerative hierarchical clustering
 4. Per-character folder generation
 """
 import cv2

@@ -12,8 +12,9 @@ import json
 import logging
 import shutil
 from pathlib import Path
-from sklearn.cluster import DBSCAN
 import numpy as np
+from scipy.cluster.hierarchy import linkage, fcluster
+from collections import Counter
 from typing import List, Dict, Any, Tuple
 
 # Imports of the vision and audio tools from the root-level modules

@@ -171,30 +172,43 @@ class CharacterDetector:
         logger.info("Scene extraction temporarily disabled")
         return []
 
-    def cluster_faces(self, embeddings_caras: List[Dict],
+    def cluster_faces(self, embeddings_caras: List[Dict], max_groups: int, min_samples: int) -> np.ndarray:
         """
-        Groups similar faces using
-        Based on Ana's get_face_clusters.
+        Groups similar faces using agglomerative hierarchical clustering.
 
         Args:
             embeddings_caras: List of face embeddings
-
-            min_samples:
+            max_groups: Maximum number of clusters to form
+            min_samples: Minimum size of a valid cluster
 
         Returns:
-            Label array (cluster assigned to each face)
+            Label array (cluster assigned to each face, -1 for noise)
         """
         if not embeddings_caras:
             return np.array([])
 
-        logger.info(f"Clustering {len(embeddings_caras)} faces with
+        logger.info(f"Clustering {len(embeddings_caras)} faces with max_groups={max_groups}, min_samples={min_samples}")
 
         # Extract only the embeddings
         X = np.array([cara['embeddings'] for cara in embeddings_caras])
 
-
-
-
+        if len(X) < min_samples:
+            # If there are fewer samples than the minimum, everything is noise
+            return np.full(len(X), -1, dtype=int)
+
+        # Hierarchical clustering with the ward method
+        Z = linkage(X, method='ward', metric='euclidean')
+        labels = fcluster(Z, t=max_groups, criterion='maxclust') - 1  # 0-indexed
+
+        # Filter out small clusters
+        label_counts = Counter(labels)
+        filtered_labels = []
+        for lbl in labels:
+            if label_counts[lbl] >= min_samples:
+                filtered_labels.append(lbl)
+            else:
+                filtered_labels.append(-1)
+        labels = np.array(filtered_labels, dtype=int)
 
         # Count clusters (excluding noise -1)
         n_clusters = len(set(labels)) - (1 if -1 in labels else 0)

@@ -289,13 +303,13 @@ class CharacterDetector:
 
         return analysis_path
 
-    def detect_characters(self,
+    def detect_characters(self, max_groups: int = 5, min_cluster_size: int = 3,
                           *, start_offset_sec: float = 3.0, extract_every_sec: float = 0.5) -> Tuple[List[Dict], Path, np.ndarray, List[Dict[str, Any]]]:
         """
-        Full character detection pipeline.
+        Full character detection pipeline with hierarchical clustering.
 
         Args:
-
+            max_groups: Maximum number of clusters to form
             min_cluster_size: Minimum cluster size
 
         Returns:

@@ -314,7 +328,7 @@ class CharacterDetector:
         analysis_path = self.save_analysis_json(embeddings_caras, embeddings_voices, embeddings_escenas)
 
         # 5. Face clustering
-        labels = self.cluster_faces(embeddings_caras,
+        labels = self.cluster_faces(embeddings_caras, max_groups, min_cluster_size)
 
         # 6. Create character folders
         characters = self.create_character_folders(embeddings_caras, labels)

@@ -324,16 +338,16 @@ class CharacterDetector:
 
 # Convenience function for use in the API
 def detect_characters_from_video(video_path: str, output_base: str,
-
+                                 max_groups: int = 5, min_cluster_size: int = 3,
                                  video_name: str = None,
                                  *, start_offset_sec: float = 3.0, extract_every_sec: float = 0.5) -> Dict[str, Any]:
     """
-    High-level function to detect characters in a video.
+    High-level function to detect characters in a video using hierarchical clustering.
 
     Args:
         video_path: Path to the video
        output_base: Base directory for saving results
-
+        max_groups: Maximum number of clusters to form
        min_cluster_size: Minimum cluster size
        video_name: Video name (used to build URLs)
 

@@ -341,7 +355,7 @@ def detect_characters_from_video(video_path: str, output_base: str,
         Dict with results: {"characters": [...], "analysis_path": "..."}
     """
     detector = CharacterDetector(video_path, Path(output_base), video_name=video_name)
-    characters, analysis_path, labels, embeddings_caras = detector.detect_characters(
+    characters, analysis_path, labels, embeddings_caras = detector.detect_characters(max_groups, min_cluster_size,
         start_offset_sec=start_offset_sec,
         extract_every_sec=extract_every_sec)
 
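Finally, a hedged usage sketch of the updated convenience function with the new hierarchical-clustering parameters. It assumes the module is importable as character_detection and uses placeholder paths; the call signature and the returned keys ("characters", "analysis_path") are the ones shown in the diff:

from pathlib import Path

from character_detection import detect_characters_from_video

video_path = "data/example.mp4"   # placeholder input video
output_base = "temp/example"      # placeholder results directory

result = detect_characters_from_video(
    video_path,
    output_base,
    max_groups=5,          # at most 5 face clusters from the hierarchical cut
    min_cluster_size=3,    # clusters with fewer than 3 faces are relabelled as noise (-1)
    video_name=Path(video_path).stem,
    start_offset_sec=3.0,  # keyword-only sampling parameters, shown at their defaults
    extract_every_sec=0.5,
)

print(f"Detected {len(result['characters'])} character clusters")
print("Analysis JSON:", result["analysis_path"])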