Upload hf_collect_teacher_metadata.py with huggingface_hub
hf_collect_teacher_metadata.py +846 -0
hf_collect_teacher_metadata.py
ADDED
@@ -0,0 +1,846 @@
#!/usr/bin/env python3
"""
HuggingFace Jobs script: Collect teacher outputs with metadata tracking.

Saves for each image:
- Full SAM 3D Body outputs (.npz)
- Metadata: num_humans, image_width, image_height, processing_time
"""
import argparse
import os
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')
import logging
import sys

# Configure logging to stdout (so HF Jobs can capture it)
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,
    force=True
)
logger = logging.getLogger(__name__)

# Also flush stdout immediately (line buffering), where supported
if hasattr(sys.stdout, 'reconfigure'):
    sys.stdout.reconfigure(line_buffering=True)

import numpy as np
import torch
from datasets import load_dataset, Dataset as HFDataset, Features, Value
from PIL import Image
import cv2
from typing import List, Dict, Optional
import time
import json
from collections import defaultdict
from transformers import AutoImageProcessor, AutoModelForImageClassification
import subprocess

# SAM 3D Body imports
sam_repo = Path(__file__).parent.parent / "sam-3d-body"
if str(sam_repo) not in sys.path:
    sys.path.insert(0, str(sam_repo))
from sam_3d_body import load_sam_3d_body, SAM3DBodyEstimator

# Set environment variable for headless (OSMesa) rendering
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'


class GazeEstimator:
    """Gaze estimation using L2CS-Net"""

    def __init__(self, device='cuda'):
        self.device = device
        logger.info("Installing L2CS-Net...")
        # Install L2CS-Net package
        try:
            subprocess.run(
                ['pip', 'install', '-q', 'git+https://github.com/edavalosanaya/L2CS-Net.git@main'],
                check=True,
                capture_output=True
            )
            logger.info("✓ L2CS-Net installed")
        except Exception as e:
            print(f"Warning: L2CS-Net installation failed: {e}")

        print("Loading L2CS-Net gaze estimator...")
        try:
            from l2cs import Pipeline
            # Use Gaze360 pretrained weights (better for unconstrained images)
            self.pipeline = Pipeline(
                weights='L2CSNet_gaze360.pkl',  # Will download automatically
                arch='ResNet50',
                device=device
            )
            self.enabled = True
            print("✓ L2CS-Net gaze estimator loaded")
        except Exception as e:
            print(f"Warning: Could not load L2CS-Net: {e}")
            print("Gaze estimation will be disabled")
            self.enabled = False

    def estimate_gaze(self, image_pil, bbox):
        """
        Estimate gaze direction from face crop.

        Args:
            image_pil: PIL Image
            bbox: [x1, y1, x2, y2] in pixel coordinates

        Returns:
            dict with 'pitch' and 'yaw' in degrees, or None if failed
        """
        if not self.enabled:
            return None

        try:
            # Convert PIL to numpy
            image_np = np.array(image_pil)

            # L2CS expects BGR format
            if image_np.shape[2] == 3:
                image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
            else:
                image_bgr = image_np

            # Run gaze estimation
            results = self.pipeline.step(image_bgr)

            if results and len(results) > 0:
                # Find detection closest to our bbox
                x1, y1, x2, y2 = bbox
                bbox_center = np.array([(x1 + x2) / 2, (y1 + y2) / 2])

                best_result = None
                min_dist = float('inf')

                for result in results:
                    # L2CS returns face bbox in result
                    face_bbox = result.get('bbox', None)
                    if face_bbox is not None:
                        fx1, fy1, fx2, fy2 = face_bbox
                        face_center = np.array([(fx1 + fx2) / 2, (fy1 + fy2) / 2])
                        dist = np.linalg.norm(bbox_center - face_center)
                        if dist < min_dist:
                            min_dist = dist
                            best_result = result

                if best_result is not None:
                    # Extract pitch and yaw (in degrees)
                    pitch = float(best_result.get('pitch', 0))
                    yaw = float(best_result.get('yaw', 0))
                    return {'pitch': pitch, 'yaw': yaw}

            return None

        except Exception as e:
            print(f"Gaze estimation error: {e}")
            return None


class FaceEmbedder:
    """Face embedding extraction using InsightFace ArcFace (ResNet100-IR)"""

    def __init__(self, device='cuda'):
        self.device = device
        print("Installing InsightFace...")
        # Install InsightFace package
        try:
            subprocess.run(
                ['pip', 'install', '-q', 'insightface', 'onnxruntime-gpu' if device.type == 'cuda' else 'onnxruntime'],
                check=True,
                capture_output=True
            )
            print("✓ InsightFace installed")
        except Exception as e:
            print(f"Warning: InsightFace installation failed: {e}")

        print("Loading InsightFace ArcFace (ResNet100-IR)...")
        try:
            import insightface
            from insightface.app import FaceAnalysis

            # Initialize with ArcFace ResNet100 model
            # det_size=(640, 640) for better detection
            self.app = FaceAnalysis(
                name='buffalo_l',  # Uses ResNet100 backbone
                providers=['CUDAExecutionProvider'] if device.type == 'cuda' else ['CPUExecutionProvider']
            )
            self.app.prepare(ctx_id=0 if device.type == 'cuda' else -1, det_size=(640, 640))
            self.enabled = True
            print("✓ InsightFace ArcFace loaded (ResNet100-IR)")
            print(" Model: buffalo_l (ResNet100 + ArcFace head)")
            print(" Robust to: occlusion, lighting, blur, pose variations, aging")
        except Exception as e:
            print(f"Warning: Could not load InsightFace: {e}")
            print("Face embeddings will be disabled")
            self.enabled = False

    def extract_embedding(self, image_pil, bbox=None, keypoints_2d=None):
        """
        Extract 512-dimensional ArcFace embedding from face.

        Args:
            image_pil: PIL Image
            bbox: [x1, y1, x2, y2] in pixel coordinates (optional, for cropping)
            keypoints_2d: Face keypoints for alignment (optional)

        Returns:
            dict with 'embedding' (512-dim vector), 'det_score' (confidence), or None if failed
        """
        if not self.enabled:
            return None

        try:
            # Convert PIL to numpy BGR (InsightFace expects BGR)
            image_np = np.array(image_pil)
            if image_np.shape[2] == 3:
                image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
            else:
                image_bgr = image_np

            # Optionally crop to bbox region for efficiency
            if bbox is not None:
                x1, y1, x2, y2 = map(int, bbox)
                # Add padding for better detection
                pad = 20
                h, w = image_bgr.shape[:2]
                x1 = max(0, x1 - pad)
                y1 = max(0, y1 - pad)
                x2 = min(w, x2 + pad)
                y2 = min(h, y2 + pad)
                image_bgr = image_bgr[y1:y2, x1:x2]

            # Detect and extract faces
            faces = self.app.get(image_bgr)

            if len(faces) == 0:
                return None

            # Use the largest/most confident face
            face = max(faces, key=lambda x: x.det_score)

            # Extract embedding (512-dim ArcFace feature)
            embedding = face.embedding  # numpy array, shape (512,)
            det_score = float(face.det_score)

            # Normalize embedding (L2 norm = 1)
            embedding_norm = embedding / np.linalg.norm(embedding)

            return {
                'embedding': embedding_norm.astype(np.float32).tolist(),
                'det_score': det_score,
                'embedding_dim': len(embedding)
            }

        except Exception as e:
            print(f"Face embedding extraction error: {e}")
            return None


class NSFWClassifier:
    """NSFW classification using EraX-NSFW-V1.0 YOLO model"""

    def __init__(self, device='cuda'):
        self.device = device
        print("Loading EraX-NSFW YOLO model...")
        try:
            # Download the model if not already downloaded
            from huggingface_hub import snapshot_download
            snapshot_download(repo_id="erax-ai/EraX-NSFW-V1.0", local_dir="./", force_download=False)

            from ultralytics import YOLO
            # Use the m model for better accuracy
            self.model = YOLO('erax_nsfw_yolo11m.pt')
            self.enabled = True
            print("✓ EraX-NSFW classifier loaded (YOLO11m)")
        except Exception as e:
            print(f"Warning: Could not load EraX-NSFW: {e}")
            print("NSFW classification will be disabled")
            self.enabled = False

    def classify_crop(self, image_pil, bbox):
        """
        Classify NSFW content in a crop defined by bbox.

        Args:
            image_pil: PIL Image
            bbox: [x1, y1, x2, y2] in pixel coordinates

        Returns:
            list of detection dicts (class, confidence, bbox), or None if failed
        """
        if not self.enabled:
            return None

        try:
            # Convert bbox to ultralytics format [x1, y1, x2, y2]
            x1, y1, x2, y2 = bbox

            # Crop the image
            crop = image_pil.crop((x1, y1, x2, y2))

            # Convert PIL to numpy for ultralytics
            crop_np = np.array(crop)

            # Run inference with confidence and IoU thresholds
            results = self.model(crop_np, conf=0.2, iou=0.3, verbose=False)

            detections = []
            if len(results) > 0 and len(results[0].boxes) > 0:
                boxes = results[0].boxes
                for box in boxes:
                    class_id = int(box.cls.item())
                    confidence = box.conf.item()

                    # Model classes: ['anus', 'make_love', 'nipple', 'penis', 'vagina']
                    class_names = ['anus', 'make_love', 'nipple', 'penis', 'vagina']
                    class_name = class_names[class_id] if class_id < len(class_names) else f'class_{class_id}'

                    # Get bbox relative to crop and convert to absolute coordinates
                    dx1, dy1, dx2, dy2 = box.xyxy[0].tolist()
                    abs_bbox = [x1 + dx1, y1 + dy1, x1 + dx2, y1 + dy2]

                    detections.append({
                        'class': class_name,
                        'confidence': confidence,
                        'bbox': abs_bbox
                    })

            if detections:
                return detections
            else:
                # No detections - consider safe
                return [{'class': 'safe', 'confidence': 1.0, 'bbox': [x1, y1, x2, y2]}]

        except Exception as e:
            print(f"! NSFW classification failed: {e}")
            return None


def compute_face_orientation(vertices, keypoints_3d):
    """
    Compute face orientation vector from 3D mesh vertices and keypoints.
    Uses nose→head_top vector as face direction.

    Args:
        vertices: (N, 3) array of 3D vertices
        keypoints_3d: (70, 3) array of 3D keypoints (MHR70 format)

    Returns:
        (3,) normalized face orientation vector [x, y, z] or None
    """
    if vertices is None or keypoints_3d is None:
        return None

    try:
        # MHR70 keypoint indices (from sam-3d-body/sam_3d_body/metadata/mhr70.py)
        # 0: nose, 1: left-eye, 2: right-eye
        # Check if face keypoints are valid (not all zeros)
        nose_3d = keypoints_3d[0]
        left_eye_3d = keypoints_3d[1]
        right_eye_3d = keypoints_3d[2]

        # Verify face keypoints are valid (not at origin)
        if (np.linalg.norm(nose_3d) < 1e-6 or
                np.linalg.norm(left_eye_3d) < 1e-6 or
                np.linalg.norm(right_eye_3d) < 1e-6):
            return None  # Face keypoints not detected

        # Find topmost vertex as head top (highest Y coordinate in body frame)
        head_top_idx = np.argmax(vertices[:, 1])  # Y is up in SMPL convention
        head_top_3d = vertices[head_top_idx]

        # Face orientation = nose → head_top (points upward/forward from face)
        face_orientation = head_top_3d - nose_3d

        # Normalize
        norm = np.linalg.norm(face_orientation)
        if norm > 1e-6:
            face_orientation = face_orientation / norm
            return face_orientation.astype(np.float32)

        return None

    except Exception as e:
        print(f"Face orientation computation failed: {e}")
        return None


def compute_bbox_from_keypoints(keypoints_2d, indices):
    """
    Compute bounding box from a set of 2D keypoints.

    Args:
        keypoints_2d: (70, 2) array of 2D keypoints
        indices: list of keypoint indices to include

    Returns:
        [x1, y1, x2, y2] or None if no valid keypoints
    """
    if keypoints_2d is None or len(keypoints_2d) < max(indices) + 1:
        return None

    valid_points = []
    for idx in indices:
        kp = keypoints_2d[idx]
        if kp[0] >= 0 and kp[1] >= 0:  # Check if keypoint is valid (not -1, -1)
            valid_points.append(kp)

    if len(valid_points) < 2:  # Need at least 2 points for a bbox
        return None

    points = np.array(valid_points)
    x1, y1 = points.min(axis=0)
    x2, y2 = points.max(axis=0)

    # Add some padding (10% of bbox size)
    width = x2 - x1
    height = y2 - y1
    padding_x = width * 0.1
    padding_y = height * 0.1

    x1 = max(0, x1 - padding_x)
    y1 = max(0, y1 - padding_y)
    x2 = x2 + padding_x
    y2 = y2 + padding_y

    return [float(x1), float(y1), float(x2), float(y2)]


def collect_for_image(estimator, nsfw_classifier, gaze_estimator, face_embedder, image_pil, image_id: str, out_dir: Path, faces: np.ndarray) -> Optional[Dict]:
    """
    Process PIL image and save .npz with ALL outputs + metadata + NSFW scores per human.

    Returns metadata dict, or None if the output .npz already exists (skipped).
    """
    out_path = out_dir / f"{image_id}.npz"
    if out_path.exists():
        return None

    # Get image dimensions
    img_width, img_height = image_pil.size

    # Convert PIL to numpy array
    image_rgb = np.array(image_pil.convert('RGB'))
    image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

    start_time = time.time()

    try:
        # Use process_one_image
        outputs = estimator.process_one_image(image_bgr)

        processing_time = time.time() - start_time

        if not outputs:
            # No humans detected
            return {
                'image_id': image_id,
                'num_humans': 0,
                'image_width': img_width,
                'image_height': img_height,
                'processing_time_ms': int(processing_time * 1000),
                'status': 'no_detection',
                'humans': []
            }

        num_humans = len(outputs)

        # Process each detected human
        humans_data = []
        for human_idx, pred in enumerate(outputs):
            # Get 3D body outputs
            vertices = pred.get('pred_vertices')
            cam_t = pred.get('pred_cam_t')
            focal_length = pred.get('focal_length')
            kpts2d = pred.get('pred_keypoints_2d')
            kpts3d = pred.get('pred_keypoints_3d')

            # Get bounding box from detection
            bbox = pred.get('bbox', None)  # [x1, y1, x2, y2]

            # Check if we have valid face keypoints (nose, eyes) in the image
            has_face = False
            if kpts2d is not None and kpts3d is not None and len(kpts2d) >= 3 and len(kpts3d) >= 3:
                # Get 2D projected keypoints (nose, left eye, right eye)
                nose_2d = kpts2d[0]  # [x, y]
                left_eye_2d = kpts2d[1]
                right_eye_2d = kpts2d[2]

                # Get 3D keypoints to check they exist
                nose_3d = kpts3d[0]
                left_eye_3d = kpts3d[1]
                right_eye_3d = kpts3d[2]

                # Check if face keypoints are valid:
                # 1. 3D keypoints are not at origin
                # 2. 2D keypoints are inside image bounds
                keypoints_valid_3d = (np.linalg.norm(nose_3d) > 1e-6 and
                                      np.linalg.norm(left_eye_3d) > 1e-6 and
                                      np.linalg.norm(right_eye_3d) > 1e-6)

                keypoints_in_image = True
                if keypoints_valid_3d:
                    # Check if face keypoints are within image bounds
                    for kp in [nose_2d, left_eye_2d, right_eye_2d]:
                        if (kp[0] < 0 or kp[0] >= img_width or
                                kp[1] < 0 or kp[1] >= img_height):
                            keypoints_in_image = False
                            break

                has_face = keypoints_valid_3d and keypoints_in_image

            # Compute face orientation from mesh (only if face visible in image)
            face_orientation = None
            if has_face:
                face_orientation = compute_face_orientation(vertices, kpts3d)

            # Estimate gaze direction (only if face visible and bbox available)
            gaze_direction = None
            if has_face and bbox is not None and gaze_estimator is not None:
                try:
                    gaze_direction = gaze_estimator.estimate_gaze(image_pil, bbox)
                except Exception as e:
                    print(f"! Gaze estimation failed for {image_id} human {human_idx}: {e}")
                    gaze_direction = None

            # Extract face embedding (only if face visible)
            face_embedding = None
            if has_face and bbox is not None and face_embedder is not None:
                try:
                    face_embedding = face_embedder.extract_embedding(image_pil, bbox, kpts2d)
                except Exception as e:
                    print(f"! Face embedding extraction failed for {image_id} human {human_idx}: {e}")
                    face_embedding = None

            # NSFW classification for this human
            nsfw_scores = None
            if bbox is not None and nsfw_classifier is not None:
                try:
                    nsfw_scores = nsfw_classifier.classify_crop(image_pil, bbox)
                except Exception as e:
                    print(f"! NSFW classification failed for {image_id} human {human_idx}: {e}")
                    nsfw_scores = None

            # Compute hand and foot bboxes from keypoints
            left_hand_bbox = None
            right_hand_bbox = None
            left_foot_bbox = None
            right_foot_bbox = None

            if kpts2d is not None:
                # Left hand keypoints: indices 42-61 (left_thumb4 to left_pinky_finger_third_joint)
                left_hand_indices = list(range(42, 62))
                left_hand_bbox = compute_bbox_from_keypoints(kpts2d, left_hand_indices)

                # Right hand keypoints: indices 21-40 (right_thumb4 to right_pinky_finger_third_joint)
                right_hand_indices = list(range(21, 41))
                right_hand_bbox = compute_bbox_from_keypoints(kpts2d, right_hand_indices)

                # Left foot keypoints: indices 15-17 (left_big_toe, left_small_toe, left_heel)
                left_foot_indices = [15, 16, 17]
                left_foot_bbox = compute_bbox_from_keypoints(kpts2d, left_foot_indices)

                # Right foot keypoints: indices 18-20 (right_big_toe, right_small_toe, right_heel)
                right_foot_indices = [18, 19, 20]
                right_foot_bbox = compute_bbox_from_keypoints(kpts2d, right_foot_indices)

            humans_data.append({
                'human_idx': human_idx,
                'bbox': bbox.tolist() if bbox is not None else None,
                'left_hand_bbox': left_hand_bbox,
                'right_hand_bbox': right_hand_bbox,
                'left_foot_bbox': left_foot_bbox,
                'right_foot_bbox': right_foot_bbox,
                'has_face': has_face,
                'face_orientation': face_orientation.tolist() if face_orientation is not None else None,
                'gaze_direction': gaze_direction,
                'face_embedding': face_embedding,
                'nsfw_scores': nsfw_scores,
                'has_mesh': vertices is not None
            })

        # Save first detected person's mesh (or could save all in future)
        pred = outputs[0]
        vertices = pred.get('pred_vertices')
        cam_t = pred.get('pred_cam_t')
        focal_length = pred.get('focal_length')
        kpts2d = pred.get('pred_keypoints_2d')
        kpts3d = pred.get('pred_keypoints_3d')
        bbox_0 = pred.get('bbox', None)

        # Save to npz with all humans metadata
        np.savez_compressed(
            out_path,
            # First human mesh data
            vertices=vertices.astype(np.float32) if vertices is not None else None,
            faces=faces.astype(np.int32),
            cam_t=cam_t.astype(np.float32) if cam_t is not None else None,
            focal_length=np.array([focal_length], dtype=np.float32) if focal_length is not None else None,
            keypoints_2d=kpts2d.astype(np.float32) if kpts2d is not None else None,
            keypoints_3d=kpts3d.astype(np.float32) if kpts3d is not None else None,
            bbox=np.array(bbox_0, dtype=np.float32) if bbox_0 is not None else None,
            # Image metadata
            image_id=image_id,
            num_humans=num_humans,
            image_width=img_width,
            image_height=img_height,
            # All humans data (as JSON string in npz)
            humans_metadata=json.dumps(humans_data)
        )

        return {
            'image_id': image_id,
            'num_humans': num_humans,
            'image_width': img_width,
            'image_height': img_height,
            'processing_time_ms': int(processing_time * 1000),
            'status': 'success',
            'npz_size_bytes': out_path.stat().st_size,
            'humans': humans_data
        }

    except Exception as e:
        processing_time = time.time() - start_time
        print(f"! Error on {image_id}: {e}")
        return {
            'image_id': image_id,
            'num_humans': 0,
            'image_width': img_width,
            'image_height': img_height,
            'processing_time_ms': int(processing_time * 1000),
            'status': 'error',
            'error_message': str(e),
            'humans': []
        }


def main():
    logger.info("="*60)
    logger.info("SAM 3D Body Metadata Collection with Face Features")
    logger.info("="*60)
    sys.stdout.flush()

    ap = argparse.ArgumentParser()
    ap.add_argument('--input-dataset', type=str, required=True)
    ap.add_argument('--output-dataset', type=str, required=True)
    ap.add_argument('--split', type=str, default='train')
    ap.add_argument('--checkpoint', type=str, default='checkpoints/sam-3d-body-dinov3/model.ckpt')
    ap.add_argument('--mhr-path', type=str, default='checkpoints/sam-3d-body-dinov3/assets/mhr_model.pt')
    ap.add_argument('--limit', type=int, default=0)
    ap.add_argument('--shard-index', type=int, default=0)
    ap.add_argument('--num-shards', type=int, default=1)
    args = ap.parse_args()

    logger.info(f"Arguments: {vars(args)}")
    sys.stdout.flush()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"Using device: {device}")
    if torch.cuda.is_available():
        logger.info(f" GPU: {torch.cuda.get_device_name(0)}")
        logger.info(f" CUDA version: {torch.version.cuda}")
    sys.stdout.flush()

    # Load gaze estimator
    logger.info("Loading gaze estimator...")
    sys.stdout.flush()
    gaze_estimator = GazeEstimator(device=device)

    # Load face embedder (InsightFace ArcFace)
    logger.info("Loading face embedder (InsightFace ArcFace)...")
    sys.stdout.flush()
    face_embedder = FaceEmbedder(device=device)

    # Load NSFW classifier
    logger.info("Loading NSFW classifier...")
    sys.stdout.flush()
    nsfw_classifier = NSFWClassifier(device=device)

    # Load teacher
    logger.info("Loading SAM 3D Body teacher...")
    sys.stdout.flush()
    start_load = time.time()
    model, model_cfg = load_sam_3d_body(args.checkpoint, device=device, mhr_path=args.mhr_path)
    model.eval()

    teacher = SAM3DBodyEstimator(
        sam_3d_body_model=model,
        model_cfg=model_cfg,
        human_detector=None,
        human_segmentor=None,
        fov_estimator=None,
    )
    logger.info(f"✓ Model loaded in {time.time() - start_load:.1f}s")
    sys.stdout.flush()

    # Load dataset
    logger.info(f"Loading dataset {args.input_dataset}...")
    sys.stdout.flush()
    start_ds = time.time()
    ds = load_dataset(args.input_dataset, split=args.split, streaming=True)

    if args.num_shards > 1:
        ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
        logger.info(f"Using shard {args.shard_index+1}/{args.num_shards} (~{100/args.num_shards:.1f}% of dataset)")

    if args.limit and args.limit > 0:
        ds = ds.take(args.limit)
    logger.info(f"✓ Dataset ready in {time.time() - start_ds:.1f}s")
    sys.stdout.flush()

    # Process
    out_dir = Path('teacher_labels')
    out_dir.mkdir(exist_ok=True)

    # Get mesh faces (same for all images)
    faces = teacher.faces
    logger.info(f"Mesh topology: {faces.shape[0]} faces")
    logger.info("="*60)
    logger.info("Starting image processing...")
    logger.info("="*60)
    sys.stdout.flush()

    processed = 0
    failed = 0
    no_detection = 0
    metadata_records = []
    start_process = time.time()

    for i, sample in enumerate(ds, 1):
        image_pil = sample['image']
        image_id = sample.get('image_path', f'img_{i:06d}')
        image_id = Path(image_id).stem if image_id else f'img_{i:06d}'

        metadata = collect_for_image(teacher, nsfw_classifier, gaze_estimator, face_embedder, image_pil, image_id, out_dir, faces)

        if metadata:
            metadata_records.append(metadata)

            if metadata['status'] == 'success':
                processed += 1
            elif metadata['status'] == 'no_detection':
                no_detection += 1
            else:
                failed += 1

        if i % 10 == 0:
            elapsed = time.time() - start_process
            speed = processed / elapsed if elapsed > 0 else 0
            logger.info(f"[{i}] success={processed}, no_detect={no_detection}, failed={failed}, speed={speed:.2f} img/s")
            sys.stdout.flush()

    total_time = time.time() - start_process
    logger.info("="*60)
    logger.info("✓ Processing complete!")
    logger.info(f" Processed: {processed} images in {total_time:.1f}s ({processed/total_time:.2f} img/s)")
    logger.info(f" No detection: {no_detection}, Failed: {failed}")
    logger.info("="*60)
    sys.stdout.flush()

    # Compute metadata statistics
    if metadata_records:
        successful = [m for m in metadata_records if m['status'] == 'success']
        if successful:
            total_humans = sum(m['num_humans'] for m in successful)
            avg_humans = total_humans / len(successful)
            avg_width = sum(m['image_width'] for m in successful) / len(successful)
            avg_height = sum(m['image_height'] for m in successful) / len(successful)
            avg_time = sum(m['processing_time_ms'] for m in successful) / len(successful)

            # NSFW statistics
            nsfw_stats = defaultdict(list)
            for m in successful:
                for human in m.get('humans', []):
                    nsfw_list = human.get('nsfw_scores') or []  # nsfw_scores can be None
                    for detection in nsfw_list:
                        label = detection['class']
                        score = detection['confidence']
                        nsfw_stats[label].append(score)

            print(f"\nMetadata Statistics:")
            print(f" Total humans detected: {total_humans}")
            print(f" Avg humans per image: {avg_humans:.2f}")
            print(f" Avg image size: {avg_width:.0f}x{avg_height:.0f}")
            print(f" Avg processing time: {avg_time:.0f}ms")

            if nsfw_stats:
                print(f"\nNSFW Classification Statistics:")
                for label, scores in nsfw_stats.items():
                    avg_score = sum(scores) / len(scores)
                    max_score = max(scores)
                    print(f" {label}: avg={avg_score:.3f}, max={max_score:.3f}, n={len(scores)}")

            # Face orientation and gaze statistics
            face_orientation_count = sum(1 for m in successful for h in m.get('humans', []) if h.get('face_orientation'))
            gaze_count = sum(1 for m in successful for h in m.get('humans', []) if h.get('gaze_direction'))
            face_embedding_count = sum(1 for m in successful for h in m.get('humans', []) if h.get('face_embedding'))
            print(f"\nFace Orientation & Gaze Statistics:")
            print(f" Face orientations computed: {face_orientation_count}/{total_humans}")
            print(f" Gaze directions estimated: {gaze_count}/{total_humans}")
            print(f" Face embeddings extracted: {face_embedding_count}/{total_humans}")

            # Face embedding quality statistics
            if face_embedding_count > 0:
                det_scores = [h['face_embedding']['det_score'] for m in successful
                              for h in m.get('humans', []) if h.get('face_embedding')]
                avg_det_score = sum(det_scores) / len(det_scores)
                min_det_score = min(det_scores)
                print(f"\nFace Embedding Quality (InsightFace ArcFace):")
                print(f" Model: ResNet100-IR + ArcFace head (512-dim)")
                print(f" Avg detection confidence: {avg_det_score:.3f}")
                print(f" Min detection confidence: {min_det_score:.3f}")

    # Save metadata JSON locally
    metadata_path = Path('metadata.json')
    with open(metadata_path, 'w') as f:
        json.dump(metadata_records, f, indent=2)
    print(f"Saved metadata to {metadata_path}")

    # Upload labels
    print(f"\nUploading labels to {args.output_dataset}...")

    label_files = sorted(out_dir.glob('*.npz'))
    data = {'image_id': [], 'label_data': []}

    for npz_path in label_files:
        data['image_id'].append(npz_path.stem)
        with open(npz_path, 'rb') as f:
            data['label_data'].append(f.read())

    features = Features({
        'image_id': Value('string'),
        'label_data': Value('binary'),
    })

    label_ds = HFDataset.from_dict(data, features=features)
    label_ds.push_to_hub(
        args.output_dataset,
        split=args.split,
        token=os.environ.get('HF_TOKEN'),
        private=True,
    )
    logger.info(f"✓ Uploaded {len(label_files)} labels to {args.output_dataset}")
    sys.stdout.flush()

    # Upload metadata JSON
    from huggingface_hub import HfApi
    api = HfApi(token=os.environ.get('HF_TOKEN'))
    api.upload_file(
        path_or_fileobj=str(metadata_path),
        path_in_repo=f'metadata_shard{args.shard_index}.json',
        repo_id=args.output_dataset,
        repo_type='dataset'
    )
    logger.info(f"✓ Uploaded metadata to {args.output_dataset}/metadata_shard{args.shard_index}.json")
    sys.stdout.flush()


if __name__ == '__main__':
    main()
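
For downstream use, a minimal sketch of how the labels pushed by this script could be read back. The dataset name is a placeholder for whatever was passed as --output-dataset; the column names ('image_id', 'label_data') and the .npz keys ('vertices', 'faces', 'num_humans', 'humans_metadata') simply mirror what the script writes above.

# Sketch: decode one label record produced by this script.
# "your-org/teacher-labels" is a placeholder output dataset name.
import io
import json
import numpy as np
from datasets import load_dataset

labels = load_dataset("your-org/teacher-labels", split="train")
record = labels[0]

# 'label_data' holds the raw bytes of the compressed .npz written above;
# allow_pickle=True is needed because optional fields may be stored as None.
npz = np.load(io.BytesIO(record["label_data"]), allow_pickle=True)

vertices = npz["vertices"]    # (V, 3) mesh of the first detected person
faces = npz["faces"]          # (F, 3) mesh topology
humans = json.loads(npz["humans_metadata"].item())  # per-human metadata list

print(record["image_id"], int(npz["num_humans"]), len(humans))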
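
Similarly, a hedged sketch for merging the per-shard metadata JSON files that main() uploads to the output dataset repo; the repo id and shard count are assumptions for illustration.

# Sketch: collect the metadata_shard{N}.json files uploaded by main().
# Repo id and shard count are placeholders.
import json
from huggingface_hub import hf_hub_download

records = []
for shard in range(4):  # assuming the job was launched with --num-shards 4
    path = hf_hub_download(
        repo_id="your-org/teacher-labels",
        filename=f"metadata_shard{shard}.json",
        repo_type="dataset",
    )
    with open(path) as f:
        records.extend(json.load(f))

print(len(records), "images,", sum(r["num_humans"] for r in records), "humans")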