vlordier committed on
Commit
cf992fe
·
verified ·
1 Parent(s): 793169b

Upload hf_collect_teacher_metadata.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_collect_teacher_metadata.py +61 -54
hf_collect_teacher_metadata.py CHANGED
@@ -35,6 +35,8 @@ import cv2
35
  from typing import List, Dict, Optional
36
  import time
37
  import functools
 
 
38
 
39
  # SAM 3D Body imports
40
  import sys
@@ -80,63 +82,58 @@ class GazeEstimator:
80
  print("Gaze estimation will be disabled")
81
  self.enabled = False
82
 
83
- def estimate_gaze(self, image_pil, bbox):
84
  """
85
- Estimate gaze direction from face crop.
86
 
87
  Args:
88
- image_pil: PIL Image
89
- bbox: [x1, y1, x2, y2] in pixel coordinates
90
-
91
- Returns:
92
- dict with 'pitch' and 'yaw' in degrees, or None if failed
93
  """
94
  if not self.enabled:
95
  return None
96
 
97
  try:
98
- # Convert PIL to numpy
99
- image_np = np.array(image_pil)
100
-
101
- # L2CS expects BGR format
102
- if image_np.shape[2] == 3:
103
- image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
104
- else:
105
- image_bgr = image_np
106
-
107
- # Run gaze estimation
108
- results = self.pipeline.step(image_bgr)
109
-
110
- if results and len(results) > 0:
111
- # Find detection closest to our bbox
112
- x1, y1, x2, y2 = bbox
113
- bbox_center = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
114
-
115
- best_result = None
116
- min_dist = float('inf')
117
-
118
- for result in results:
119
- # L2CS returns face bbox in result
120
- face_bbox = result.get('bbox', None)
121
- if face_bbox is not None:
122
- fx1, fy1, fx2, fy2 = face_bbox
123
- face_center = np.array([(fx1 + fx2) / 2, (fy1 + fy2) / 2])
124
- dist = np.linalg.norm(bbox_center - face_center)
125
- if dist < min_dist:
126
- min_dist = dist
127
- best_result = result
128
-
129
- if best_result is not None:
130
- # Extract pitch and yaw (in degrees)
131
- pitch = float(best_result.get('pitch', 0))
132
- yaw = float(best_result.get('yaw', 0))
133
- return {'pitch': pitch, 'yaw': yaw}
134
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  return None
136
-
137
  except Exception as e:
138
  print(f"Gaze estimation error: {e}")
139
  return None
 
 
 
 
 
 
 
 
 
 
140
 
141
 
142
  class FaceEmbedder:
@@ -177,12 +174,12 @@ class FaceEmbedder:
177
  print("Face embeddings will be disabled")
178
  self.enabled = False
179
 
180
- def extract_embedding(self, image_pil, bbox=None, keypoints_2d=None):
181
  """
182
  Extract 512-dimensional ArcFace embedding from face.
183
 
184
  Args:
185
- image_pil: PIL Image
186
  bbox: [x1, y1, x2, y2] in pixel coordinates (optional, for cropping)
187
  keypoints_2d: Face keypoints for alignment (optional)
188
 
@@ -193,12 +190,15 @@ class FaceEmbedder:
193
  return None
194
 
195
  try:
196
- # Convert PIL to numpy BGR (InsightFace expects BGR)
197
- image_np = np.array(image_pil)
198
- if image_np.shape[2] == 3:
199
- image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
 
 
 
200
  else:
201
- image_bgr = image_np
202
 
203
  # Optionally crop to bbox region for efficiency
204
  if bbox is not None:
@@ -427,14 +427,21 @@ def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder
427
  humans_data_list = []
428
  outputs_list = []
429
  image_rgbs = [] # cache RGB numpy arrays for later crops
 
 
430
 
431
  for img_idx, image_pil in enumerate(images):
432
  img_width, img_height = image_pil.size
433
  image_rgb = np.array(image_pil.convert('RGB'))
434
  image_rgbs.append(image_rgb)
435
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
 
 
 
 
436
 
437
- outputs = teacher.process_one_image(image_bgr)
 
438
  outputs_list.append(outputs)
439
 
440
  if not outputs:
@@ -481,14 +488,14 @@ def process_batch(batch, teacher, nsfw_classifier, gaze_estimator, face_embedder
481
  gaze_direction = None
482
  if has_face and bbox is not None and gaze_estimator is not None:
483
  try:
484
- gaze_direction = gaze_estimator.estimate_gaze(image_pil, bbox)
485
  except Exception as e:
486
  gaze_direction = None
487
 
488
  face_embedding = None
489
  if has_face and bbox is not None and face_embedder is not None:
490
  try:
491
- face_embedding = face_embedder.extract_embedding(image_pil, bbox, kpts2d)
492
  except Exception as e:
493
  face_embedding = None
494
 
 
35
  from typing import List, Dict, Optional
36
  import time
37
  import functools
38
+ import json
39
+ from collections import defaultdict
40
 
41
  # SAM 3D Body imports
42
  import sys
 
82
  print("Gaze estimation will be disabled")
83
  self.enabled = False
84
 
85
+ def estimate_gaze(self, bbox, detections=None, image_bgr=None):
86
  """
87
+ Estimate gaze direction for a bbox using optional precomputed detections.
88
 
89
  Args:
90
+ bbox: [x1, y1, x2, y2]
91
+ detections: cached L2CS pipeline outputs for the full image
92
+ image_bgr: optional BGR image (used only when detections missing)
 
 
93
  """
94
  if not self.enabled:
95
  return None
96
 
97
  try:
98
+ if detections is None and image_bgr is not None:
99
+ detections = self.pipeline.step(image_bgr)
100
+ if not detections:
101
+ return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
 
103
+ x1, y1, x2, y2 = bbox
104
+ bbox_center = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
105
+ best_result = None
106
+ min_dist = float('inf')
107
+
108
+ for result in detections:
109
+ face_bbox = result.get('bbox')
110
+ if face_bbox is None:
111
+ continue
112
+ fx1, fy1, fx2, fy2 = face_bbox
113
+ face_center = np.array([(fx1 + fx2) / 2, (fy1 + fy2) / 2])
114
+ dist = np.linalg.norm(bbox_center - face_center)
115
+ if dist < min_dist:
116
+ min_dist = dist
117
+ best_result = result
118
+
119
+ if best_result is not None:
120
+ pitch = float(best_result.get('pitch', 0))
121
+ yaw = float(best_result.get('yaw', 0))
122
+ return {'pitch': pitch, 'yaw': yaw}
123
  return None
 
124
  except Exception as e:
125
  print(f"Gaze estimation error: {e}")
126
  return None
127
+
128
+ def run_pipeline(self, image_bgr):
129
+ """Run L2CS pipeline once per image and reuse detections."""
130
+ if not self.enabled:
131
+ return None
132
+ try:
133
+ return self.pipeline.step(image_bgr)
134
+ except Exception as e:
135
+ print(f"Warning: L2CS pipeline failed: {e}")
136
+ return None
137
 
138
 
139
  class FaceEmbedder:
 
174
  print("Face embeddings will be disabled")
175
  self.enabled = False
176
 
177
+ def extract_embedding(self, image, bbox=None, keypoints_2d=None):
178
  """
179
  Extract 512-dimensional ArcFace embedding from face.
180
 
181
  Args:
182
+ image: PIL Image or BGR numpy array
183
  bbox: [x1, y1, x2, y2] in pixel coordinates (optional, for cropping)
184
  keypoints_2d: Face keypoints for alignment (optional)
185
 
 
190
  return None
191
 
192
  try:
193
+ # Convert to numpy BGR (InsightFace expects BGR)
194
+ if isinstance(image, Image.Image):
195
+ image_np = np.array(image)
196
+ if image_np.shape[2] == 3:
197
+ image_bgr = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
198
+ else:
199
+ image_bgr = image_np
200
  else:
201
+ image_bgr = image
202
 
203
  # Optionally crop to bbox region for efficiency
204
  if bbox is not None:
 
427
  humans_data_list = []
428
  outputs_list = []
429
  image_rgbs = [] # cache RGB numpy arrays for later crops
430
+ image_bgrs = [] # cache BGR arrays for gaze/face embedding
431
+ gaze_detections = []
432
 
433
  for img_idx, image_pil in enumerate(images):
434
  img_width, img_height = image_pil.size
435
  image_rgb = np.array(image_pil.convert('RGB'))
436
  image_rgbs.append(image_rgb)
437
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
438
+ image_bgrs.append(image_bgr)
439
+
440
+ detections = gaze_estimator.run_pipeline(image_bgr) if gaze_estimator is not None else None
441
+ gaze_detections.append(detections)
442
 
443
+ with torch.inference_mode():
444
+ outputs = teacher.process_one_image(image_bgr)
445
  outputs_list.append(outputs)
446
 
447
  if not outputs:
 
488
  gaze_direction = None
489
  if has_face and bbox is not None and gaze_estimator is not None:
490
  try:
491
+ gaze_direction = gaze_estimator.estimate_gaze(bbox, detections=gaze_detections[img_idx])
492
  except Exception as e:
493
  gaze_direction = None
494
 
495
  face_embedding = None
496
  if has_face and bbox is not None and face_embedder is not None:
497
  try:
498
+ face_embedding = face_embedder.extract_embedding(image_bgrs[img_idx], bbox, kpts2d)
499
  except Exception as e:
500
  face_embedding = None
501