vlordier committed on
Commit
878ad56
·
verified ·
1 Parent(s): 48953e9

Upload hf_job_gaze.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_job_gaze.py +76 -84
hf_job_gaze.py CHANGED
@@ -1,8 +1,8 @@
1
  #!/usr/bin/env python3
2
  """
3
- Gaze Estimation Job - Estimate gaze direction using L2CS-Net
4
  Requires: SAM 3D Body outputs for face bboxes
5
- Outputs: Pitch/yaw gaze angles per detected face
6
  """
7
  import argparse
8
  import os
@@ -30,34 +30,35 @@ import cv2
30
  import json
31
 
32
 
33
def init_gaze_estimator(device='cuda'):
    """Install (best-effort) and load the L2CS-Net gaze pipeline.

    Args:
        device: device spec forwarded to the l2cs Pipeline.

    Returns:
        A ready l2cs.Pipeline (ResNet50, Gaze360 weights).
    """
    logger.info("Installing L2CS-Net...")
    install_cmd = [
        'pip', 'install', '-q',
        'git+https://github.com/edavalosanaya/L2CS-Net.git@main',
    ]
    try:
        subprocess.run(install_cmd, check=True, capture_output=True)
    except Exception as e:
        # Non-fatal: the package may already be installed; the import
        # below is the real failure point if it is not.
        logger.warning(f"L2CS-Net installation failed: {e}")
    else:
        logger.info("✓ L2CS-Net installed")

    logger.info("Loading L2CS-Net...")
    from l2cs import Pipeline

    gaze_pipeline = Pipeline(
        weights='L2CSNet_gaze360.pkl',
        arch='ResNet50',
        device=device
    )
    logger.info("✓ L2CS-Net loaded")
    return gaze_pipeline
57
 
58
 
59
- def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.15):
60
- """Convert bbox to square with padding for gaze estimation"""
61
  x1, y1, x2, y2 = bbox
62
  w = x2 - x1
63
  h = y2 - y1
@@ -115,58 +116,45 @@ def has_valid_face(keypoints_2d, keypoints_3d, img_width, img_height):
115
  return True
116
 
117
 
118
def estimate_gaze_batch(pipeline, image_bgr, bboxes, img_width, img_height):
    """Run L2CS once on full image and match results to face bboxes.

    Args:
        pipeline: l2cs Pipeline-like object exposing step(image).
        image_bgr: full frame in BGR order.
        bboxes: per-human face bboxes; None entries mark humans without a
            usable face and yield None in the output.
        img_width / img_height: frame dimensions for bbox clamping.

    Returns:
        A list aligned with `bboxes`; each entry is a {'pitch', 'yaw'}
        dict for the nearest detection, or None.
    """
    try:
        # Square + pad each valid bbox; keep None placeholders aligned.
        square_bboxes = [
            make_square_bbox_with_padding(b, img_width, img_height, padding=0.15)
            if b is not None else None
            for b in bboxes
        ]

        # Single full-frame pass; per-bbox matching happens below.
        detections = pipeline.step(image_bgr)
        if not detections:
            return [None] * len(bboxes)

        results = []
        for sq in square_bboxes:
            if sq is None:
                results.append(None)
                continue

            center = np.array([(sq[0] + sq[2]) / 2, (sq[1] + sq[3]) / 2])

            # Nearest detection by center-to-center distance.
            best_det = None
            best_dist = float('inf')
            for det in detections:
                fb = det.get('bbox')
                if fb is None:
                    continue
                det_center = np.array([(fb[0] + fb[2]) / 2, (fb[1] + fb[3]) / 2])
                d = np.linalg.norm(center - det_center)
                if d < best_dist:
                    best_dist = d
                    best_det = det

            if best_det is None:
                results.append(None)
            else:
                results.append({
                    'pitch': float(best_det.get('pitch', 0)),
                    'yaw': float(best_det.get('yaw', 0))
                })

        return results
    except Exception as e:
        logger.error(f"Gaze estimation failed: {e}")
        return [None] * len(bboxes)
170
 
171
 
172
  def process_batch(batch, sam3d_dataset):
@@ -187,7 +175,7 @@ def process_batch(batch, sam3d_dataset):
187
  if not sam3d_row or not sam3d_row[0]['sam3d_data']:
188
  results_list.append({
189
  'image_id': image_id,
190
- 'gaze_data': None
191
  })
192
  continue
193
 
@@ -197,38 +185,42 @@ def process_batch(batch, sam3d_dataset):
197
  image_rgb = np.array(image_pil.convert('RGB'))
198
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
199
 
200
- # Collect bboxes for valid faces only
201
- bboxes = []
202
- for human in humans_data:
203
  bbox = human.get('bbox')
204
  kpts2d = human.get('keypoints_2d')
205
  kpts3d = human.get('keypoints_3d')
206
 
207
  # Check if this human has a valid, visible face
208
- if has_valid_face(kpts2d, kpts3d, img_width, img_height) and bbox is not None:
209
- bboxes.append(bbox)
210
- else:
211
- bboxes.append(None)
212
-
213
- # Estimate gaze for all valid faces in one pass
214
- gaze_results = estimate_gaze_batch(gaze_pipeline, image_bgr, bboxes, img_width, img_height)
 
 
 
 
215
 
216
  results_list.append({
217
  'image_id': image_id,
218
- 'gaze_data': json.dumps(gaze_results) if any(g is not None for g in gaze_results) else None
219
  })
220
 
221
  return {
222
  'image_id': [r['image_id'] for r in results_list],
223
- 'gaze_directions': [r['gaze_data'] for r in results_list]
224
  }
225
 
226
 
227
  def main():
228
- global gaze_pipeline
229
 
230
  logger.info("="*60)
231
- logger.info("Gaze Estimation (L2CS-Net)")
232
  logger.info("="*60)
233
 
234
  ap = argparse.ArgumentParser()
@@ -246,8 +238,8 @@ def main():
246
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
247
  logger.info(f"Using device: {device}")
248
 
249
- # Load gaze estimator
250
- gaze_pipeline = init_gaze_estimator(device)
251
 
252
  # Load SAM3D results
253
  logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
@@ -288,12 +280,12 @@ def main():
288
  # Create output dataset
289
  features = Features({
290
  'image_id': Value('string'),
291
- 'gaze_directions': Value('string')
292
  })
293
 
294
  output_ds = HFDataset.from_dict({
295
  'image_id': [r['image_id'] for r in results],
296
- 'gaze_directions': [r['gaze_directions'] for r in results]
297
  }, features=features)
298
 
299
  # Upload
 
1
  #!/usr/bin/env python3
2
  """
3
+ Face Embedding Job - Extract ArcFace embeddings from detected faces
4
  Requires: SAM 3D Body outputs for face bboxes
5
+ Outputs: 512-dim face embeddings with detection confidence
6
  """
7
  import argparse
8
  import os
 
30
  import json
31
 
32
 
33
def init_face_embedder(device='cuda'):
    """Initialize the InsightFace ArcFace face-analysis model.

    Installs insightface plus the matching onnxruntime build at runtime
    (best-effort), then loads the 'buffalo_l' model pack and prepares it.

    Args:
        device: torch.device or device string ('cuda', 'cuda:0', 'cpu').
            The original body dereferenced `device.type`, which crashes
            when called with the documented string default; both forms
            are accepted now.

    Returns:
        A prepared insightface.app.FaceAnalysis instance.
    """
    # Normalize: torch.device exposes .type ('cuda'/'cpu'); a plain
    # string like 'cuda:0' is reduced to its type prefix.
    device_type = device.type if hasattr(device, 'type') else str(device).split(':')[0]
    use_cuda = device_type == 'cuda'

    logger.info("Installing InsightFace...")
    try:
        subprocess.run(
            ['pip', 'install', '-q', 'insightface',
             'onnxruntime-gpu' if use_cuda else 'onnxruntime'],
            check=True,
            capture_output=True
        )
        logger.info("✓ InsightFace installed")
    except Exception as e:
        # Best-effort: the package may already be present; the import
        # below is the real failure point if it is not.
        logger.warning(f"InsightFace installation failed: {e}")

    logger.info("Loading InsightFace ArcFace...")
    from insightface.app import FaceAnalysis

    app = FaceAnalysis(
        name='buffalo_l',
        providers=['CUDAExecutionProvider'] if use_cuda else ['CPUExecutionProvider']
    )
    # ctx_id 0 selects the first GPU; -1 forces CPU execution.
    app.prepare(ctx_id=0 if use_cuda else -1, det_size=(640, 640))
    logger.info("✓ ArcFace loaded")

    return app
58
 
59
 
60
+ def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2):
61
+ """Convert bbox to square with padding for face detection"""
62
  x1, y1, x2, y2 = bbox
63
  w = x2 - x1
64
  h = y2 - y1
 
116
  return True
117
 
118
 
119
def extract_embedding(app, image_bgr, bbox, img_width, img_height):
    """Extract an L2-normalized ArcFace embedding for one face bbox.

    The bbox is squared/padded, cropped from the BGR frame, optionally
    downscaled so its longer side is <= 640 px, run through InsightFace
    detection, and the most confident detected face's embedding is
    normalized and returned.

    Args:
        app: prepared insightface FaceAnalysis instance.
        image_bgr: full frame as a BGR numpy array (H, W, 3).
        bbox: (x1, y1, x2, y2) face bbox in pixel coordinates.
        img_width / img_height: frame dimensions, used for clamping.

    Returns:
        dict with 'embedding' (list[float], unit norm), 'det_score' and
        'embedding_dim', or None when no usable face is found.
    """
    try:
        # Square + pad the bbox so the detector sees facial context.
        square_bbox = make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2)
        # Slicing needs integer indices; upstream bboxes may be floats.
        x1, y1, x2, y2 = (int(v) for v in square_bbox)

        crop = image_bgr[y1:y2, x1:x2]
        if crop.size == 0:
            return None

        # Cap the longer side at 640 px (the det_size used at prepare
        # time) to bound detection cost on large crops.
        crop_h, crop_w = crop.shape[:2]
        longest = max(crop_h, crop_w)
        if longest > 640:
            scale = 640 / longest
            crop = cv2.resize(
                crop,
                (int(crop_w * scale), int(crop_h * scale)),
                interpolation=cv2.INTER_LINEAR
            )

        faces = app.get(crop)
        if len(faces) == 0:
            return None

        # Keep the most confident detection within the crop.
        face = max(faces, key=lambda f: f.det_score)
        embedding = face.embedding

        # Guard against a zero vector: the unguarded division produced a
        # silent all-NaN embedding (numpy raises no exception here).
        norm = float(np.linalg.norm(embedding))
        if norm == 0.0:
            return None
        embedding_norm = embedding / norm

        return {
            'embedding': embedding_norm.astype(np.float32).tolist(),
            'det_score': float(face.det_score),
            'embedding_dim': len(embedding)
        }
    except Exception as e:
        logger.error(f"Embedding extraction failed: {e}")
        return None
158
 
159
 
160
  def process_batch(batch, sam3d_dataset):
 
175
  if not sam3d_row or not sam3d_row[0]['sam3d_data']:
176
  results_list.append({
177
  'image_id': image_id,
178
+ 'embeddings': None
179
  })
180
  continue
181
 
 
185
  image_rgb = np.array(image_pil.convert('RGB'))
186
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
187
 
188
+ # Extract embeddings for each human with valid face
189
+ embeddings = []
190
+ for human_idx, human in enumerate(humans_data):
191
  bbox = human.get('bbox')
192
  kpts2d = human.get('keypoints_2d')
193
  kpts3d = human.get('keypoints_3d')
194
 
195
  # Check if this human has a valid, visible face
196
+ if not has_valid_face(kpts2d, kpts3d, img_width, img_height):
197
+ embeddings.append(None)
198
+ continue
199
+
200
+ if bbox is None:
201
+ embeddings.append(None)
202
+ continue
203
+
204
+ # Extract embedding from face region
205
+ embedding = extract_embedding(face_app, image_bgr, bbox, img_width, img_height)
206
+ embeddings.append(embedding)
207
 
208
  results_list.append({
209
  'image_id': image_id,
210
+ 'embeddings': json.dumps(embeddings) if any(e is not None for e in embeddings) else None
211
  })
212
 
213
  return {
214
  'image_id': [r['image_id'] for r in results_list],
215
+ 'face_embeddings': [r['embeddings'] for r in results_list]
216
  }
217
 
218
 
219
  def main():
220
+ global face_app
221
 
222
  logger.info("="*60)
223
+ logger.info("Face Embedding Extraction (ArcFace)")
224
  logger.info("="*60)
225
 
226
  ap = argparse.ArgumentParser()
 
238
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
239
  logger.info(f"Using device: {device}")
240
 
241
+ # Load face embedder
242
+ face_app = init_face_embedder(device)
243
 
244
  # Load SAM3D results
245
  logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
 
280
  # Create output dataset
281
  features = Features({
282
  'image_id': Value('string'),
283
+ 'face_embeddings': Value('string')
284
  })
285
 
286
  output_ds = HFDataset.from_dict({
287
  'image_id': [r['image_id'] for r in results],
288
+ 'face_embeddings': [r['face_embeddings'] for r in results]
289
  }, features=features)
290
 
291
  # Upload