vlordier committed on
Commit
9f9bcb0
·
verified ·
1 Parent(s): 878ad56

Upload hf_job_gaze.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_job_gaze.py +94 -89
hf_job_gaze.py CHANGED
@@ -1,8 +1,8 @@
1
  #!/usr/bin/env python3
2
  """
3
- Face Embedding Job - Extract ArcFace embeddings from detected faces
4
  Requires: SAM 3D Body outputs for face bboxes
5
- Outputs: 512-dim face embeddings with detection confidence
6
  """
7
  import argparse
8
  import os
@@ -30,35 +30,34 @@ import cv2
30
  import json
31
 
32
 
33
def init_face_embedder(device='cuda'):
    """Install and load the InsightFace ArcFace face-analysis model.

    Args:
        device: Either a ``torch.device`` or a plain string such as
            ``'cuda'``/``'cpu'``.  Selects the ONNX runtime build and
            execution provider.

    Returns:
        A prepared ``insightface.app.FaceAnalysis`` instance
        (buffalo_l weights, 640x640 detection size).
    """
    # Accept both torch.device objects (which expose `.type`) and plain
    # strings -- the original `device.type` crashed on the string default.
    use_cuda = str(getattr(device, 'type', device)) == 'cuda'

    logger.info("Installing InsightFace...")
    try:
        subprocess.run(
            ['pip', 'install', '-q', 'insightface', 'onnxruntime-gpu' if use_cuda else 'onnxruntime'],
            check=True,
            capture_output=True
        )
        logger.info("✓ InsightFace installed")
    except Exception as e:
        # Best-effort install: the package may already be present, so only warn.
        logger.warning(f"InsightFace installation failed: {e}")

    logger.info("Loading InsightFace ArcFace...")
    import insightface
    from insightface.app import FaceAnalysis

    app = FaceAnalysis(
        name='buffalo_l',
        providers=['CUDAExecutionProvider'] if use_cuda else ['CPUExecutionProvider']
    )
    # ctx_id 0 selects the first GPU; -1 forces CPU inference.
    app.prepare(ctx_id=0 if use_cuda else -1, det_size=(640, 640))
    logger.info("✓ ArcFace loaded")

    return app
 
59
 
60
- def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2):
61
- """Convert bbox to square with padding for face detection"""
62
  x1, y1, x2, y2 = bbox
63
  w = x2 - x1
64
  h = y2 - y1
@@ -80,8 +79,8 @@ def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2):
80
  return [x1_sq, y1_sq, x2_sq, y2_sq]
81
 
82
 
83
- def has_valid_face(keypoints_2d, keypoints_3d, img_width, img_height):
84
- """Check if human has a valid, visible face"""
85
  if keypoints_2d is None or keypoints_3d is None:
86
  return False
87
 
@@ -91,24 +90,21 @@ def has_valid_face(keypoints_2d, keypoints_3d, img_width, img_height):
91
  if len(kpts2d_arr) < 3 or len(kpts3d_arr) < 3:
92
  return False
93
 
94
- # Check face keypoints (nose, left eye, right eye)
95
- nose_2d = kpts2d_arr[0]
96
  left_eye_2d = kpts2d_arr[1]
97
  right_eye_2d = kpts2d_arr[2]
98
- nose_3d = kpts3d_arr[0]
99
  left_eye_3d = kpts3d_arr[1]
100
  right_eye_3d = kpts3d_arr[2]
101
 
102
- # Check 3D keypoints are valid (not at origin)
103
- keypoints_valid_3d = (np.linalg.norm(nose_3d) > 1e-6 and
104
- np.linalg.norm(left_eye_3d) > 1e-6 and
105
- np.linalg.norm(right_eye_3d) > 1e-6)
106
 
107
- if not keypoints_valid_3d:
108
  return False
109
 
110
- # Check 2D keypoints are within image bounds
111
- for kp in [nose_2d, left_eye_2d, right_eye_2d]:
112
  if (kp[0] < 0 or kp[0] >= img_width or
113
  kp[1] < 0 or kp[1] >= img_height):
114
  return False
@@ -116,45 +112,58 @@ def has_valid_face(keypoints_2d, keypoints_3d, img_width, img_height):
116
  return True
117
 
118
 
119
def extract_embedding(app, image_bgr, bbox, img_width, img_height):
    """Extract an ArcFace embedding from the padded square region around *bbox*.

    Returns a dict with the L2-normalized embedding, the detector confidence
    and the embedding dimensionality, or ``None`` when no face is found or
    extraction fails.
    """
    try:
        # Work on a padded square crop: face detectors behave better when the
        # head is centered with some surrounding context.
        x1, y1, x2, y2 = make_square_bbox_with_padding(
            bbox, img_width, img_height, padding=0.2
        )
        region = image_bgr[y1:y2, x1:x2]
        if region.size == 0:
            return None

        # Cap the crop at 640px on its longest side (InsightFace's sweet spot).
        height, width = region.shape[:2]
        longest = max(height, width)
        if longest > 640:
            ratio = 640 / longest
            region = cv2.resize(
                region,
                (int(width * ratio), int(height * ratio)),
                interpolation=cv2.INTER_LINEAR,
            )

        detected = app.get(region)
        if not detected:
            return None

        # Keep only the detection the model is most confident about.
        best = max(detected, key=lambda f: f.det_score)
        unit_embedding = best.embedding / np.linalg.norm(best.embedding)

        return {
            'embedding': unit_embedding.astype(np.float32).tolist(),
            'det_score': float(best.det_score),
            'embedding_dim': len(best.embedding)
        }
    except Exception as e:
        logger.error(f"Embedding extraction failed: {e}")
        return None
158
 
159
 
160
  def process_batch(batch, sam3d_dataset):
@@ -175,7 +184,7 @@ def process_batch(batch, sam3d_dataset):
175
  if not sam3d_row or not sam3d_row[0]['sam3d_data']:
176
  results_list.append({
177
  'image_id': image_id,
178
- 'embeddings': None
179
  })
180
  continue
181
 
@@ -185,42 +194,38 @@ def process_batch(batch, sam3d_dataset):
185
  image_rgb = np.array(image_pil.convert('RGB'))
186
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
187
 
188
- # Extract embeddings for each human with valid face
189
- embeddings = []
190
- for human_idx, human in enumerate(humans_data):
191
  bbox = human.get('bbox')
192
  kpts2d = human.get('keypoints_2d')
193
  kpts3d = human.get('keypoints_3d')
194
 
195
- # Check if this human has a valid, visible face
196
- if not has_valid_face(kpts2d, kpts3d, img_width, img_height):
197
- embeddings.append(None)
198
- continue
199
-
200
- if bbox is None:
201
- embeddings.append(None)
202
- continue
203
-
204
- # Extract embedding from face region
205
- embedding = extract_embedding(face_app, image_bgr, bbox, img_width, img_height)
206
- embeddings.append(embedding)
207
 
208
  results_list.append({
209
  'image_id': image_id,
210
- 'embeddings': json.dumps(embeddings) if any(e is not None for e in embeddings) else None
211
  })
212
 
213
  return {
214
  'image_id': [r['image_id'] for r in results_list],
215
- 'face_embeddings': [r['embeddings'] for r in results_list]
216
  }
217
 
218
 
219
  def main():
220
- global face_app
221
 
222
  logger.info("="*60)
223
- logger.info("Face Embedding Extraction (ArcFace)")
224
  logger.info("="*60)
225
 
226
  ap = argparse.ArgumentParser()
@@ -238,8 +243,8 @@ def main():
238
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
239
  logger.info(f"Using device: {device}")
240
 
241
- # Load face embedder
242
- face_app = init_face_embedder(device)
243
 
244
  # Load SAM3D results
245
  logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
@@ -280,12 +285,12 @@ def main():
280
  # Create output dataset
281
  features = Features({
282
  'image_id': Value('string'),
283
- 'face_embeddings': Value('string')
284
  })
285
 
286
  output_ds = HFDataset.from_dict({
287
  'image_id': [r['image_id'] for r in results],
288
- 'face_embeddings': [r['face_embeddings'] for r in results]
289
  }, features=features)
290
 
291
  # Upload
 
1
  #!/usr/bin/env python3
2
  """
3
+ Gaze Estimation Job - Estimate gaze direction using L2CS-Net
4
  Requires: SAM 3D Body outputs for face bboxes
5
+ Outputs: Pitch/yaw gaze angles per detected face
6
  """
7
  import argparse
8
  import os
 
30
  import json
31
 
32
 
33
def init_gaze_estimator(device='cuda', weights='L2CSNet_gaze360.pkl', arch='ResNet50'):
    """Install and load the L2CS-Net gaze-estimation pipeline.

    Args:
        device: ``torch.device`` (or device string) the model runs on.
        weights: Path to the pretrained checkpoint.  NOTE(review): this is a
            CWD-relative path and is not downloaded here -- confirm the job
            environment provides the file.
        arch: Backbone architecture name understood by l2cs.

    Returns:
        A ready ``l2cs.Pipeline`` instance.
    """
    logger.info("Installing L2CS-Net...")
    try:
        subprocess.run(
            ['pip', 'install', '-q', 'git+https://github.com/edavalosanaya/L2CS-Net.git@main'],
            check=True,
            capture_output=True
        )
        logger.info("✓ L2CS-Net installed")
    except Exception as e:
        # Best-effort install: the package may already be present, so only warn.
        logger.warning(f"L2CS-Net installation failed: {e}")

    logger.info("Loading L2CS-Net...")
    from l2cs import Pipeline

    pipeline = Pipeline(
        weights=weights,
        arch=arch,
        device=device
    )
    logger.info("✓ L2CS-Net loaded")

    return pipeline
57
 
58
 
59
+ def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.15):
60
+ """Convert bbox to square with padding for gaze estimation"""
61
  x1, y1, x2, y2 = bbox
62
  w = x2 - x1
63
  h = y2 - y1
 
79
  return [x1_sq, y1_sq, x2_sq, y2_sq]
80
 
81
 
82
+ def has_valid_eyes(keypoints_2d, keypoints_3d, img_width, img_height):
83
+ """Check if human has valid, visible eyes for gaze estimation"""
84
  if keypoints_2d is None or keypoints_3d is None:
85
  return False
86
 
 
90
  if len(kpts2d_arr) < 3 or len(kpts3d_arr) < 3:
91
  return False
92
 
93
+ # Check eye keypoints specifically (left eye, right eye)
 
94
  left_eye_2d = kpts2d_arr[1]
95
  right_eye_2d = kpts2d_arr[2]
 
96
  left_eye_3d = kpts3d_arr[1]
97
  right_eye_3d = kpts3d_arr[2]
98
 
99
+ # Check 3D eye keypoints are valid (not at origin)
100
+ eyes_valid_3d = (np.linalg.norm(left_eye_3d) > 1e-6 and
101
+ np.linalg.norm(right_eye_3d) > 1e-6)
 
102
 
103
+ if not eyes_valid_3d:
104
  return False
105
 
106
+ # Check 2D eye keypoints are within image bounds
107
+ for kp in [left_eye_2d, right_eye_2d]:
108
  if (kp[0] < 0 or kp[0] >= img_width or
109
  kp[1] < 0 or kp[1] >= img_height):
110
  return False
 
112
  return True
113
 
114
 
115
+ def estimate_gaze_batch(pipeline, image_bgr, bboxes, img_width, img_height):
116
+ """Run L2CS once on full image and match results to face bboxes"""
117
  try:
118
+ # Convert bboxes to square with padding
119
+ square_bboxes = []
120
+ for bbox in bboxes:
121
+ if bbox is not None:
122
+ square_bbox = make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.15)
123
+ square_bboxes.append(square_bbox)
124
+ else:
125
+ square_bboxes.append(None)
126
 
127
+ detections = pipeline.step(image_bgr)
128
+ if not detections:
129
+ return [None] * len(bboxes)
130
 
131
+ results = []
132
+ for square_bbox in square_bboxes:
133
+ if square_bbox is None:
134
+ results.append(None)
135
+ continue
136
+
137
+ x1, y1, x2, y2 = square_bbox
138
+ bbox_center = np.array([(x1 + x2) / 2, (y1 + y2) / 2])
139
+
140
+ best_result = None
141
+ min_dist = float('inf')
142
+
143
+ for det in detections:
144
+ face_bbox = det.get('bbox')
145
+ if face_bbox is None:
146
+ continue
147
+ fx1, fy1, fx2, fy2 = face_bbox
148
+ face_center = np.array([(fx1 + fx2) / 2, (fy1 + fy2) / 2])
149
+ dist = np.linalg.norm(bbox_center - face_center)
150
+
151
+ if dist < min_dist:
152
+ min_dist = dist
153
+ best_result = det
154
+
155
+ if best_result is not None:
156
+ results.append({
157
+ 'pitch': float(best_result.get('pitch', 0)),
158
+ 'yaw': float(best_result.get('yaw', 0))
159
+ })
160
+ else:
161
+ results.append(None)
162
 
163
+ return results
 
 
 
 
164
  except Exception as e:
165
+ logger.error(f"Gaze estimation failed: {e}")
166
+ return [None] * len(bboxes)
167
 
168
 
169
  def process_batch(batch, sam3d_dataset):
 
184
  if not sam3d_row or not sam3d_row[0]['sam3d_data']:
185
  results_list.append({
186
  'image_id': image_id,
187
+ 'gaze_data': None
188
  })
189
  continue
190
 
 
194
  image_rgb = np.array(image_pil.convert('RGB'))
195
  image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
196
 
197
+ # Collect bboxes for valid eyes only
198
+ bboxes = []
199
+ for human in humans_data:
200
  bbox = human.get('bbox')
201
  kpts2d = human.get('keypoints_2d')
202
  kpts3d = human.get('keypoints_3d')
203
 
204
+ # Check if this human has valid, visible eyes for gaze estimation
205
+ if has_valid_eyes(kpts2d, kpts3d, img_width, img_height) and bbox is not None:
206
+ bboxes.append(bbox)
207
+ else:
208
+ bboxes.append(None)
209
+
210
+ # Estimate gaze for all valid eyes in one pass
211
+ gaze_results = estimate_gaze_batch(gaze_pipeline, image_bgr, bboxes, img_width, img_height)
 
 
 
 
212
 
213
  results_list.append({
214
  'image_id': image_id,
215
+ 'gaze_data': json.dumps(gaze_results) if any(g is not None for g in gaze_results) else None
216
  })
217
 
218
  return {
219
  'image_id': [r['image_id'] for r in results_list],
220
+ 'gaze_directions': [r['gaze_data'] for r in results_list]
221
  }
222
 
223
 
224
  def main():
225
+ global gaze_pipeline
226
 
227
  logger.info("="*60)
228
+ logger.info("Gaze Estimation (L2CS-Net)")
229
  logger.info("="*60)
230
 
231
  ap = argparse.ArgumentParser()
 
243
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
244
  logger.info(f"Using device: {device}")
245
 
246
+ # Load gaze estimator
247
+ gaze_pipeline = init_gaze_estimator(device)
248
 
249
  # Load SAM3D results
250
  logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
 
285
  # Create output dataset
286
  features = Features({
287
  'image_id': Value('string'),
288
+ 'gaze_directions': Value('string')
289
  })
290
 
291
  output_ds = HFDataset.from_dict({
292
  'image_id': [r['image_id'] for r in results],
293
+ 'gaze_directions': [r['gaze_directions'] for r in results]
294
  }, features=features)
295
 
296
  # Upload