vlordier committed on
Commit
5259419
·
verified ·
1 Parent(s): ed38ecf

Upload hf_job_gaze.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_job_gaze.py +258 -0
hf_job_gaze.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Gaze Estimation Job - Estimate gaze direction using L2CS-Net
Requires: SAM 3D Body outputs for face bboxes
Outputs: Pitch/yaw gaze angles per detected face
"""
import argparse
import os
from pathlib import Path
import warnings
# Silence warnings globally before the heavy ML imports below, which are
# noisy at import time (deprecation/user warnings from torch, datasets, cv2).
warnings.filterwarnings('ignore')
import logging
import sys
import subprocess

# Configure logging before the heavy imports so anything they log at import
# time already uses this format. force=True overrides any handler a library
# may have installed; stdout (not stderr) so HF job log capture picks it up.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,
    force=True
)
logger = logging.getLogger(__name__)

# Heavy/third-party imports deliberately come after warnings + logging setup.
import numpy as np
import torch
from datasets import load_dataset, Dataset as HFDataset, Features, Value
from PIL import Image
import cv2
import json
33
def init_gaze_estimator(device='cuda'):
    """Install (best-effort) and load the L2CS-Net gaze estimation pipeline.

    Args:
        device: torch device (string or torch.device) to run the model on.

    Returns:
        An l2cs.Pipeline ready for inference.

    Raises:
        ImportError: if l2cs is unavailable and installation failed.
    """
    logger.info("Installing L2CS-Net...")
    try:
        # Use the current interpreter's pip (sys.executable -m pip) so the
        # package is installed into the environment this script actually runs
        # in — a bare 'pip' on PATH may belong to a different interpreter.
        subprocess.run(
            [sys.executable, '-m', 'pip', 'install', '-q',
             'git+https://github.com/edavalosanaya/L2CS-Net.git@main'],
            check=True,
            capture_output=True
        )
        logger.info("✓ L2CS-Net installed")
    except Exception as e:
        # Best-effort: the package may already be present; the import below
        # raises if it is genuinely missing.
        logger.warning(f"L2CS-Net installation failed: {e}")

    logger.info("Loading L2CS-Net...")
    # Imported here (not at module top) because the package may only exist
    # after the install step above.
    from l2cs import Pipeline

    # NOTE(review): weights path is relative to the CWD — presumably the HF
    # job working dir ships L2CSNet_gaze360.pkl; confirm against job config.
    pipeline = Pipeline(
        weights='L2CSNet_gaze360.pkl',
        arch='ResNet50',
        device=device
    )
    logger.info("✓ L2CS-Net loaded")

    return pipeline
59
def _normalize_detections(detections):
    """Coerce l2cs Pipeline.step output to a list of detection dicts.

    Current l2cs returns a GazeResultContainer with parallel array
    attributes (bboxes, pitch, yaw); some forks return a list of dicts.
    Normalize both to [{'bbox': [...], 'pitch': float, 'yaw': float}, ...].
    """
    if detections is None:
        return []
    if hasattr(detections, 'bboxes'):
        boxes = detections.bboxes
        if boxes is None or len(boxes) == 0:
            return []
        return [
            {'bbox': [float(v) for v in b], 'pitch': float(p), 'yaw': float(y)}
            for b, p, y in zip(boxes, detections.pitch, detections.yaw)
        ]
    return list(detections)


def estimate_gaze_batch(pipeline, image_bgr, bboxes):
    """Run L2CS once on the frame and match its detections to given bboxes.

    Args:
        pipeline: l2cs Pipeline (any object exposing .step(image)).
        image_bgr: full frame in BGR channel order (what l2cs expects).
        bboxes: list of [x1, y1, x2, y2] face boxes; None entries allowed.

    Returns:
        A list parallel to bboxes; each entry is {'pitch': float,
        'yaw': float} for the nearest detection (by box-center distance),
        or None for unmatched/None boxes. On any failure, all-None.
    """
    try:
        # Single inference pass for the whole frame; previously the raw
        # GazeResultContainer was iterated as if it were a list of dicts,
        # which raised and silently produced all-None results.
        detections = _normalize_detections(pipeline.step(image_bgr))
        if not detections:
            return [None] * len(bboxes)

        results = []
        for bbox in bboxes:
            if bbox is None:
                results.append(None)
                continue

            x1, y1, x2, y2 = bbox
            bbox_center = np.array([(x1 + x2) / 2, (y1 + y2) / 2])

            # Greedy nearest-center match between the query box and the
            # detector's face boxes.
            best_result = None
            min_dist = float('inf')
            for det in detections:
                face_bbox = det.get('bbox')
                if face_bbox is None:
                    continue
                fx1, fy1, fx2, fy2 = face_bbox
                face_center = np.array([(fx1 + fx2) / 2, (fy1 + fy2) / 2])
                dist = np.linalg.norm(bbox_center - face_center)
                if dist < min_dist:
                    min_dist = dist
                    best_result = det

            if best_result is not None:
                results.append({
                    'pitch': float(best_result.get('pitch', 0)),
                    'yaw': float(best_result.get('yaw', 0))
                })
            else:
                results.append(None)

        return results
    except Exception as e:
        # Best-effort per-frame: a failure yields None gaze, not a crash.
        logger.error(f"Gaze estimation failed: {e}")
        return [None] * len(bboxes)
104
# Materialized SAM3D lookups keyed by dataset object identity, so a streaming
# dataset is scanned once per run instead of re-filtered for every image.
_SAM3D_INDEX_CACHE = {}


def _get_sam3d_index(sam3d_dataset):
    """Return (building on first use) an image_id -> sam3d_data dict.

    First occurrence wins on duplicate image_ids, matching the old
    filter(...).take(1) behavior.
    """
    key = id(sam3d_dataset)
    if key not in _SAM3D_INDEX_CACHE:
        index = {}
        for row in sam3d_dataset:
            index.setdefault(row['image_id'], row.get('sam3d_data'))
        _SAM3D_INDEX_CACHE[key] = index
    return _SAM3D_INDEX_CACHE[key]


def process_batch(batch, sam3d_dataset):
    """Process a batch of images, joining with SAM3D results for face bboxes.

    Args:
        batch: HF datasets batch with 'image' (PIL images) and optionally
            'image_path' columns.
        sam3d_dataset: dataset of rows with 'image_id' and JSON 'sam3d_data'.

    Returns:
        Dict of columns: 'image_id' and 'gaze_directions' (JSON string of
        per-face gaze results, or None when no face produced a result).

    Note: reads the module-level `gaze_pipeline` set up by main().
    """
    images = batch['image']
    image_paths = batch.get('image_path', [f'img_{i:06d}' for i in range(len(images))])

    # Previously this filtered the entire (streaming) SAM3D dataset once per
    # image — O(n^2) and a full re-stream each time. Build the index once.
    sam3d_index = _get_sam3d_index(sam3d_dataset)

    results_list = []

    for idx, image_pil in enumerate(images):
        image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'

        sam3d_data = sam3d_index.get(image_id)
        if not sam3d_data:
            # No SAM3D row (or empty payload) for this image: emit None.
            results_list.append({
                'image_id': image_id,
                'gaze_data': None
            })
            continue

        humans_data = json.loads(sam3d_data)

        # L2CS expects BGR input; PIL gives RGB.
        image_rgb = np.array(image_pil.convert('RGB'))
        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

        # Collect one bbox slot per detected human; None marks an invalid face.
        bboxes = []
        for human in humans_data:
            bbox = human.get('bbox')
            kpts2d = human.get('keypoints_2d')
            kpts3d = human.get('keypoints_3d')

            if bbox is None or kpts2d is None or kpts3d is None:
                bboxes.append(None)
                continue

            kpts3d_arr = np.array(kpts3d)
            if len(kpts3d_arr) < 3:
                bboxes.append(None)
                continue

            # Assumes COCO-style ordering: 0=nose, 1=left eye, 2=right eye
            # (TODO confirm against SAM3D output schema). A keypoint at the
            # origin marks an undetected joint; require all three face points.
            nose_3d = kpts3d_arr[0]
            left_eye_3d = kpts3d_arr[1]
            right_eye_3d = kpts3d_arr[2]

            if (np.linalg.norm(nose_3d) < 1e-6 or
                    np.linalg.norm(left_eye_3d) < 1e-6 or
                    np.linalg.norm(right_eye_3d) < 1e-6):
                bboxes.append(None)
                continue

            bboxes.append(bbox)

        # Single L2CS pass per image covers all faces at once.
        gaze_results = estimate_gaze_batch(gaze_pipeline, image_bgr, bboxes)

        results_list.append({
            'image_id': image_id,
            'gaze_data': json.dumps(gaze_results) if any(g is not None for g in gaze_results) else None
        })

    return {
        'image_id': [r['image_id'] for r in results_list],
        'gaze_directions': [r['gaze_data'] for r in results_list]
    }
174
def main():
    """Entry point: load models/datasets, estimate gaze, upload results."""
    # process_batch reads the pipeline from module scope (it is called via
    # datasets.map with only batch columns as arguments).
    global gaze_pipeline

    logger.info("="*60)
    logger.info("Gaze Estimation (L2CS-Net)")
    logger.info("="*60)

    ap = argparse.ArgumentParser()
    ap.add_argument('--input-dataset', type=str, required=True, help='Original images')
    ap.add_argument('--sam3d-dataset', type=str, required=True, help='SAM3D outputs with bboxes')
    ap.add_argument('--output-dataset', type=str, required=True)
    ap.add_argument('--split', type=str, default='train')
    ap.add_argument('--batch-size', type=int, default=4)
    # Sharding lets several parallel jobs each handle 1/num_shards of the data.
    ap.add_argument('--shard-index', type=int, default=0)
    ap.add_argument('--num-shards', type=int, default=1)
    args = ap.parse_args()

    logger.info(f"Arguments: {vars(args)}")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"Using device: {device}")

    # Load gaze estimator (installs L2CS-Net on first run).
    gaze_pipeline = init_gaze_estimator(device)

    # Load SAM3D results (streamed; joined to images by image_id later).
    logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
    sam3d_ds = load_dataset(args.sam3d_dataset, split=args.split, streaming=True)

    # Load images dataset
    logger.info(f"Loading images from {args.input_dataset}...")
    ds = load_dataset(args.input_dataset, split=args.split, streaming=True)

    if args.num_shards > 1:
        ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
        # NOTE(review): sharding sam3d_ds identically assumes both datasets
        # have the same row order; if they differ, this shard's images may
        # join against SAM3D rows that live in another shard — verify.
        sam3d_ds = sam3d_ds.shard(num_shards=args.num_shards, index=args.shard_index)
        logger.info(f"Using shard {args.shard_index+1}/{args.num_shards}")

    # Process
    logger.info(f"Processing with batch_size={args.batch_size}")

    # Bind the SAM3D dataset so map() can call process_batch with the batch only.
    from functools import partial
    process_fn = partial(process_batch, sam3d_dataset=sam3d_ds)

    # NOTE(review): on streaming datasets column_names can be None when
    # features are not declared in the dataset card — confirm for the input.
    processed_ds = ds.map(
        process_fn,
        batched=True,
        batch_size=args.batch_size,
        remove_columns=ds.column_names
    )

    # Collect results (iterating the lazy mapped dataset drives processing).
    results = []
    for batch_idx, item in enumerate(processed_ds):
        results.append(item)

        if (batch_idx + 1) % 50 == 0:
            logger.info(f"Processed {batch_idx + 1} images")

    logger.info(f"✓ Processed {len(results)} images")

    # Create output dataset; gaze_directions holds a JSON string (or None).
    features = Features({
        'image_id': Value('string'),
        'gaze_directions': Value('string')
    })

    output_ds = HFDataset.from_dict({
        'image_id': [r['image_id'] for r in results],
        'gaze_directions': [r['gaze_directions'] for r in results]
    }, features=features)

    # Upload (HF_TOKEN must be set in the job environment).
    logger.info(f"Uploading to {args.output_dataset}...")
    output_ds.push_to_hub(
        args.output_dataset,
        split=args.split,
        token=os.environ.get('HF_TOKEN'),
        private=True
    )
    logger.info("✓ Upload complete")


if __name__ == '__main__':
    main()