|
|
|
|
|
""" |
|
|
Gaze Estimation Job - Estimate gaze direction using L2CS-Net |
|
|
Requires: SAM 3D Body outputs for face bboxes |
|
|
Outputs: Pitch/yaw gaze angles per detected face |
|
|
""" |
|
|
import argparse |
|
|
import os |
|
|
from pathlib import Path |
|
|
import warnings |
|
|
warnings.filterwarnings('ignore') |
|
|
import logging |
|
|
import sys |
|
|
import subprocess |
|
|
|
|
|
# Configure root logging for the whole job. force=True replaces any handlers
# that imported libraries may have already installed, so our format wins.
logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,  # log to stdout so job schedulers/capture tools see it
    force=True
)
logger = logging.getLogger(__name__)
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
from datasets import load_dataset, Dataset as HFDataset, Features, Value |
|
|
from PIL import Image |
|
|
import cv2 |
|
|
import json |
|
|
|
|
|
|
|
|
def init_gaze_estimator(device='cuda'):
    """Install (best-effort) and load the L2CS-Net gaze-estimation pipeline.

    Args:
        device: torch device (string or torch.device) the model should run on.

    Returns:
        An ``l2cs.Pipeline`` ready for ``.step(image_bgr)`` calls.

    Raises:
        ImportError: if L2CS-Net is neither pre-installed nor installable.
    """
    logger.info("Installing L2CS-Net...")
    try:
        # Use the current interpreter's pip (python -m pip) so the package is
        # installed into the environment this job actually runs in — a bare
        # 'pip' on PATH may belong to a different Python.
        subprocess.run(
            [sys.executable, '-m', 'pip', 'install', '-q',
             'git+https://github.com/edavalosanaya/L2CS-Net.git@main'],
            check=True,
            capture_output=True
        )
        logger.info("✓ L2CS-Net installed")
    except subprocess.CalledProcessError as e:
        # Surface pip's stderr — a bare "installation failed" is not actionable.
        stderr = e.stderr.decode(errors='replace') if e.stderr else str(e)
        logger.warning(f"L2CS-Net installation failed: {stderr}")
    except Exception as e:
        # Best-effort: the package may already be present, so keep going.
        logger.warning(f"L2CS-Net installation failed: {e}")

    logger.info("Loading L2CS-Net...")
    # Imported lazily because the package may only exist after the pip step.
    from l2cs import Pipeline

    pipeline = Pipeline(
        weights='L2CSNet_gaze360.pkl',
        arch='ResNet50',
        device=device
    )
    logger.info("✓ L2CS-Net loaded")

    return pipeline
|
|
|
|
|
|
|
|
def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.15):
    """Expand a bbox into a padded square crop, clipped to the image bounds.

    The square is centred on the original box and its side is the larger of
    the box's width/height grown by the fractional ``padding``. Because the
    result is clipped to ``[0, img_width] x [0, img_height]``, boxes near the
    image edge may come back slightly non-square.

    Returns:
        ``[x1, y1, x2, y2]`` as ints.
    """
    left, top, right, bottom = bbox
    center_x = (left + right) / 2
    center_y = (top + bottom) / 2

    # Half the side length: larger dimension, grown by the padding fraction.
    half = max(right - left, bottom - top) * (1 + padding) / 2

    return [
        max(0, int(center_x - half)),
        max(0, int(center_y - half)),
        min(img_width, int(center_x + half)),
        min(img_height, int(center_y + half)),
    ]
|
|
|
|
|
|
|
|
def has_valid_eyes(keypoints_2d, keypoints_3d, img_width, img_height):
    """Return True when both eye keypoints are usable for gaze estimation.

    Eyes are taken as keypoint indices 1 and 2 (COCO-style ordering assumed —
    TODO confirm against the SAM3D keypoint layout). They must be present,
    non-degenerate in 3D (non-zero norm), and inside the image bounds in 2D.
    """
    if keypoints_2d is None or keypoints_3d is None:
        return False

    pts2d = np.array(keypoints_2d)
    pts3d = np.array(keypoints_3d)

    # Need at least three keypoints so indices 1 and 2 exist.
    if len(pts2d) < 3 or len(pts3d) < 3:
        return False

    # A (near-)zero-norm 3D eye point means the keypoint was not estimated.
    if np.linalg.norm(pts3d[1]) <= 1e-6 or np.linalg.norm(pts3d[2]) <= 1e-6:
        return False

    # Both 2D eye points must fall strictly inside the image.
    return all(
        0 <= eye[0] < img_width and 0 <= eye[1] < img_height
        for eye in (pts2d[1], pts2d[2])
    )
|
|
|
|
|
|
|
|
def estimate_gaze_batch(pipeline, image_bgr, bboxes, img_width, img_height):
    """Run L2CS once on the full image and match results to face bboxes.

    Returns one entry per input bbox, in order: a ``{'pitch', 'yaw'}`` dict
    (floats) for the nearest L2CS face detection by centre distance, or
    ``None`` when the bbox was None, no detection exists, or the whole step
    failed. Note: matching is greedy with no distance cap, so several input
    bboxes may map to the same detection.
    """
    try:
        # Pre-compute padded square crops; None bboxes stay None.
        squares = [
            None if box is None
            else make_square_bbox_with_padding(box, img_width, img_height, padding=0.15)
            for box in bboxes
        ]

        detections = pipeline.step(image_bgr)
        if not detections:
            return [None] * len(bboxes)

        matched = []
        for square in squares:
            if square is None:
                matched.append(None)
                continue

            sx1, sy1, sx2, sy2 = square
            target = np.array([(sx1 + sx2) / 2, (sy1 + sy2) / 2])

            # Nearest-centre match against every detection reported by L2CS.
            nearest = None
            nearest_dist = float('inf')
            for det in detections:
                det_box = det.get('bbox')
                if det_box is None:
                    continue
                dx1, dy1, dx2, dy2 = det_box
                centre = np.array([(dx1 + dx2) / 2, (dy1 + dy2) / 2])
                dist = np.linalg.norm(target - centre)
                if dist < nearest_dist:
                    nearest_dist = dist
                    nearest = det

            if nearest is None:
                matched.append(None)
            else:
                matched.append({
                    'pitch': float(nearest.get('pitch', 0)),
                    'yaw': float(nearest.get('yaw', 0))
                })

        return matched
    except Exception as e:
        logger.error(f"Gaze estimation failed: {e}")
        return [None] * len(bboxes)
|
|
|
|
|
|
|
|
def process_batch(batch, sam3d_dataset):
    """Process a batch of images: join each image with its SAM3D row by
    image_id, filter humans to those with valid eyes, and run gaze estimation.

    Args:
        batch: HF datasets batch dict with 'image' (PIL images) and optionally
            'image_path' (used to derive image_id via the filename stem).
        sam3d_dataset: dataset of SAM3D outputs with 'image_id' and
            'sam3d_data' (JSON string of per-human dicts) columns.

    Returns:
        Batch dict with 'image_id' and 'gaze_directions' (JSON string of
        per-human gaze results, or None when nothing was estimated).

    NOTE(review): relies on the module-level global ``gaze_pipeline`` being
    initialized by main() before this runs.
    """
    images = batch['image']
    # Fall back to synthetic ids when the dataset has no 'image_path' column.
    image_paths = batch.get('image_path', [f'img_{i:06d}' for i in range(len(images))])

    results_list = []

    for idx, image_pil in enumerate(images):
        image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'
        img_width, img_height = image_pil.size

        # NOTE(review): this filter re-scans sam3d_dataset for every image —
        # O(N) per lookup, O(N^2) overall, and very slow on a streaming
        # dataset. Consider materializing an image_id -> sam3d_data dict once.
        sam3d_row = sam3d_dataset.filter(lambda x: x['image_id'] == image_id).take(1)
        sam3d_row = list(sam3d_row)

        # No SAM3D match (or empty payload): emit a null gaze record.
        if not sam3d_row or not sam3d_row[0]['sam3d_data']:
            results_list.append({
                'image_id': image_id,
                'gaze_data': None
            })
            continue

        # sam3d_data is a JSON-encoded list of per-human dicts
        # (bbox, keypoints_2d, keypoints_3d).
        humans_data = json.loads(sam3d_row[0]['sam3d_data'])

        # L2CS expects BGR (OpenCV convention), PIL gives RGB.
        image_rgb = np.array(image_pil.convert('RGB'))
        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

        # One slot per human, aligned with humans_data; None marks humans
        # skipped because their eyes are missing/invalid.
        bboxes = []
        for human in humans_data:
            bbox = human.get('bbox')
            kpts2d = human.get('keypoints_2d')
            kpts3d = human.get('keypoints_3d')

            if has_valid_eyes(kpts2d, kpts3d, img_width, img_height) and bbox is not None:
                bboxes.append(bbox)
            else:
                bboxes.append(None)

        gaze_results = estimate_gaze_batch(gaze_pipeline, image_bgr, bboxes, img_width, img_height)

        # Store JSON only when at least one human got a gaze estimate.
        results_list.append({
            'image_id': image_id,
            'gaze_data': json.dumps(gaze_results) if any(g is not None for g in gaze_results) else None
        })

    return {
        'image_id': [r['image_id'] for r in results_list],
        'gaze_directions': [r['gaze_data'] for r in results_list]
    }
|
|
|
|
|
|
|
|
def main():
    """Entry point: load datasets, run gaze estimation, push results to hub.

    Requires the HF_TOKEN environment variable for the final upload.
    """
    # process_batch reads this module-level global; it must be assigned
    # before the streaming map is consumed.
    global gaze_pipeline

    logger.info("="*60)
    logger.info("Gaze Estimation (L2CS-Net)")
    logger.info("="*60)

    ap = argparse.ArgumentParser()
    ap.add_argument('--input-dataset', type=str, required=True, help='Original images')
    ap.add_argument('--sam3d-dataset', type=str, required=True, help='SAM3D outputs with bboxes')
    ap.add_argument('--output-dataset', type=str, required=True)
    ap.add_argument('--split', type=str, default='train')
    ap.add_argument('--batch-size', type=int, default=4)
    ap.add_argument('--shard-index', type=int, default=0)
    ap.add_argument('--num-shards', type=int, default=1)
    args = ap.parse_args()

    logger.info(f"Arguments: {vars(args)}")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f"Using device: {device}")

    gaze_pipeline = init_gaze_estimator(device)

    # Both datasets are streamed to avoid downloading them fully up front.
    logger.info(f"Loading SAM3D results from {args.sam3d_dataset}...")
    sam3d_ds = load_dataset(args.sam3d_dataset, split=args.split, streaming=True)

    logger.info(f"Loading images from {args.input_dataset}...")
    ds = load_dataset(args.input_dataset, split=args.split, streaming=True)

    if args.num_shards > 1:
        # NOTE(review): sharding both datasets assumes the same image_ids land
        # in matching shards of each dataset; if row ordering differs between
        # them, per-image SAM3D lookups will come up empty — verify alignment.
        ds = ds.shard(num_shards=args.num_shards, index=args.shard_index)
        sam3d_ds = sam3d_ds.shard(num_shards=args.num_shards, index=args.shard_index)
        logger.info(f"Using shard {args.shard_index+1}/{args.num_shards}")

    logger.info(f"Processing with batch_size={args.batch_size}")

    from functools import partial
    process_fn = partial(process_batch, sam3d_dataset=sam3d_ds)

    # NOTE(review): on a streaming dataset column_names can be None when
    # features are not resolved — confirm remove_columns behaves as intended.
    processed_ds = ds.map(
        process_fn,
        batched=True,
        batch_size=args.batch_size,
        remove_columns=ds.column_names
    )

    # Iterating the lazy dataset is what actually triggers processing;
    # results are collected per example (one row per image).
    results = []
    for batch_idx, item in enumerate(processed_ds):
        results.append(item)

        if (batch_idx + 1) % 50 == 0:
            logger.info(f"Processed {batch_idx + 1} images")

    logger.info(f"✓ Processed {len(results)} images")

    # Explicit schema: gaze_directions is a JSON string (or null).
    features = Features({
        'image_id': Value('string'),
        'gaze_directions': Value('string')
    })

    output_ds = HFDataset.from_dict({
        'image_id': [r['image_id'] for r in results],
        'gaze_directions': [r['gaze_directions'] for r in results]
    }, features=features)

    logger.info(f"Uploading to {args.output_dataset}...")
    output_ds.push_to_hub(
        args.output_dataset,
        split=args.split,
        token=os.environ.get('HF_TOKEN'),
        private=True
    )
    logger.info("✓ Upload complete")


if __name__ == '__main__':
    main()
|
|
|