#!/usr/bin/env python3
"""
SAM 3D Body Inference Job - Extract 3D pose and keypoints
Outputs: Vertices, keypoints 2D/3D, camera params, bboxes
"""
import argparse
import os
from pathlib import Path
import warnings

warnings.filterwarnings('ignore')

import logging
import sys

logging.basicConfig(
    level=logging.INFO,
    format='[%(asctime)s] %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    stream=sys.stdout,
    force=True,
)
logger = logging.getLogger(__name__)

import numpy as np
import torch
from datasets import load_dataset
from huggingface_hub import HfApi
from PIL import Image
import cv2
import json
import time

# SAM 3D Body imports: make the sibling sam-3d-body repo importable
sam_repo = Path(__file__).parent.parent / "sam-3d-body"
if str(sam_repo) not in sys.path:
    sys.path.insert(0, str(sam_repo))
from sam_3d_body import load_sam_3d_body, SAM3DBodyEstimator

# Use the OSMesa backend so rendering works headlessly (no display required)
os.environ['PYOPENGL_PLATFORM'] = 'osmesa'


def process_batch(batch):
    """Process a batch of images with SAM 3D Body."""
    images = batch['image']
    image_paths = batch.get('image_path', [f'img_{i:06d}' for i in range(len(images))])

    results_list = []
    for idx, image_pil in enumerate(images):
        image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'
        img_width, img_height = image_pil.size

        # Convert to BGR (OpenCV convention expected by the estimator)
        image_rgb = np.array(image_pil.convert('RGB'))
        image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)

        # Process with SAM 3D Body
        with torch.inference_mode():
            outputs = teacher.process_one_image(image_bgr)

        if not outputs:
            results_list.append({
                'image_id': image_id,
                'num_humans': 0,
                'data': None,
            })
            continue

        # Collect all humans' data
        humans_data = []
        for human_idx, pred in enumerate(outputs):
            human_data = {
                'vertices': pred.get('pred_vertices').astype(np.float32).tolist()
                if pred.get('pred_vertices') is not None else None,
                'cam_t': pred.get('pred_cam_t').astype(np.float32).tolist()
                if pred.get('pred_cam_t') is not None else None,
                'focal_length': float(pred.get('focal_length'))
                if pred.get('focal_length') is not None else None,
                'keypoints_2d': pred.get('pred_keypoints_2d').astype(np.float32).tolist()
                if pred.get('pred_keypoints_2d') is not None else None,
                'keypoints_3d': pred.get('pred_keypoints_3d').astype(np.float32).tolist()
                if pred.get('pred_keypoints_3d') is not None else None,
                'bbox': pred.get('bbox').tolist()
                if pred.get('bbox') is not None else None,
            }
            humans_data.append(human_data)

        results_list.append({
            'image_id': image_id,
            'num_humans': len(humans_data),
            'data': json.dumps(humans_data),
        })

    return {
        'image_id': [r['image_id'] for r in results_list],
        'num_humans': [r['num_humans'] for r in results_list],
        'sam3d_data': [r['data'] for r in results_list],
    }


def main():
    global teacher

    logger.info("=" * 60)
    logger.info("SAM 3D Body Inference")
    logger.info("=" * 60)

    ap = argparse.ArgumentParser()
    ap.add_argument('--input-dataset', type=str, required=True)
    ap.add_argument('--output-dataset', type=str, required=True)
    ap.add_argument('--split', type=str, default='train')
    ap.add_argument('--checkpoint', type=str, default='checkpoints/sam-3d-body-dinov3/model.ckpt')
    ap.add_argument('--mhr-path', type=str, default='checkpoints/sam-3d-body-dinov3/assets/mhr_model.pt')
    ap.add_argument('--batch-size', type=int, default=4)
    ap.add_argument('--shard-index', type=int, default=0)
    ap.add_argument('--num-shards', type=int, default=1)
    ap.add_argument('--max-images', type=int, default=8000, help='Limit number of images processed per shard')
    ap.add_argument('--upload-interval', type=int, default=500, help='Upload partial results every N images')
    args = ap.parse_args()
    logger.info(f"Arguments: {vars(args)}")
{vars(args)}") device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logger.info(f"Using device: {device}") # Load model logger.info("Loading SAM 3D Body...") model, model_cfg = load_sam_3d_body(args.checkpoint, device=device, mhr_path=args.mhr_path) model.eval() teacher = SAM3DBodyEstimator( sam_3d_body_model=model, model_cfg=model_cfg, human_detector=None, human_segmentor=None, fov_estimator=None, ) logger.info("✓ Model loaded") # Load dataset logger.info(f"Loading dataset {args.input_dataset}...") ds = load_dataset(args.input_dataset, split=args.split, streaming=True) if args.num_shards > 1: ds = ds.shard(num_shards=args.num_shards, index=args.shard_index) logger.info(f"Using shard {args.shard_index+1}/{args.num_shards}") # Prepare incremental upload api = HfApi(token=os.environ.get('HF_TOKEN')) token = os.environ.get('HF_TOKEN') repo_id = args.output_dataset # Load existing image_ids (skip logic) existing_ids = set() try: from datasets import load_dataset as _ld existing = _ld(repo_id, split=args.split) for r in existing: existing_ids.add(r['image_id']) logger.info(f"Loaded {len(existing_ids)} existing image_ids to skip") except Exception: logger.info("No existing output dataset found; starting fresh") # Processing loop with manual batching logger.info(f"Processing with batch_size={args.batch_size}, max_images={args.max_images}, upload_interval={args.upload_interval}") buffer = [] # list of dicts {image_id,num_humans,sam3d_data} total_processed = 0 upload_index = 0 batch_images = [] batch_paths = [] def flush_buffer(): nonlocal buffer, upload_index if not buffer: return import pyarrow as pa import pyarrow.parquet as pq # Build columns image_ids = [b['image_id'] for b in buffer] num_humans = [b['num_humans'] for b in buffer] sam3d_data = [b['sam3d_data'] for b in buffer] table = pa.table({'image_id': image_ids, 'num_humans': num_humans, 'sam3d_data': sam3d_data}) file_name = f"batch-sh{args.shard_index}-u{upload_index:04d}.parquet" local_dir = Path('sam3d_batches') local_dir.mkdir(parents=True, exist_ok=True) local_path = local_dir / file_name pq.write_table(table, local_path) path_in_repo = f"data/{file_name}" logger.info(f"Uploading incremental batch {upload_index} with {len(buffer)} images -> {path_in_repo}") try: api.upload_file(path_or_fileobj=str(local_path), path_in_repo=path_in_repo, repo_id=repo_id, repo_type='dataset', token=token) logger.info("✓ Incremental upload committed") except Exception as e: logger.error(f"Incremental upload failed: {e}") buffer.clear() upload_index += 1 # Iterate streaming dataset manually current_batch_imgs = [] current_batch_paths = [] for idx, sample in enumerate(ds): if idx >= args.max_images: break image = sample['image'] image_path = sample.get('image_path', None) image_id = Path(image_path).stem if image_path else f"img_{idx:06d}" if image_id in existing_ids: continue current_batch_imgs.append(image) current_batch_paths.append(image_path) # When batch full, process if len(current_batch_imgs) == args.batch_size: batch = {'image': current_batch_imgs, 'image_path': current_batch_paths} batch_result = process_batch(batch) for i in range(len(batch_result['image_id'])): buffer.append({ 'image_id': batch_result['image_id'][i], 'num_humans': batch_result['num_humans'][i], 'sam3d_data': batch_result['sam3d_data'][i] }) existing_ids.add(batch_result['image_id'][i]) total_processed += len(batch_result['image_id']) current_batch_imgs = [] current_batch_paths = [] if total_processed % 50 == 0: logger.info(f"Processed {total_processed} 
images") if total_processed % args.upload_interval == 0: flush_buffer() # Process any leftover if current_batch_imgs: batch = {'image': current_batch_imgs, 'image_path': current_batch_paths} batch_result = process_batch(batch) for i in range(len(batch_result['image_id'])): buffer.append({ 'image_id': batch_result['image_id'][i], 'num_humans': batch_result['num_humans'][i], 'sam3d_data': batch_result['sam3d_data'][i] }) existing_ids.add(batch_result['image_id'][i]) total_processed += len(batch_result['image_id']) # Final flush flush_buffer() logger.info(f"✓ Finished shard processing with total images processed: {total_processed}") logger.info("All incremental uploads done.") if __name__ == '__main__': main()