#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "transformers@git+https://github.com/huggingface/transformers.git@1fba72361e8e0e865d569f7cd15e5aa50b41ac9a",
#     "datasets",
#     "huggingface-hub",
#     "pillow",
#     "tqdm",
#     "torchvision",
#     "accelerate",
# ]
# ///
| """ | |
| Detect objects in images using Meta's SAM3 (Segment Anything Model 3). | |
| This script processes images from a HuggingFace dataset and detects objects | |
| based on text prompts, outputting bounding boxes in HuggingFace object detection format. | |
| Examples: | |
| # Detect photographs in historical newspapers | |
| uv run detect-objects.py \\ | |
| davanstrien/newspapers-with-images-after-photography \\ | |
| my-username/newspapers-detected \\ | |
| --classes photograph | |
| # Detect multiple object types | |
| uv run detect-objects.py \\ | |
| my-dataset \\ | |
| my-output \\ | |
| --classes "photograph,illustration,headline" \\ | |
| --confidence-threshold 0.7 | |
| # Test on small subset | |
| uv run detect-objects.py input output \\ | |
| --classes photo \\ | |
| --max-samples 10 | |
| # Run on HF Jobs with L4 GPU | |
| hfjobs run --flavor l4x1 \\ | |
| -e HF_TOKEN=$HF_TOKEN \\ | |
| ghcr.io/astral-sh/uv:latest \\ | |
| /bin/bash -c "uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\ | |
| input-dataset output-dataset --classes 'photo,illustration'" | |
| Performance: | |
| - L4 GPU: ~2-4 images/sec (depending on image size and batch size) | |
| - Memory: ~8-12 GB VRAM | |
| - Recommended batch size: 4-8 for L4, 8-16 for A10 | |
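
Output format:
    Each row gains an "objects" column: a list of detections such as
    [{"bbox": [x, y, width, height], "category": 0, "score": 0.93}, ...]
    where "category" indexes into the --classes list (stored as a ClassLabel).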
| """ | |
| import argparse | |
| import logging | |
| import os | |
| import sys | |
| from typing import List, Dict, Any | |
| import torch | |
| from PIL import Image | |
| from datasets import load_dataset, Dataset, Features, Sequence, Value, ClassLabel | |
| from datasets import Image as ImageFeature | |
| from huggingface_hub import HfApi, login | |
| from tqdm.auto import tqdm | |
| from transformers import Sam3Processor, Sam3Model | |

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger(__name__)

# GPU availability check
if not torch.cuda.is_available():
    logger.error("❌ CUDA is not available. This script requires a GPU.")
    logger.error("For local testing, ensure you have a CUDA-capable GPU.")
    logger.error("For cloud execution, use HF Jobs with --flavor l4x1 or similar.")
    sys.exit(1)


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Detect objects in images using SAM3",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )

    # Required arguments
    parser.add_argument(
        "input_dataset", help="Input HuggingFace dataset ID (e.g., 'username/dataset')"
    )
    parser.add_argument(
        "output_dataset", help="Output HuggingFace dataset ID (e.g., 'username/output')"
    )

    # Object detection configuration
    parser.add_argument(
        "--classes",
        required=True,
        help="Comma-separated list of object classes to detect (e.g., 'photograph,illustration,diagram')",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum confidence score for detections (default: 0.5)",
    )
    parser.add_argument(
        "--mask-threshold",
        type=float,
        default=0.5,
        help="Threshold for mask generation (default: 0.5)",
    )

    # Dataset configuration
    parser.add_argument(
        "--image-column",
        default="image",
        help="Name of the column containing images (default: 'image')",
    )
    parser.add_argument(
        "--split", default="train", help="Dataset split to process (default: 'train')"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        default=None,
        help="Maximum number of samples to process (for testing)",
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle dataset before processing"
    )

    # Processing configuration
    parser.add_argument(
        "--batch-size",
        type=int,
        default=4,
        help="Batch size for processing (default: 4)",
    )
    parser.add_argument(
        "--model",
        default="facebook/sam3",
        help="SAM3 model ID (default: 'facebook/sam3')",
    )
    parser.add_argument(
        "--dtype",
        default="bfloat16",
        choices=["float32", "float16", "bfloat16"],
        help="Model precision (default: 'bfloat16')",
    )

    # Output configuration
    parser.add_argument(
        "--private", action="store_true", help="Make output dataset private"
    )
    parser.add_argument(
        "--hf-token",
        default=None,
        help="HuggingFace token (default: uses HF_TOKEN env var or cached token)",
    )
    return parser.parse_args()


def load_and_validate_dataset(
    dataset_id: str,
    split: str,
    image_column: str,
    max_samples: int | None = None,
    shuffle: bool = False,
    hf_token: str | None = None,
) -> Dataset:
| """Load dataset and validate it has the required image column.""" | |
| logger.info(f"π Loading dataset: {dataset_id} (split: {split})") | |
| try: | |
| dataset = load_dataset(dataset_id, split=split, token=hf_token) | |
| except Exception as e: | |
| logger.error(f"Failed to load dataset '{dataset_id}': {e}") | |
| sys.exit(1) | |
| # Validate image column exists | |
| if image_column not in dataset.column_names: | |
| logger.error(f"Column '{image_column}' not found in dataset") | |
| logger.error(f"Available columns: {dataset.column_names}") | |
| sys.exit(1) | |
| # Shuffle if requested | |
| if shuffle: | |
| logger.info("π Shuffling dataset") | |
| dataset = dataset.shuffle() | |
| # Limit samples if requested | |
| if max_samples is not None: | |
| logger.info(f"π’ Limiting to {max_samples} samples") | |
| dataset = dataset.select(range(min(max_samples, len(dataset)))) | |
| logger.info(f"β Loaded {len(dataset)} samples") | |
| return dataset | |


def process_batch(
    batch: Dict[str, List[Any]],
    image_column: str,
    class_names: List[str],
    processor: Sam3Processor,
    model: Sam3Model,
    confidence_threshold: float,
    mask_threshold: float,
) -> Dict[str, List[List[Dict[str, Any]]]]:
    """Process a batch of images and return detections."""
    images = batch[image_column]

    # Convert to PIL Images and ensure RGB
    pil_images = []
    for img in images:
        if isinstance(img, str):
            img = Image.open(img)
        if img.mode != "RGB":  # covers grayscale ("L"), RGBA, palette, etc.
            img = img.convert("RGB")
        pil_images.append(img)

    # Store original (height, width) sizes for post-processing
    original_sizes = [(img.height, img.width) for img in pil_images]

    # Process batch through model
    try:
        inputs = processor(
            images=pil_images,
            text=class_names,  # All class names as prompts
            return_tensors="pt",
        )
        # Move tensors to the model's device; cast floating-point tensors
        # (e.g. pixel_values) to the model's dtype while leaving integer
        # tensors (e.g. token IDs) in their original dtype.
        inputs = {
            k: v.to(
                model.device,
                dtype=model.dtype if v.dtype.is_floating_point else v.dtype,
            )
            for k, v in inputs.items()
        }

        with torch.no_grad():
            outputs = model(**inputs)

        # Post-process outputs
        results = processor.post_process_instance_segmentation(
            outputs,
            threshold=confidence_threshold,
            mask_threshold=mask_threshold,
            target_sizes=original_sizes,
        )
    except Exception as e:
        logger.warning(f"⚠️ Failed to process batch: {e}")
        # Return empty detections for all images in batch
        return {"objects": [[] for _ in range(len(pil_images))]}

    # Convert to HuggingFace object detection format
    batch_objects = []
    for result in results:
        boxes = result.get("boxes", torch.tensor([]))
        scores = result.get("scores", torch.tensor([]))
        labels = result.get("labels", torch.tensor([]))

        # Handle empty results
        if len(boxes) == 0:
            batch_objects.append([])
            continue

        # Build list of detections, converting (x1, y1, x2, y2) corners
        # to [x, y, width, height]
        detections = []
        for box, score, label_idx in zip(
            boxes.cpu().numpy(), scores.cpu().numpy(), labels.cpu().numpy()
        ):
            x1, y1, x2, y2 = box
            width = x2 - x1
            height = y2 - y1
            detection = {
                "bbox": [float(x1), float(y1), float(width), float(height)],
                "category": int(label_idx),  # Index into class_names
                "score": float(score),
            }
            detections.append(detection)
        batch_objects.append(detections)

    return {"objects": batch_objects}


def main():
    args = parse_args()

    # Parse class names
    class_names = [name.strip() for name in args.classes.split(",")]
    if not class_names or not all(class_names):
        logger.error(
            "❌ Invalid --classes argument. Provide comma-separated class names."
        )
        sys.exit(1)

    logger.info("🚀 SAM3 Object Detection")
    logger.info(f" Input: {args.input_dataset}")
    logger.info(f" Output: {args.output_dataset}")
    logger.info(f" Classes: {class_names}")
    logger.info(f" Confidence threshold: {args.confidence_threshold}")
    logger.info(f" Batch size: {args.batch_size}")

    # Authentication
    if args.hf_token:
        login(token=args.hf_token)
    elif os.getenv("HF_TOKEN"):
        login(token=os.getenv("HF_TOKEN"))

    # Load dataset
    dataset = load_and_validate_dataset(
        args.input_dataset,
        args.split,
        args.image_column,
        args.max_samples,
        args.shuffle,
        args.hf_token,
    )

    # Load model
    logger.info(f"🤖 Loading SAM3 model: {args.model}")
    try:
        processor = Sam3Processor.from_pretrained(args.model)
        model = Sam3Model.from_pretrained(
            args.model, torch_dtype=getattr(torch, args.dtype), device_map="auto"
        )
        logger.info(f"✅ Model loaded on {model.device}")
    except Exception as e:
        logger.error(f"❌ Failed to load model: {e}")
        logger.error("Ensure the model exists and you have access permissions")
        sys.exit(1)

    # Process dataset
    logger.info("🔍 Processing images...")
    processed_dataset = dataset.map(
        lambda batch: process_batch(
            batch,
            args.image_column,
            class_names,
            processor,
            model,
            args.confidence_threshold,
            args.mask_threshold,
        ),
        batched=True,
        batch_size=args.batch_size,
        desc="Detecting objects",
    )

    # Create dynamic features with ClassLabel
    logger.info("📋 Creating output schema...")
    new_features = processed_dataset.features.copy()
    new_features["objects"] = Sequence(
        {
            "bbox": Sequence(Value("float32"), length=4),
            "category": ClassLabel(names=class_names),
            "score": Value("float32"),
        }
    )
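
    # ClassLabel stores categories as integers but keeps the label names in
    # the schema, so consumers can map indices back to names, e.g.
    # (illustrative):
    #   ClassLabel(names=class_names).int2str(0)  # -> first class name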

    # Cast to proper types
    processed_dataset = processed_dataset.cast(new_features)

    # Calculate statistics
    total_detections = sum(len(objs) for objs in processed_dataset["objects"])
    images_with_detections = sum(
        len(objs) > 0 for objs in processed_dataset["objects"]
    )
    logger.info("✅ Detection complete!")
    logger.info(f" Total detections: {total_detections}")
    logger.info(
        f" Images with detections: {images_with_detections}/{len(processed_dataset)}"
    )
    logger.info(
        f" Average detections per image: {total_detections / len(processed_dataset):.2f}"
    )

    # Push to hub
    logger.info(f"📤 Pushing to HuggingFace Hub: {args.output_dataset}")
    try:
        processed_dataset.push_to_hub(args.output_dataset, private=args.private)
        logger.info(
            f"✅ Dataset available at: https://huggingface.co/datasets/{args.output_dataset}"
        )
    except Exception as e:
        logger.error(f"❌ Failed to push to hub: {e}")
        logger.info("💾 Saving locally as backup...")
        processed_dataset.save_to_disk("./output_dataset")
        logger.info("✅ Saved to ./output_dataset")
        sys.exit(1)


if __name__ == "__main__":
    main()