davanstrien HF Staff committed on
Commit
213c8b6
·
1 Parent(s): a79963f
Files changed (1)
  1. detect-objects.py +400 -0
detect-objects.py ADDED
@@ -0,0 +1,400 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "transformers@git+https://github.com/huggingface/transformers.git@1fba72361e8e0e865d569f7cd15e5aa50b41ac9a",
#     "datasets",
#     "huggingface-hub",
#     "pillow",
#     "tqdm",
#     "torchvision",
# ]
# ///

"""
Detect objects in images using Meta's SAM3 (Segment Anything Model 3).

This script processes images from a HuggingFace dataset and detects objects
based on text prompts, outputting bounding boxes in HuggingFace object detection format.

Examples:
    # Detect photographs in historical newspapers
    uv run detect-objects.py \\
        davanstrien/newspapers-with-images-after-photography \\
        my-username/newspapers-detected \\
        --classes photograph

    # Detect multiple object types
    uv run detect-objects.py \\
        my-dataset \\
        my-output \\
        --classes "photograph,illustration,headline" \\
        --confidence-threshold 0.7

    # Test on a small subset
    uv run detect-objects.py input output \\
        --classes photo \\
        --max-samples 10

    # Run on HF Jobs with an L4 GPU
    hfjobs run --flavor l4x1 \\
        -e HF_TOKEN=$HF_TOKEN \\
        ghcr.io/astral-sh/uv:latest \\
        /bin/bash -c "uv run https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
        input-dataset output-dataset --classes 'photo,illustration'"

Performance:
    - L4 GPU: ~2-4 images/sec (depending on image size and batch size)
    - Memory: ~8-12 GB VRAM
    - Recommended batch size: 4-8 for L4, 8-16 for A10
"""

import argparse
import logging
import os
import sys
from typing import List, Dict, Any

import torch
from PIL import Image
from datasets import load_dataset, Dataset, Sequence, Value, ClassLabel
from huggingface_hub import login
from transformers import Sam3Processor, Sam3Model

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%H:%M:%S'
)
logger = logging.getLogger(__name__)

# GPU availability check
if not torch.cuda.is_available():
    logger.error("❌ CUDA is not available. This script requires a GPU.")
    logger.error("For local testing, ensure you have a CUDA-capable GPU.")
    logger.error("For cloud execution, use HF Jobs with --flavor l4x1 or similar.")
    sys.exit(1)


def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Detect objects in images using SAM3",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__
    )

    # Required arguments
    parser.add_argument(
        "input_dataset",
        help="Input HuggingFace dataset ID (e.g., 'username/dataset')"
    )
    parser.add_argument(
        "output_dataset",
        help="Output HuggingFace dataset ID (e.g., 'username/output')"
    )

    # Object detection configuration
    parser.add_argument(
        "--classes",
        required=True,
        help="Comma-separated list of object classes to detect (e.g., 'photograph,illustration,diagram')"
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum confidence score for detections (default: 0.5)"
    )
    parser.add_argument(
        "--mask-threshold",
        type=float,
        default=0.5,
        help="Threshold for mask generation (default: 0.5)"
    )

    # Dataset configuration
    parser.add_argument(
        "--image-column",
        default="image",
        help="Name of the column containing images (default: 'image')"
    )
    parser.add_argument(
        "--split",
        default="train",
        help="Dataset split to process (default: 'train')"
    )
    parser.add_argument(
        "--max-samples",
        type=int,
        default=None,
        help="Maximum number of samples to process (for testing)"
    )
    parser.add_argument(
        "--shuffle",
        action="store_true",
        help="Shuffle dataset before processing"
    )

    # Processing configuration
    parser.add_argument(
        "--batch-size",
        type=int,
        default=4,
        help="Batch size for processing (default: 4)"
    )
    parser.add_argument(
        "--model",
        default="facebook/sam3",
        help="SAM3 model ID (default: 'facebook/sam3')"
    )
    parser.add_argument(
        "--dtype",
        default="bfloat16",
        choices=["float32", "float16", "bfloat16"],
        help="Model precision (default: 'bfloat16')"
    )

    # Output configuration
    parser.add_argument(
        "--private",
        action="store_true",
        help="Make output dataset private"
    )
    parser.add_argument(
        "--hf-token",
        default=None,
        help="HuggingFace token (default: uses HF_TOKEN env var or cached token)"
    )

    return parser.parse_args()


def load_and_validate_dataset(
    dataset_id: str,
    split: str,
    image_column: str,
    max_samples: int | None = None,
    shuffle: bool = False,
    hf_token: str | None = None
) -> Dataset:
    """Load dataset and validate it has the required image column."""
    logger.info(f"📂 Loading dataset: {dataset_id} (split: {split})")

    try:
        dataset = load_dataset(dataset_id, split=split, token=hf_token)
    except Exception as e:
        logger.error(f"Failed to load dataset '{dataset_id}': {e}")
        sys.exit(1)

    # Validate image column exists
    if image_column not in dataset.column_names:
        logger.error(f"Column '{image_column}' not found in dataset")
        logger.error(f"Available columns: {dataset.column_names}")
        sys.exit(1)

    # Shuffle if requested
    if shuffle:
        logger.info("🔀 Shuffling dataset")
        dataset = dataset.shuffle()

    # Limit samples if requested
    if max_samples is not None:
        logger.info(f"🔢 Limiting to {max_samples} samples")
        dataset = dataset.select(range(min(max_samples, len(dataset))))

    logger.info(f"✅ Loaded {len(dataset)} samples")
    return dataset


def process_batch(
    batch: Dict[str, List[Any]],
    image_column: str,
    class_names: List[str],
    processor: Sam3Processor,
    model: Sam3Model,
    confidence_threshold: float,
    mask_threshold: float
) -> Dict[str, List[List[Dict[str, Any]]]]:
    """Process a batch of images and return detections."""
    images = batch[image_column]

    # Convert to PIL Images and ensure RGB
    pil_images = []
    for img in images:
        if isinstance(img, str):
            img = Image.open(img)
        if img.mode != "RGB":
            img = img.convert("RGB")
        pil_images.append(img)

    # Store original sizes for post-processing
    original_sizes = [(img.height, img.width) for img in pil_images]

    # Process batch through model
    try:
        inputs = processor(
            images=pil_images,
            text=class_names,  # All class names as prompts
            return_tensors="pt"
        ).to(model.device)

        with torch.no_grad():
            outputs = model(**inputs)

        # Post-process outputs
        results = processor.post_process_instance_segmentation(
            outputs,
            threshold=confidence_threshold,
            mask_threshold=mask_threshold,
            target_sizes=original_sizes
        )

    except Exception as e:
        logger.warning(f"⚠️ Failed to process batch: {e}")
        # Return empty detections for all images in batch
        return {"objects": [[] for _ in range(len(pil_images))]}

    # Convert to HuggingFace object detection format
    batch_objects = []
    for result in results:
        boxes = result.get('boxes', torch.tensor([]))
        scores = result.get('scores', torch.tensor([]))
        labels = result.get('labels', torch.tensor([]))

        # Handle empty results
        if len(boxes) == 0:
            batch_objects.append([])
            continue

        # Build list of detections
        detections = []
        for box, score, label_idx in zip(
            boxes.cpu().numpy(),
            scores.cpu().numpy(),
            labels.cpu().numpy()
        ):
            # Convert (x1, y1, x2, y2) corners to COCO-style [x, y, width, height]
            x1, y1, x2, y2 = box
            width = x2 - x1
            height = y2 - y1

            detection = {
                "bbox": [float(x1), float(y1), float(width), float(height)],
                "category": int(label_idx),  # Index into class_names
                "score": float(score)
            }
            detections.append(detection)

        batch_objects.append(detections)

    return {"objects": batch_objects}


def main():
    args = parse_args()

    # Parse class names
    class_names = [name.strip() for name in args.classes.split(',')]
    if not class_names or any(not name for name in class_names):
        logger.error("❌ Invalid --classes argument. Provide comma-separated class names.")
        sys.exit(1)

    logger.info("🚀 SAM3 Object Detection")
    logger.info(f"   Input: {args.input_dataset}")
    logger.info(f"   Output: {args.output_dataset}")
    logger.info(f"   Classes: {class_names}")
    logger.info(f"   Confidence threshold: {args.confidence_threshold}")
    logger.info(f"   Batch size: {args.batch_size}")

    # Authentication
    if args.hf_token:
        login(token=args.hf_token)
    elif os.getenv("HF_TOKEN"):
        login(token=os.getenv("HF_TOKEN"))

    # Load dataset
    dataset = load_and_validate_dataset(
        args.input_dataset,
        args.split,
        args.image_column,
        args.max_samples,
        args.shuffle,
        args.hf_token
    )

    # Load model
    logger.info(f"🤖 Loading SAM3 model: {args.model}")
    try:
        processor = Sam3Processor.from_pretrained(args.model)
        model = Sam3Model.from_pretrained(
            args.model,
            torch_dtype=getattr(torch, args.dtype),
            device_map="auto"
        )
        logger.info(f"✅ Model loaded on {model.device}")
    except Exception as e:
        logger.error(f"❌ Failed to load model: {e}")
        logger.error("Ensure the model exists and you have access permissions")
        sys.exit(1)

    # Process dataset
    logger.info("🔍 Processing images...")
    processed_dataset = dataset.map(
        lambda batch: process_batch(
            batch,
            args.image_column,
            class_names,
            processor,
            model,
            args.confidence_threshold,
            args.mask_threshold
        ),
        batched=True,
        batch_size=args.batch_size,
        desc="Detecting objects"
    )

    # Calculate statistics before casting, while each row's "objects" entry is
    # still a plain list of detection dicts
    total_detections = sum(len(objs) for objs in processed_dataset["objects"])
    images_with_detections = sum(1 for objs in processed_dataset["objects"] if len(objs) > 0)

    # Create dynamic features with ClassLabel
    logger.info("📊 Creating output schema...")
    new_features = processed_dataset.features.copy()
    new_features["objects"] = Sequence({
        "bbox": Sequence(Value("float32"), length=4),
        "category": ClassLabel(names=class_names),
        "score": Value("float32")
    })

    # Cast to proper types
    processed_dataset = processed_dataset.cast(new_features)

    logger.info("✅ Detection complete!")
    logger.info(f"   Total detections: {total_detections}")
    logger.info(f"   Images with detections: {images_with_detections}/{len(processed_dataset)}")
    logger.info(f"   Average detections per image: {total_detections / len(processed_dataset):.2f}")

    # Push to hub
    logger.info(f"📤 Pushing to HuggingFace Hub: {args.output_dataset}")
    try:
        processed_dataset.push_to_hub(
            args.output_dataset,
            private=args.private
        )
        logger.info(f"✅ Dataset available at: https://huggingface.co/datasets/{args.output_dataset}")
    except Exception as e:
        logger.error(f"❌ Failed to push to hub: {e}")
        logger.info("💾 Saving locally as backup...")
        processed_dataset.save_to_disk("./output_dataset")
        logger.info("✅ Saved to ./output_dataset")
        sys.exit(1)


if __name__ == "__main__":
    main()
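
After a run, the pushed dataset can be loaded back to sanity-check the detections. A minimal sketch, assuming the docstring's example output repo (substitute whatever was passed as output_dataset):

    from datasets import load_dataset

    # Hypothetical repo ID from the docstring example
    ds = load_dataset("my-username/newspapers-detected", split="train")

    print(ds.features)        # original columns plus the "objects" detection schema
    print(ds[0]["objects"])   # bounding boxes, category ids, and scores for the first image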