Commit db8245e · Parent(s): b5861ea

Refactor object detection script to use single class detection and update argument naming

detect-objects.py  CHANGED  (+32 -33)
@@ -15,39 +15,43 @@
 """
 Detect objects in images using Meta's SAM3 (Segment Anything Model 3).
 
-This script processes images from a HuggingFace dataset and detects
-based on text
+This script processes images from a HuggingFace dataset and detects a single object
+type based on a text prompt, outputting bounding boxes in HuggingFace object detection format.
 
 Examples:
     # Detect photographs in historical newspapers
     uv run detect-objects.py \\
         davanstrien/newspapers-with-images-after-photography \\
         my-username/newspapers-detected \\
-        --
+        --class-name photograph
 
-    # Detect
+    # Detect animals in camera trap images
    uv run detect-objects.py \\
-
-
-        --
-        --confidence-threshold 0.
+        wildlife-images \\
+        wildlife-detected \\
+        --class-name animal \\
+        --confidence-threshold 0.6
 
    # Test on small subset
    uv run detect-objects.py input output \\
-        --
+        --class-name table \\
        --max-samples 10
 
    # Run on HF Jobs with L4 GPU
-
-
-
-
-
+    hf jobs uv run --flavor l4x1 \\
+        -s HF_TOKEN=$HF_TOKEN \\
+        https://huggingface.co/datasets/uv-scripts/sam3/raw/main/detect-objects.py \\
+        input-dataset output-dataset \\
+        --class-name photograph \\
+        --confidence-threshold 0.5
 
Performance:
    - L4 GPU: ~2-4 images/sec (depending on image size and batch size)
    - Memory: ~8-12 GB VRAM
    - Recommended batch size: 4-8 for L4, 8-16 for A10
+
+Note: To detect multiple object types, run the script multiple times with different
+--class-name values and merge the results.
 """
 
 import argparse
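The docstring's closing note leaves the merge step to the reader. A minimal sketch of one way to do it with the datasets library, assuming both runs processed the same input rows in the same order; the repo names and the widened ClassLabel are illustrative, not part of this script:

    from datasets import ClassLabel, Sequence, Value, load_dataset

    # Hypothetical outputs of two runs: --class-name photograph / --class-name table
    photos = load_dataset("user/docs-photograph", split="train")
    tables = load_dataset("user/docs-table", split="train")

    # Each run's schema carries a one-name ClassLabel, so widen the label
    # space before merging.
    features = photos.features.copy()
    features["objects"] = Sequence(
        {
            "bbox": Sequence(Value("float32"), length=4),
            "category": ClassLabel(names=["photograph", "table"]),
            "score": Value("float32"),
        }
    )

    def merge(example, idx):
        a, b = example["objects"], tables[idx]["objects"]
        example["objects"] = {
            "bbox": a["bbox"] + b["bbox"],
            # Shift the second run's indices: "table" maps to index 1
            "category": a["category"] + [c + 1 for c in b["category"]],
            "score": a["score"] + b["score"],
        }
        return example

    merged = photos.map(merge, with_indices=True, features=features)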
@@ -98,9 +102,9 @@ def parse_args():
 
     # Object detection configuration
     parser.add_argument(
-        "--classes",
+        "--class-name",
         required=True,
-        help="
+        help="Object class to detect (e.g., 'photograph', 'animal', 'table')",
     )
     parser.add_argument(
         "--confidence-threshold",
@@ -206,13 +210,13 @@ def load_and_validate_dataset(
 def process_batch(
     batch: Dict[str, List[Any]],
     image_column: str,
-
+    class_name: str,
     processor: Sam3Processor,
     model: Sam3Model,
     confidence_threshold: float,
     mask_threshold: float,
 ) -> Dict[str, List[List[Dict[str, Any]]]]:
-    """Process a batch of images and return detections."""
+    """Process a batch of images and return detections for a single class."""
     images = batch[image_column]
 
     # Convert to PIL Images and ensure RGB
@@ -231,7 +235,7 @@ def process_batch(
     try:
         inputs = processor(
             images=pil_images,
-            text=
+            text=class_name,  # Single class name as prompt
             return_tensors="pt",
         )
         # Move to device and convert to model's dtype
@@ -264,7 +268,6 @@ def process_batch(
         for result in results:
             boxes = result.get("boxes", torch.tensor([]))
             scores = result.get("scores", torch.tensor([]))
-            labels = result.get("labels", torch.tensor([]))
 
             # Handle empty results
             if len(boxes) == 0:
@@ -273,16 +276,14 @@ def process_batch(
 
             # Build list of detections
             detections = []
-            for box, score, label in zip(
-                boxes.cpu().numpy(), scores.cpu().numpy(), labels.cpu().numpy()
-            ):
+            for box, score in zip(boxes.cpu().numpy(), scores.cpu().numpy()):
                 x1, y1, x2, y2 = box
                 width = x2 - x1
                 height = y2 - y1
 
                 detection = {
                     "bbox": [float(x1), float(y1), float(width), float(height)],
-                    "category": int(label),
+                    "category": 0,  # Single class, always index 0
                     "score": float(score),
                 }
                 detections.append(detection)
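The loop above unpacks the model's corner boxes (x1, y1, x2, y2) and stores COCO-style [x, y, width, height] lists. A quick worked check with made-up coordinates:

    # Illustrative corner box; not real model output
    x1, y1, x2, y2 = 40.0, 60.0, 140.0, 210.0

    bbox = [x1, y1, x2 - x1, y2 - y1]  # [x, y, width, height]
    assert bbox == [40.0, 60.0, 100.0, 150.0]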
@@ -294,18 +295,16 @@ def process_batch(
 
 def main():
     args = parse_args()
-
-
-    if not
-        logger.error(
-            "❌ Invalid --classes argument. Provide comma-separated class names."
-        )
+
+    class_name = args.class_name.strip()
+    if not class_name:
+        logger.error("❌ Invalid --class-name argument. Provide a class name.")
         sys.exit(1)
 
     logger.info("🚀 SAM3 Object Detection")
     logger.info(f" Input: {args.input_dataset}")
     logger.info(f" Output: {args.output_dataset}")
-    logger.info(f"
+    logger.info(f" Class: {class_name}")
     logger.info(f" Confidence threshold: {args.confidence_threshold}")
     logger.info(f" Batch size: {args.batch_size}")
 
@@ -344,7 +343,7 @@ def main():
         lambda batch: process_batch(
             batch,
             args.image_column,
-
+            class_name,
             processor,
             model,
             args.confidence_threshold,
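This hunk only shows the arguments threaded through the lambda; the enclosing call is presumably a batched Dataset.map along these lines (batched and batch_size are standard datasets keywords, while the trailing arguments and variable names are assumed from process_batch's signature):

    # Sketch of the surrounding call; not shown in this hunk
    detected = dataset.map(
        lambda batch: process_batch(
            batch,
            args.image_column,
            class_name,
            processor,
            model,
            args.confidence_threshold,
            args.mask_threshold,
        ),
        batched=True,
        batch_size=args.batch_size,
    )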
@@ -361,7 +360,7 @@ def main():
         new_features["objects"] = Sequence(
             {
                 "bbox": Sequence(Value("float32"), length=4),
-                "category": ClassLabel(names=
+                "category": ClassLabel(names=[class_name]),
                 "score": Value("float32"),
             }
         )
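With a one-name ClassLabel in the schema, consumers of the output dataset can recover the class string from the features rather than hard-coding it. A minimal read-back sketch, with an assumed repo name:

    from datasets import load_dataset

    # Assumed output repo from a run with --class-name photograph
    ds = load_dataset("my-username/newspapers-detected", split="train")

    category = ds.features["objects"].feature["category"]  # ClassLabel(names=["photograph"])
    print(category.int2str(0))        # -> "photograph"
    print(ds[0]["objects"]["bbox"])   # [x, y, width, height] per detection
    print(ds[0]["objects"]["score"])  # matching confidence scores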