Commit 85c3e70
Parent(s): 1ab0327
Refactor

detect-objects.py  +54 -62  CHANGED
@@ -67,8 +67,8 @@ from transformers import Sam3Processor, Sam3Model
 # Configure logging
 logging.basicConfig(
     level=logging.INFO,
-    format=
-    datefmt=
+    format="%(asctime)s - %(levelname)s - %(message)s",
+    datefmt="%H:%M:%S",
 )
 logger = logging.getLogger(__name__)

@@ -85,59 +85,53 @@ def parse_args():
     parser = argparse.ArgumentParser(
         description="Detect objects in images using SAM3",
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog=__doc__
+        epilog=__doc__,
     )

     # Required arguments
     parser.add_argument(
-        "input_dataset",
-        help="Input HuggingFace dataset ID (e.g., 'username/dataset')"
+        "input_dataset", help="Input HuggingFace dataset ID (e.g., 'username/dataset')"
     )
     parser.add_argument(
-        "output_dataset",
-        help="Output HuggingFace dataset ID (e.g., 'username/output')"
+        "output_dataset", help="Output HuggingFace dataset ID (e.g., 'username/output')"
     )

     # Object detection configuration
     parser.add_argument(
         "--classes",
         required=True,
-        help="Comma-separated list of object classes to detect (e.g., 'photograph,illustration,diagram')"
+        help="Comma-separated list of object classes to detect (e.g., 'photograph,illustration,diagram')",
     )
     parser.add_argument(
         "--confidence-threshold",
         type=float,
         default=0.5,
-        help="Minimum confidence score for detections (default: 0.5)"
+        help="Minimum confidence score for detections (default: 0.5)",
     )
     parser.add_argument(
         "--mask-threshold",
         type=float,
         default=0.5,
-        help="Threshold for mask generation (default: 0.5)"
+        help="Threshold for mask generation (default: 0.5)",
     )

     # Dataset configuration
     parser.add_argument(
         "--image-column",
         default="image",
-        help="Name of the column containing images (default: 'image')"
+        help="Name of the column containing images (default: 'image')",
     )
     parser.add_argument(
-        "--split",
-        default="train",
-        help="Dataset split to process (default: 'train')"
+        "--split", default="train", help="Dataset split to process (default: 'train')"
     )
     parser.add_argument(
         "--max-samples",
         type=int,
         default=None,
-        help="Maximum number of samples to process (for testing)"
+        help="Maximum number of samples to process (for testing)",
     )
     parser.add_argument(
-        "--shuffle",
-        action="store_true",
-        help="Shuffle dataset before processing"
+        "--shuffle", action="store_true", help="Shuffle dataset before processing"
     )

     # Processing configuration
@@ -145,30 +139,28 @@ def parse_args():
         "--batch-size",
         type=int,
         default=4,
-        help="Batch size for processing (default: 4)"
+        help="Batch size for processing (default: 4)",
     )
     parser.add_argument(
         "--model",
         default="facebook/sam3",
-        help="SAM3 model ID (default: 'facebook/sam3')"
+        help="SAM3 model ID (default: 'facebook/sam3')",
     )
     parser.add_argument(
         "--dtype",
         default="bfloat16",
         choices=["float32", "float16", "bfloat16"],
-        help="Model precision (default: 'bfloat16')"
+        help="Model precision (default: 'bfloat16')",
     )

     # Output configuration
     parser.add_argument(
-        "--private",
-        action="store_true",
-        help="Make output dataset private"
+        "--private", action="store_true", help="Make output dataset private"
     )
     parser.add_argument(
         "--hf-token",
         default=None,
-        help="HuggingFace token (default: uses HF_TOKEN env var or cached token)"
+        help="HuggingFace token (default: uses HF_TOKEN env var or cached token)",
     )

     return parser.parse_args()
@@ -179,8 +171,8 @@ def load_and_validate_dataset(
     split: str,
     image_column: str,
     max_samples: int = None,
-    shuffle: bool = False,
-    hf_token: str = None
+    shuffle: bool = False,
+    hf_token: str = None,
 ) -> Dataset:
     """Load dataset and validate it has the required image column."""
     logger.info(f"π Loading dataset: {dataset_id} (split: {split})")
@@ -218,7 +210,7 @@ def process_batch(
     processor: Sam3Processor,
     model: Sam3Model,
     confidence_threshold: float,
-    mask_threshold: float
+    mask_threshold: float,
 ) -> Dict[str, List[List[Dict[str, Any]]]]:
     """Process a batch of images and return detections."""
     images = batch[image_column]
@@ -228,9 +220,7 @@ def process_batch(
     for img in images:
         if isinstance(img, str):
             img = Image.open(img)
-        if img.mode == "L":
-            img = img.convert("RGB")
-        elif img.mode != "RGB":
+        if img.mode == "L" or img.mode != "RGB":
             img = img.convert("RGB")
         pil_images.append(img)

@@ -242,7 +232,7 @@ def process_batch(
         inputs = processor(
             images=pil_images,
             text=class_names,  # All class names as prompts
-            return_tensors="pt"
+            return_tensors="pt",
         ).to(model.device)

         with torch.no_grad():
@@ -253,7 +243,7 @@ def process_batch(
             outputs,
             threshold=confidence_threshold,
             mask_threshold=mask_threshold,
-            target_sizes=original_sizes
+            target_sizes=original_sizes,
         )

     except Exception as e:
@@ -264,9 +254,9 @@ def process_batch(
     # Convert to HuggingFace object detection format
     batch_objects = []
     for result in results:
-        boxes = result.get(
-        scores = result.get(
-        labels = result.get(
+        boxes = result.get("boxes", torch.tensor([]))
+        scores = result.get("scores", torch.tensor([]))
+        labels = result.get("labels", torch.tensor([]))

         # Handle empty results
         if len(boxes) == 0:
@@ -276,9 +266,7 @@ def process_batch(
         # Build list of detections
         detections = []
         for box, score, label_idx in zip(
-            boxes.cpu().numpy(),
-            scores.cpu().numpy(),
-            labels.cpu().numpy()
+            boxes.cpu().numpy(), scores.cpu().numpy(), labels.cpu().numpy()
         ):
             x1, y1, x2, y2 = box
             width = x2 - x1
@@ -287,7 +275,7 @@ def process_batch(
             detection = {
                 "bbox": [float(x1), float(y1), float(width), float(height)],
                 "category": int(label_idx),  # Index into class_names
-                "score": float(score)
+                "score": float(score),
             }
             detections.append(detection)

@@ -298,11 +286,12 @@ def process_batch(

 def main():
     args = parse_args()
-
     # Parse class names
-    class_names = [name.strip() for name in args.classes.split(
-    if not class_names or
-    logger.error(
+    class_names = [name.strip() for name in args.classes.split(",")]
+    if not class_names or not all(class_names):
+        logger.error(
+            "β Invalid --classes argument. Provide comma-separated class names."
+        )
         sys.exit(1)

     logger.info("π SAM3 Object Detection")
@@ -325,7 +314,7 @@ def main():
         args.image_column,
         args.max_samples,
         args.shuffle,
-        args.hf_token
+        args.hf_token,
     )

     # Load model
@@ -333,9 +322,7 @@ def main():
     try:
         processor = Sam3Processor.from_pretrained(args.model)
         model = Sam3Model.from_pretrained(
-            args.model,
-            torch_dtype=getattr(torch, args.dtype),
-            device_map="auto"
+            args.model, torch_dtype=getattr(torch, args.dtype), device_map="auto"
         )
         logger.info(f"β Model loaded on {model.device}")
     except Exception as e:
@@ -353,42 +340,47 @@ def main():
             processor,
             model,
             args.confidence_threshold,
-            args.mask_threshold
+            args.mask_threshold,
         ),
         batched=True,
         batch_size=args.batch_size,
-        desc="Detecting objects"
+        desc="Detecting objects",
     )

     # Create dynamic features with ClassLabel
     logger.info("π Creating output schema...")
     new_features = processed_dataset.features.copy()
-    new_features["objects"] = Sequence(
-
-
-
-
+    new_features["objects"] = Sequence(
+        {
+            "bbox": Sequence(Value("float32"), length=4),
+            "category": ClassLabel(names=class_names),
+            "score": Value("float32"),
+        }
+    )

     # Cast to proper types
     processed_dataset = processed_dataset.cast(new_features)

     # Calculate statistics
     total_detections = sum(len(objs) for objs in processed_dataset["objects"])
-    images_with_detections = sum(
+    images_with_detections = sum(len(objs) > 0 for objs in processed_dataset["objects"])

     logger.info("β Detection complete!")
     logger.info(f" Total detections: {total_detections}")
-    logger.info(
-
+    logger.info(
+        f" Images with detections: {images_with_detections}/{len(processed_dataset)}"
+    )
+    logger.info(
+        f" Average detections per image: {total_detections / len(processed_dataset):.2f}"
+    )

     # Push to hub
     logger.info(f"π€ Pushing to HuggingFace Hub: {args.output_dataset}")
     try:
-        processed_dataset.push_to_hub(
-
-
+        processed_dataset.push_to_hub(args.output_dataset, private=args.private)
+        logger.info(
+            f"β Dataset available at: https://huggingface.co/datasets/{args.output_dataset}"
         )
-        logger.info(f"β Dataset available at: https://huggingface.co/datasets/{args.output_dataset}")
     except Exception as e:
         logger.error(f"β Failed to push to hub: {e}")
         logger.info("πΎ Saving locally as backup...")
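For readers unfamiliar with the `datasets` feature types used in the new output schema, the sketch below (not part of the commit) shows how an "objects" column with the same Sequence/ClassLabel/Value layout can be declared and populated. The class names and sample detections are invented for illustration; only the schema shape mirrors what the refactored script builds before `cast()` and `push_to_hub()`.

# Minimal sketch, assuming the `datasets` library is installed.
# The class names and rows below are hypothetical examples, not values from the commit.
from datasets import ClassLabel, Dataset, Features, Sequence, Value

class_names = ["photograph", "illustration", "diagram"]  # example --classes values

features = Features(
    {
        "objects": Sequence(
            {
                "bbox": Sequence(Value("float32"), length=4),  # [x, y, width, height]
                "category": ClassLabel(names=class_names),     # index into class_names
                "score": Value("float32"),                      # detection confidence
            }
        )
    }
)

rows = {
    "objects": [
        [{"bbox": [10.0, 20.0, 100.0, 50.0], "category": 0, "score": 0.91}],
        [],  # an image with no detections
    ]
}

ds = Dataset.from_dict(rows, features=features)
print(ds.features["objects"])

Declaring "category" as a ClassLabel keeps the integer indices that process_batch emits while attaching the human-readable class names to the dataset schema.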