Commit · 7ecd895
Parent(s): db8245e

Refactor image processing in batch to handle grayscale images and unify prompts for model input

Changed files: detect-objects.py (+7 −16)
detect-objects.py
CHANGED
@@ -224,38 +224,29 @@ def process_batch(
     for img in images:
         if isinstance(img, str):
             img = Image.open(img)
-        if img.mode == "L"
+        if img.mode == "L":
+            img = img.convert("RGB")
+        elif img.mode != "RGB":
             img = img.convert("RGB")
         pil_images.append(img)
 
-    # Store original sizes for post-processing
-    original_sizes = [(img.height, img.width) for img in pil_images]
-
     # Process batch through model
     try:
         inputs = processor(
             images=pil_images,
-            text=class_name,  #
+            text=[class_name] * len(pil_images),  # Same prompt for all images
             return_tensors="pt",
-        )
-        # Move to device and convert to model's dtype
-        inputs = {
-            k: v.to(
-                model.device,
-                dtype=model.dtype if v.dtype.is_floating_point else v.dtype,
-            )
-            for k, v in inputs.items()
-        }
+        ).to(model.device)
 
         with torch.no_grad():
             outputs = model(**inputs)
 
-        # Post-process outputs
+        # Post-process outputs using original_sizes from processor
         results = processor.post_process_instance_segmentation(
             outputs,
             threshold=confidence_threshold,
             mask_threshold=mask_threshold,
-            target_sizes=original_sizes,
+            target_sizes=inputs.get("original_sizes").tolist(),
         )
 
     except Exception as e:
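For orientation, here is a minimal sketch of how the changed region reads once the hunk is applied. The surrounding process_batch signature is not shown in the diff, so the parameter list and defaults below are assumptions, as is the premise that the processor returns an original_sizes tensor in its output (some Hugging Face processors, e.g. SamProcessor, do); the original try/except error handling is elided.

    import torch
    from PIL import Image

    def process_batch(images, processor, model, class_name,
                      confidence_threshold=0.5, mask_threshold=0.5):
        # Normalize every input to an RGB PIL image before batching.
        pil_images = []
        for img in images:
            if isinstance(img, str):
                img = Image.open(img)
            if img.mode == "L":        # grayscale, per the commit message
                img = img.convert("RGB")
            elif img.mode != "RGB":    # e.g. RGBA, P, CMYK
                img = img.convert("RGB")
            pil_images.append(img)

        # One copy of the prompt per image keeps the text batch aligned
        # with the image batch.
        inputs = processor(
            images=pil_images,
            text=[class_name] * len(pil_images),
            return_tensors="pt",
        ).to(model.device)

        with torch.no_grad():
            outputs = model(**inputs)

        # target_sizes now comes from the processor's own record of each
        # image's original (height, width), replacing the manually built
        # original_sizes list that the old code kept.
        return processor.post_process_instance_segmentation(
            outputs,
            threshold=confidence_threshold,
            mask_threshold=mask_threshold,
            target_sizes=inputs.get("original_sizes").tolist(),
        )

Two things are worth noting about the new hunk itself. The two conversion branches have identical bodies, so the mode check collapses to a single `if img.mode != "RGB": img = img.convert("RGB")`; keeping the "L" case explicit simply makes the grayscale handling named in the commit message visible. Also, the old code cast floating-point tensors to model.dtype while moving them to the device, whereas the new `.to(model.device)` moves tensors without touching dtype, so it relies on the processor already emitting tensors in a dtype the model accepts.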