Commit ·
16d14b6
1
Parent(s): dee4d7a
Fix API method name: use inference() instead of infer_image()
Browse files
The DepthAnything3 API uses the inference() method, not infer_image().
Changes:
- Call model.inference([image]) with a list of images
- Returns a Prediction object with .depth attribute
- Extract depth map with prediction.depth[0]
This fixes the AttributeError:
'DepthAnything3' object has no attribute 'infer_image'
Co-authored-by: Cursor <cursoragent@cursor.com>
- simple_app.py +11 -3
- simple_batch_process.py +3 -1
simple_app.py
CHANGED
|
@@ -135,7 +135,9 @@ class SimpleDepthApp:
|
|
| 135 |
# Measure ONLY inference time
|
| 136 |
inference_start = time.time()
|
| 137 |
with torch.no_grad():
|
| 138 |
-
|
|
|
|
|
|
|
| 139 |
inference_time = time.time() - inference_start
|
| 140 |
|
| 141 |
inference_times.append(inference_time)
|
|
@@ -234,7 +236,13 @@ class SimpleDepthApp:
|
|
| 234 |
final_status = "❌ Error processing images. Check console for details."
|
| 235 |
metrics_msg = ""
|
| 236 |
else:
|
| 237 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 238 |
|
| 239 |
# Format metrics for display
|
| 240 |
throughput = 1/metrics['avg_time_per_image'] if metrics['avg_time_per_image'] > 0 else 0
|
|
@@ -244,7 +252,7 @@ class SimpleDepthApp:
|
|
| 244 |
| Metric | Value |
|
| 245 |
|--------|-------|
|
| 246 |
| **Total Images Found** | {metrics['total_images']} |
|
| 247 |
-
| **
|
| 248 |
| **Total Inference Time** | {metrics['total_inference_time']:.2f}s |
|
| 249 |
| **Average Time per Image** | {metrics['avg_time_per_image']:.3f}s |
|
| 250 |
| **Throughput** | {throughput:.2f} images/second |
|
|
|
|
| 135 |
# Measure ONLY inference time
|
| 136 |
inference_start = time.time()
|
| 137 |
with torch.no_grad():
|
| 138 |
+
# API expects a list of images, returns Prediction object
|
| 139 |
+
prediction = model.inference([image_np])
|
| 140 |
+
depth = prediction.depth[0] # Get first (and only) depth map
|
| 141 |
inference_time = time.time() - inference_start
|
| 142 |
|
| 143 |
inference_times.append(inference_time)
|
|
|
|
| 236 |
final_status = "❌ Error processing images. Check console for details."
|
| 237 |
metrics_msg = ""
|
| 238 |
else:
|
| 239 |
+
# Check if all images were processed successfully
|
| 240 |
+
success_rate = metrics['images_processed'] / metrics['total_images'] if metrics['total_images'] > 0 else 0
|
| 241 |
+
if metrics['images_processed'] < metrics['total_images']:
|
| 242 |
+
skipped = metrics['total_images'] - metrics['images_processed']
|
| 243 |
+
final_status = f"✅ Done! Processed {metrics['images_processed']} images. ({skipped} skipped due to errors)"
|
| 244 |
+
else:
|
| 245 |
+
final_status = f"✅ Done! All {metrics['images_processed']} images processed successfully."
|
| 246 |
|
| 247 |
# Format metrics for display
|
| 248 |
throughput = 1/metrics['avg_time_per_image'] if metrics['avg_time_per_image'] > 0 else 0
|
|
|
|
| 252 |
| Metric | Value |
|
| 253 |
|--------|-------|
|
| 254 |
| **Total Images Found** | {metrics['total_images']} |
|
| 255 |
+
| **Successfully Processed** | {metrics['images_processed']} ({success_rate*100:.0f}%) |
|
| 256 |
| **Total Inference Time** | {metrics['total_inference_time']:.2f}s |
|
| 257 |
| **Average Time per Image** | {metrics['avg_time_per_image']:.3f}s |
|
| 258 |
| **Throughput** | {throughput:.2f} images/second |
|
simple_batch_process.py
CHANGED
|
@@ -81,7 +81,9 @@ def process_images_from_directory(input_dir: str, output_dir: str, model):
|
|
| 81 |
# Predict depth (measure inference time only)
|
| 82 |
inference_start = time.time()
|
| 83 |
with torch.no_grad():
|
| 84 |
-
|
|
|
|
|
|
|
| 85 |
inference_time = time.time() - inference_start
|
| 86 |
inference_times.append(inference_time)
|
| 87 |
|
|
|
|
| 81 |
# Predict depth (measure inference time only)
|
| 82 |
inference_start = time.time()
|
| 83 |
with torch.no_grad():
|
| 84 |
+
# API expects a list of images, returns Prediction object
|
| 85 |
+
prediction = model.inference([image_np])
|
| 86 |
+
depth = prediction.depth[0] # Get first (and only) depth map
|
| 87 |
inference_time = time.time() - inference_start
|
| 88 |
inference_times.append(inference_time)
|
| 89 |
|