# Depth Anything 3 — minimal inference & visualization demo script.
# (Non-code page-header artifacts from the original web scrape removed.)
import os
import sys

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torch

# Add the local `src` directory to the Python path so the
# depth_anything_3 package can be imported without being installed.
src_path = os.path.join(os.path.dirname(__file__), 'src')
if src_path not in sys.path:
    sys.path.insert(0, src_path)

from depth_anything_3.api import DepthAnything3
from depth_anything_3.utils.visualize import visualize_depth
# Prefer the GPU when one is available; otherwise run on CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Load the pretrained DA3-LARGE checkpoint, move it to the chosen
# device, and switch to inference mode (disables dropout/batch-norm updates).
model = DepthAnything3.from_pretrained("depth-anything/DA3-LARGE").to(device)
model.eval()
print(f"Model loaded on {device}")
# Sample input image(s) for inference.
image_paths = ["assets/examples/SOH/demo.png"]

# Run inference. A 3-D export could optionally be requested via the
# export_dir / export_format keyword arguments of model.inference.
prediction = model.inference(image=image_paths)

# Report the predicted depth and (optional) camera parameter shapes.
print(f"Depth shape: {prediction.depth.shape}")
for label, tensor in (("Extrinsics", prediction.extrinsics),
                      ("Intrinsics", prediction.intrinsics)):
    print(f"{label}: {tensor.shape if tensor is not None else 'None'}")
# Visualize each input image (top row) and its predicted depth map (bottom row).
n_images = prediction.depth.shape[0]
fig, axes = plt.subplots(2, n_images, figsize=(12, 6))
if n_images == 1:
    # plt.subplots returns a 1-D axes array for a single column;
    # reshape so the axes[row, col] indexing below works uniformly.
    axes = axes.reshape(2, 1)

for i in range(n_images):
    # Top row: the preprocessed input image, when the model kept it.
    if prediction.processed_images is not None:
        axes[0, i].imshow(prediction.processed_images[i])
    axes[0, i].set_title(f"Input {i+1}")
    axes[0, i].axis('off')

    # Bottom row: depth colorized with the Spectral colormap.
    depth_vis = visualize_depth(prediction.depth[i], cmap="Spectral")
    axes[1, i].imshow(depth_vis)
    axes[1, i].set_title(f"Depth {i+1}")
    axes[1, i].axis('off')

plt.tight_layout()
plt.show()