Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -9,55 +9,52 @@ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 import cv2
 import gradio as gr
-import matplotlib.pyplot as plt
 import numpy as np
 import spaces
 import torch
 from PIL import Image
 from pillow_heif import register_heif_opener

 from mapanything.utils.hf_utils.css_and_html import (
-    get_gradio_theme,
     GRADIO_CSS,
 )
-from mapanything.utils.hf_utils.hf_helpers import initialize_mapanything_model
-from mapanything.utils.hf_utils.viz import predictions_to_glb
-from mapanything.utils.image import load_images

-#
 try:
-    from mapanything.utils.
 except ImportError:
-
-        dpdy = np.zeros_like(points3d)
-        dpdx[:, :-1] = points3d[:, 1:] - points3d[:, :-1]
-        dpdy[:-1, :] = points3d[1:, :] - points3d[:-1, :]
-        normals = np.cross(dpdx, dpdy)
-        norms = np.linalg.norm(normals, axis=-1, keepdims=True)
-        norms = np.maximum(norms, 1e-8)
-        normals = normals / norms
-        valid = norms.squeeze(-1) > 1e-6
-        if mask is not None:
-            valid = valid & mask
-        return normals, valid

 try:
     from mapanything.utils.hf_utils.css_and_html import MEASURE_INSTRUCTIONS_HTML
 except ImportError:
-    MEASURE_INSTRUCTIONS_HTML =
-    **
-
-    3. The depth of each point and the computed 3D distance will be displayed below
-    4. After each measurement, click two new points for a new measurement
-    """

 # ============================================================================
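The deleted fallback above estimated per-pixel surface normals directly from the predicted point map by finite differences. A self-contained sketch of that approach, reconstructed from the removed lines (the `dpdx` initialisation was truncated in the diff and is assumed symmetric with `dpdy`):

```python
import numpy as np

def points_to_normals(points3d, mask=None):
    """Per-pixel surface normals from an (H, W, 3) point map."""
    dpdx = np.zeros_like(points3d)  # assumed init, symmetric with dpdy
    dpdy = np.zeros_like(points3d)
    # Forward differences toward the right / bottom neighbour
    dpdx[:, :-1] = points3d[:, 1:] - points3d[:, :-1]
    dpdy[:-1, :] = points3d[1:, :] - points3d[:-1, :]
    # Normal = cross product of the two tangent directions
    normals = np.cross(dpdx, dpdy)
    norms = np.linalg.norm(normals, axis=-1, keepdims=True)
    normals = normals / np.maximum(norms, 1e-8)  # avoid division by zero
    valid = norms.squeeze(-1) > 1e-6  # degenerate gradients are invalid
    if mask is not None:
        valid = valid & mask
    return normals, valid
```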
@@ -88,27 +85,30 @@ model = None

 # ============================================================================
-# Core Model Inference
 # ============================================================================

 @spaces.GPU(duration=120)
 def run_model(
     target_dir,
     apply_mask=True,
 ):
     """
-    Run the MapAnything model
     """
     global model
     import torch

     print(f"Processing images: {target_dir}")

-    # Device check
     device = "cuda" if torch.cuda.is_available() else "cpu"
     device = torch.device(device)

-    #
     if model is None:
         print("📥 Loading MapAnything from HuggingFace...")
         model = initialize_mapanything_model(high_level_config, device)
@@ -127,13 +127,16 @@ def run_model(
     if len(views) == 0:
         raise ValueError("No images found")

-    # Run
     print("Running 3D reconstruction...")
     outputs = model.infer(
-        views,
     )

-    #
     predictions = {}
     extrinsic_list = []
     intrinsic_list = []
@@ -178,65 +181,25 @@ def run_model(
     if len(depth_maps.shape) == 3:
         depth_maps = depth_maps[..., np.newaxis]
     predictions["depth"] = depth_maps
-
     predictions["images"] = np.stack(images_list, axis=0)
     predictions["final_mask"] = np.stack(final_mask_list, axis=0)

-    #

 # ============================================================================
-#
 # ============================================================================

-def process_predictions_for_visualization(
-    predictions, filter_black_bg=False, filter_white_bg=False
-):
-    """Extract depth, normal, and 3D points from predictions for per-view visualization tabs."""
-    processed_data = {}
-    num_views = predictions["images"].shape[0]
-
-    for view_idx in range(num_views):
-        image = predictions["images"][view_idx]  # (H, W, 3)
-        pred_pts3d = predictions["world_points"][view_idx]  # (H, W, 3)
-        depth = predictions["depth"][view_idx].squeeze()  # (H, W)
-        mask = predictions["final_mask"][view_idx].copy()  # (H, W)
-
-        # Apply black background filtering
-        if filter_black_bg:
-            view_colors = image * 255 if image.max() <= 1.0 else image.copy()
-            black_bg_mask = view_colors.sum(axis=2) >= 16
-            mask = mask & black_bg_mask
-
-        # Apply white background filtering
-        if filter_white_bg:
-            view_colors = image * 255 if image.max() <= 1.0 else image.copy()
-            white_bg_mask = ~(
-                (view_colors[:, :, 0] > 240)
-                & (view_colors[:, :, 1] > 240)
-                & (view_colors[:, :, 2] > 240)
-            )
-            mask = mask & white_bg_mask
-
-        # Compute surface normals from 3D points
-        normals, _ = points_to_normals(pred_pts3d, mask=mask)
-
-        processed_data[view_idx] = {
-            "image": image,
-            "points3d": pred_pts3d,
-            "depth": depth,
-            "normal": normals,
-            "mask": mask,
-        }
-
-    return processed_data
-

 def colorize_depth(depth_map, mask=None):
-    """
     if depth_map is None:
         return None
@@ -250,160 +213,183 @@ def colorize_depth(depth_map, mask=None):
     valid_depths = depth_normalized[valid_mask]
     p5 = np.percentile(valid_depths, 5)
     p95 = np.percentile(valid_depths, 95)
-    if p95
-        depth_normalized[valid_mask] = (

     colormap = plt.cm.turbo_r
-    colored = colormap(
     colored = (colored[:, :, :3] * 255).astype(np.uint8)
-
-    # Set invalid pixels to white
     colored[~valid_mask] = [255, 255, 255]
-
     return colored


 def colorize_normal(normal_map, mask=None):
-    """
     if normal_map is None:
         return None

     normal_vis = normal_map.copy()
-
     if mask is not None:
         normal_vis[~mask] = [0, 0, 0]

-    # Map normals from [-1, 1] to [0, 1] then to [0, 255]
     normal_vis = (normal_vis + 1.0) / 2.0
-    normal_vis = np.clip(normal_vis, 0, 1)
     normal_vis = (normal_vis * 255).astype(np.uint8)
-
     return normal_vis


 def update_view_selectors(processed_data):
-    """
     if processed_data is None or len(processed_data) == 0:
         choices = ["View 1"]
     else:
-        choices = [f"View {i + 1}" for i in range(num_views)]
-
     return (
-        gr.Dropdown(choices=choices, value=choices[0]),
-        gr.Dropdown(choices=choices, value=choices[0]),
-        gr.Dropdown(choices=choices, value=choices[0]),
     )


-def
-    """
     if processed_data is None or len(processed_data) == 0:
         return None
-        view_index = 0
-    return processed_data[view_keys[view_index]]


 def update_depth_view(processed_data, view_index):
-    if view_data is None or view_data["depth"] is None:
         return None
-    return colorize_depth(


 def update_normal_view(processed_data, view_index):
-    if view_data is None or view_data["normal"] is None:
         return None
-    return colorize_normal(


 def update_measure_view(processed_data, view_index):
-    """
-    if
         return None, []

-    image =
-    # Ensure image is uint8
     if image.dtype != np.uint8:
-
-        overlay_color = np.array([255, 220, 220], dtype=np.uint8)
     alpha = 0.5
     for c in range(3):
         image[:, :, c] = np.where(
-            (1 - alpha) * image[:, :, c] + alpha *
             image[:, :, c],
         ).astype(np.uint8)

     return image, []


-    """Navigate depth view (direction: -1 for previous, +1 for next)."""
-    if processed_data is None or len(processed_data) == 0:
-        return "View 1", None
-    try:
-        current_view = int(current_selector_value.split()[1]) - 1
-    except Exception:
-        current_view = 0
-    num_views = len(processed_data)
-    new_view = (current_view + direction) % num_views
-    new_selector_value = f"View {new_view + 1}"
-    depth_vis = update_depth_view(processed_data, new_view)
-    return new_selector_value, depth_vis


-def
-    """
     if processed_data is None or len(processed_data) == 0:
-        return "View 1", None
     try:
     except Exception:
-    num_views = len(processed_data)
-    new_view = (current_view + direction) % num_views
-    new_selector_value = f"View {new_view + 1}"
-    normal_vis = update_normal_view(processed_data, new_view)
-    return new_selector_value, normal_vis


 def populate_visualization_tabs(processed_data):
-    """
     if processed_data is None or len(processed_data) == 0:
         return None, None, None, []
     depth_vis = update_depth_view(processed_data, 0)
@@ -412,66 +398,67 @@ def populate_visualization_tabs(processed_data):
     return depth_vis, normal_vis, measure_img, []


 def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
-    """
     try:
         if processed_data is None or len(processed_data) == 0:
             return None, [], "No data available"

-        # Determine which view is currently active
         try:
         except Exception:
-
-        if current_view_index < 0 or current_view_index >= len(processed_data):
-            current_view_index = 0
-
-        view_keys = list(processed_data.keys())
-        current_view = processed_data[view_keys[current_view_index]]

         if current_view is None:
             return None, [], "No view data available"

-        point2d = event.index[0], event.index[1]

         # Reject clicks on masked (invalid) areas
         if (
             current_view["mask"] is not None
             and 0 <= point2d[1] < current_view["mask"].shape[0]
             and 0 <= point2d[0] < current_view["mask"].shape[1]
         ):

         measure_points.append(point2d)

-        image, _ = update_measure_view(processed_data, current_view_index)
         if image is None:
             return None, [], "No image available"
         image = image.copy()
-        points3d = current_view["points3d"]

-        # Ensure uint8
         if image.dtype != np.uint8:

-        # Draw circles
         for p in measure_points:
             if 0 <= p[0] < image.shape[1] and 0 <= p[1] < image.shape[0]:
                 image = cv2.circle(image, p, radius=5, color=(255, 0, 0), thickness=2)

-        #
         depth_text = ""
         for i, p in enumerate(measure_points):
             if (
@@ -489,40 +476,29 @@ def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
             z = points3d[p[1], p[0], 2]
             depth_text += f"- **P{i + 1} Z-coord: {z:.2f}m.**\n"

-        # If two points are marked, compute distance
         if len(measure_points) == 2:
-
-            # Draw line between the two points
             if (
-                0 <=
-                and 0 <=
-                and 0 <=
-                and 0 <=
             ):
-                image = cv2.line(image,

-            # Compute 3D Euclidean distance
             distance_text = "- **Distance: Unable to compute**"
             if (
                 points3d is not None
-                and 0 <=
-                and 0 <=
-                and 0 <=
-                and 0 <=
             ):
-                p2_3d = points3d[point2[1], point2[0]]
-                distance = np.linalg.norm(p1_3d - p2_3d)
-                distance_text = f"- **Distance: {distance:.2f}m**"
-            except Exception as e:
-                distance_text = f"- **Distance computation error: {e}**"

-            measure_points
-            text = depth_text + distance_text
-            return [image, measure_points, text]
         else:
             return [image, measure_points, depth_text]
@@ -531,20 +507,13 @@ def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
         return None, [], f"Measure error: {e}"


-def reset_measure(processed_data):
-    """Reset measure points and return clean image."""
-    if processed_data is None or len(processed_data) == 0:
-        return None, [], ""
-    first_view = list(processed_data.values())[0]
-    return first_view["image"], [], ""


 # ============================================================================
-#
 # ============================================================================

 def handle_uploads(input_images):
-    """
     start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()
@@ -562,45 +531,48 @@ def handle_uploads(input_images):
     if input_images is not None:
         for file_data in input_images:
-
             file_ext = os.path.splitext(file_path)[1].lower()
             if file_ext in [".heic", ".heif"]:
                 try:
                     with Image.open(file_path) as img:
                         if img.mode not in ("RGB", "L"):
                             img = img.convert("RGB")
-                        img.save(
-                        image_paths.append(
                 except Exception as e:
                     print(f"Error converting HEIC: {e}")
-                    shutil.copy(file_path,
-                    image_paths.append(
             else:
-                shutil.copy(file_path,
-                image_paths.append(

     image_paths = sorted(image_paths)
-
     return target_dir, image_paths


 def update_gallery_on_upload(input_images):
-    """
     if not input_images:
-        return None, None, None, None
     target_dir, image_paths = handle_uploads(input_images)
     return (
-        None,
         target_dir,
         image_paths,
         "Upload complete. Click 'Start Reconstruction' to begin 3D processing.",
@@ -608,9 +580,10 @@ def update_gallery_on_upload(input_images):

 # ============================================================================
-# Main Reconstruction
 # ============================================================================

 @spaces.GPU(duration=120)
 def gradio_demo(
     target_dir,
@@ -618,48 +591,53 @@ def gradio_demo(
     show_cam=True,
     filter_black_bg=False,
     filter_white_bg=False,
-    conf_thres=
     apply_mask=True,
     show_mesh=True,
 ):
-    """

     start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()

     target_dir_images = os.path.join(target_dir, "images")
-    all_files =
     frame_filter_choices = ["All"] + all_files_display

-    #
     print("Running MapAnything model...")
     with torch.no_grad():
-        predictions = run_model(

-    #
-    np.savez(prediction_save_path, **predictions)

     if frame_filter is None:
         frame_filter = "All"

-    glbfile = os.path.join(
-        target_dir,
-        f"glbscene_{frame_filter.replace('.', '_').replace(':', '').replace(' ', '_')}_cam{show_cam}_mesh{show_mesh}.glb",
-    )

         predictions,
         filter_by_frames=frame_filter,
         show_cam=show_cam,
@@ -668,87 +646,87 @@ def gradio_demo(
         as_mesh=show_mesh,
         conf_percentile=conf_thres,
     )

-    #
     )
     depth_vis, normal_vis, measure_img, _ = populate_visualization_tabs(processed_data)

     # Cleanup
     del predictions
     gc.collect()
     torch.cuda.empty_cache()

-    print(f"Total time elapsed: {
-    log_msg = f"✅ Reconstruction successful ({len(all_files)} frames)"

     return (
-        log_msg,
-        gr.Dropdown(choices=frame_filter_choices, value=frame_filter,
     )


 # ============================================================================
-#
 # ============================================================================

 def clear_fields():
-    return None


 def update_log():
-    return "Loading and reconstructing..."


-def
-    target_dir,
-    show_cam,
-    is_example,
-    conf_thres=None,
-    filter_black_bg=False,
-    filter_white_bg=False,
-    show_mesh=True,
 ):
-    """
-    Reload saved predictions from npz, create (or reuse) the GLB for new parameters.
-    KEPT AS-IS from original code.
-    """
     if is_example == "True":
-        return gr.update(), "No reconstruction available.
     if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
-        return gr.update(), "No reconstruction available.

-    if not os.path.exists(
-        return gr.update(),

-    predictions = {key: loaded[key] for key in loaded.keys()}

         target_dir,
-        f"
     )

         predictions,
         filter_by_frames=frame_filter,
         show_cam=show_cam,
@@ -757,184 +735,158 @@ def update_visualization(
         as_mesh=show_mesh,
         conf_percentile=conf_thres,
     )
-
-    return glbfile, "Visualization updated."


-def
     target_dir, frame_filter, show_cam, is_example,
 ):
-    """
     )


 def update_all_views_on_filter_change(
-    target_dir, filter_black_bg, filter_white_bg,
     depth_view_selector, normal_view_selector, measure_view_selector,
 ):
-    """
-    Re-process per-view visualization (depth / normal / measure) when
-    background filter checkboxes change.
-    """
     if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
         return processed_data, None, None, None, []

-    if not os.path.exists(
         return processed_data, None, None, None, []

     try:
-            predictions, filter_black_bg, filter_white_bg
         )

-        try:
-            normal_idx = int(normal_view_selector.split()[1]) - 1 if normal_view_selector else 0
-        except Exception:
-            normal_idx = 0
-        try:
-            measure_idx = int(measure_view_selector.split()[1]) - 1 if measure_view_selector else 0
-        except Exception:
-            measure_idx = 0

-        depth_vis = update_depth_view(new_processed_data, depth_idx)
-        normal_vis = update_normal_view(new_processed_data, normal_idx)
-        measure_img, _ = update_measure_view(new_processed_data, measure_idx)

     except Exception as e:
-        print(f"Error updating views
         return processed_data, None, None, None, []


 # ============================================================================
-# Example Scenes
 # ============================================================================

 def get_scene_info(examples_dir):
-    """Get information about scenes in the examples directory."""
     import glob

     scenes = []
     if not os.path.exists(examples_dir):
         return scenes

-    for
-    if os.path.isdir(
-                "image_files": image_files,
-            }
-        )
     return scenes


 def load_example_scene(scene_name, examples_dir="examples"):
-    """Load a scene from examples directory."""
     scenes = get_scene_info(examples_dir)
-            selected_scene = scene
-            break
-    if selected_scene is None:
-        return None, None, None, "Scene not found"
-    target_dir, image_paths = handle_uploads(selected_scene["image_files"])
     return (
         None,
         target_dir,
         image_paths,
-        f"Loaded scene '{scene_name}' ({
     )


 # ============================================================================
-# Gradio UI
 # ============================================================================

 theme = get_gradio_theme()

 APP_CSS = GRADIO_CSS + """
-    .
-    }

-    /* Fixed height for Gallery */
-    .gallery-container {
-        max-height: 350px !important;
-        overflow-y: auto !important;
-    }

-    /* Fixed height for File component */
-    .file-preview {
-        max-height: 200px !important;
-        overflow-y: auto !important;
-    }

-    /* Prevent Textbox from expanding infinitely */
-    .textbox-container {
-        max-height: 100px !important;
-    }

-    /* Keep Tabs content area stable */
-    .tab-content {
-        min-height: 550px !important;
-    }

-    /* Navigation row styling */
-    .navigation-row {
-        display: flex;
-        align-items: center;
-        gap: 8px;
-    }
 """

-with gr.Blocks() as demo:
     is_example = gr.Textbox(label="is_example", visible=False, value="None")
     target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")
     processed_data_state = gr.State(value=None)
     measure_points_state = gr.State(value=[])

     with gr.Row(equal_height=False):
         with gr.Column(scale=1, min_width=300):
             gr.Markdown("### 📤 Input")
@@ -963,33 +915,33 @@ with gr.Blocks() as demo:
                 scale=1,
             )

-        #
         with gr.Column(scale=2, min_width=600):
             gr.Markdown("### 🎯 Output")

             with gr.Tabs():
-                #
                 with gr.Tab("Raw 3D"):
-
                     height=550,
                     zoom_speed=0.5,
                     pan_speed=0.5,
                     clear_color=[0.0, 0.0, 0.0, 0.0],
                 )

-                #
-                with gr.Tab("
-
                     height=550,
                     zoom_speed=0.5,
                     pan_speed=0.5,
-                    clear_color=[0.
                 )

-                #
-                with gr.Tab("
-                    with gr.Row(
-                        prev_depth_btn = gr.Button("
                     depth_view_selector = gr.Dropdown(
                         choices=["View 1"],
                         value="View 1",
@@ -1006,10 +958,10 @@ with gr.Blocks() as demo:
                         interactive=False,
                     )

-                #
-                with gr.Tab("
-                    with gr.Row(
-                        prev_normal_btn = gr.Button("
                     normal_view_selector = gr.Dropdown(
                         choices=["View 1"],
                         value="View 1",
@@ -1026,11 +978,11 @@ with gr.Blocks() as demo:
                        interactive=False,
                    )

-                #
-                with gr.Tab("
                    gr.Markdown(MEASURE_INSTRUCTIONS_HTML)
-                    with gr.Row(
-                        prev_measure_btn = gr.Button("
                    measure_view_selector = gr.Dropdown(
                        choices=["View 1"],
                        value="View 1",
@@ -1048,7 +1000,8 @@ with gr.Blocks() as demo:
                        sources=[],
                    )
                    gr.Markdown(
-                        "**Note:** Light-grey areas indicate regions with no
                    )
                    measure_text = gr.Markdown("")
@@ -1060,11 +1013,11 @@ with gr.Blocks() as demo:
                max_lines=1,
            )

-    #
    with gr.Accordion("⚙️ Advanced Options", open=False):
        with gr.Row(equal_height=False):
            with gr.Column(scale=1, min_width=300):
-                gr.Markdown("####
                frame_filter = gr.Dropdown(
                    choices=["All"], value="All", label="Display Frame"
                )
@@ -1073,7 +1026,7 @@ with gr.Blocks() as demo:
                    maximum=100,
                    value=0,
                    step=0.1,
-                    label="Confidence Threshold
                )
                show_cam = gr.Checkbox(label="Show Camera", value=True)
                show_mesh = gr.Checkbox(label="Show Mesh", value=True)
@@ -1083,23 +1036,22 @@ with gr.Blocks() as demo:
                filter_white_bg = gr.Checkbox(
                    label="Filter White Background", value=False
                )
            with gr.Column(scale=1, min_width=300):
                gr.Markdown("#### Reconstruction Parameters")
                apply_mask_checkbox = gr.Checkbox(
                    label="Apply Depth Mask", value=True
                )

-    #
    with gr.Accordion("🖼️ Example Scenes", open=False):
        scenes = get_scene_info("examples")
        if scenes:
            for i in range(0, len(scenes), 4):
                with gr.Row(equal_height=True):
                    for j in range(4):
-                        if
-                            scene = scenes[
                        with gr.Column(scale=1, min_width=150):
                            scene_img = gr.Image(
                                value=scene["thumbnail"],
@@ -1110,44 +1062,46 @@ with gr.Blocks() as demo:
                                container=False,
                            )
                            gr.Markdown(
-                                f"
-                                elem_classes=["text-center"],
                            )
                            scene_img.select(
                                fn=lambda name=scene["name"]: load_example_scene(
                                    name
                                ),
                                outputs=[
                                    target_dir_output,
                                    image_gallery,
                                    log_output,
                                ],
                            )

-    #

-    #
    input_images.change(
        fn=update_gallery_on_upload,
        inputs=[input_images],
        outputs=[
            target_dir_output,
            image_gallery,
            log_output,
        ],
-    ).then(
-        fn=lambda: None,
-        outputs=[reconstruction_output_3d],
    )

-    #
    submit_btn.click(
-        fn=
-        outputs=[
    ).then(
        fn=update_log,
        outputs=[log_output],
@@ -1164,8 +1118,8 @@ with gr.Blocks() as demo:
            show_mesh,
        ],
        outputs=[
            log_output,
            frame_filter,
            processed_data_state,
@@ -1182,49 +1136,55 @@ with gr.Blocks() as demo:
        outputs=[is_example],
    )

-    #
-    clear_btn.add([

-    #
        component.change(
-            fn=
            inputs=[
                target_dir_output,
-                show_cam,
-                is_example,
-                conf_thres,
-                filter_black_bg,
-                filter_white_bg,
-                show_mesh,
            ],
-            outputs=[
            ],
        )

-    #
        inputs=[
-            target_dir_output,
-            show_cam,
-            is_example,
-            conf_thres,
-            filter_black_bg,
-            filter_white_bg,
-            show_mesh,
        ],
-        outputs=[
        ],
    ).then(
        fn=update_all_views_on_filter_change,
        inputs=[
@@ -1245,7 +1205,7 @@ with gr.Blocks() as demo:
        ],
    )

-    #
    prev_depth_btn.click(
        fn=lambda pd, cs: navigate_depth_view(pd, cs, -1),
        inputs=[processed_data_state, depth_view_selector],
@@ -1264,7 +1224,7 @@ with gr.Blocks() as demo:
        outputs=[depth_map],
    )

-    #
    prev_normal_btn.click(
        fn=lambda pd, cs: navigate_normal_view(pd, cs, -1),
        inputs=[processed_data_state, normal_view_selector],
@@ -1283,7 +1243,7 @@ with gr.Blocks() as demo:
        outputs=[normal_map],
    )

-    #
    prev_measure_btn.click(
        fn=lambda pd, cs: navigate_measure_view(pd, cs, -1),
        inputs=[processed_data_state, measure_view_selector],
@@ -1304,7 +1264,7 @@ with gr.Blocks() as demo:
        outputs=[measure_image, measure_points_state],
    )

-    #
    measure_image.select(
        fn=measure,
        inputs=[processed_data_state, measure_points_state, measure_view_selector],
@@ -9,55 +9,52 @@ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 import cv2
 import gradio as gr
 import numpy as np
 import spaces
 import torch
 from PIL import Image
 from pillow_heif import register_heif_opener

+register_heif_opener()
+sys.path.append("mapanything/")
+
+from mapanything.utils.geometry import depthmap_to_world_frame, points_to_normals
+from mapanything.utils.image import load_images, rgb
+
 from mapanything.utils.hf_utils.css_and_html import (
     GRADIO_CSS,
+    get_gradio_theme,
 )

+# Import Raw 3D GLB builder (from viz module - supports conf_percentile)
+from mapanything.utils.hf_utils.viz import predictions_to_glb as predictions_to_glb_raw
+
+# Import 3D View GLB builder (from visual_util module - separate implementation)
 try:
+    from mapanything.utils.hf_utils.visual_util import (
+        predictions_to_glb as predictions_to_glb_view,
+    )
 except ImportError:
+    # Fallback: reuse the viz version if visual_util is unavailable
+    from mapanything.utils.hf_utils.viz import (
+        predictions_to_glb as predictions_to_glb_view,
+    )

+# Optional imports for Measure tab instructions & acknowledgements
 try:
     from mapanything.utils.hf_utils.css_and_html import MEASURE_INSTRUCTIONS_HTML
 except ImportError:
+    MEASURE_INSTRUCTIONS_HTML = (
+        "**Instructions:** Click two points on the image to measure "
+        "the 3D distance between them. Points and distance are shown in metres."
+    )

+try:
+    from mapanything.utils.hf_utils.css_and_html import get_acknowledgements_html
+except ImportError:
+    get_acknowledgements_html = None
+
+from mapanything.utils.hf_utils.hf_helpers import initialize_mapanything_model


 # ============================================================================
@@ -88,27 +85,30 @@ model = None

 # ============================================================================
+# Core Model Inference
 # ============================================================================

+
 @spaces.GPU(duration=120)
 def run_model(
     target_dir,
     apply_mask=True,
+    filter_black_bg=False,
+    filter_white_bg=False,
 ):
     """
+    Run the MapAnything model on images in target_dir/images.
+    Returns (predictions dict, processed_data dict for per-view tabs).
     """
     global model
     import torch

     print(f"Processing images: {target_dir}")

     device = "cuda" if torch.cuda.is_available() else "cpu"
     device = torch.device(device)

+    # Initialise model on first call
     if model is None:
         print("📥 Loading MapAnything from HuggingFace...")
         model = initialize_mapanything_model(high_level_config, device)
@@ -127,13 +127,16 @@ def run_model(
     if len(views) == 0:
         raise ValueError("No images found")

+    # Run inference
     print("Running 3D reconstruction...")
     outputs = model.infer(
+        views,
+        apply_mask=apply_mask,
+        mask_edges=True,
+        memory_efficient_inference=False,
     )

+    # -- Build predictions dict ----------------------------------------------
     predictions = {}
     extrinsic_list = []
     intrinsic_list = []

@@ -178,65 +181,25 @@ def run_model(
     if len(depth_maps.shape) == 3:
         depth_maps = depth_maps[..., np.newaxis]
     predictions["depth"] = depth_maps
     predictions["images"] = np.stack(images_list, axis=0)
     predictions["final_mask"] = np.stack(final_mask_list, axis=0)

+    # -- Build processed_data for Depth / Normal / Measure tabs ---------------
+    processed_data = process_predictions_for_visualization(
+        predictions, views, high_level_config, filter_black_bg, filter_white_bg
+    )

+    torch.cuda.empty_cache()
+    return predictions, processed_data
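A sketch of how the new two-value return is meant to be consumed (the directory path is hypothetical; the shapes follow from the stacking code above):

```python
# Hypothetical driver; target_dir must contain an images/ subfolder and a
# GPU-backed model must be loadable for this to actually run.
predictions, processed_data = run_model("/tmp/scene", apply_mask=True)

print(predictions["depth"].shape)        # (num_views, H, W, 1)
print(predictions["images"].shape)       # (num_views, H, W, 3)
print(sorted(processed_data[0].keys()))  # ['depth', 'image', 'mask', 'normal', 'points3d']
```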

 # ============================================================================
+# Visualisation Helpers
 # ============================================================================


 def colorize_depth(depth_map, mask=None):
+    """Depth map -> turbo-coloured uint8 image."""
     if depth_map is None:
         return None

@@ -250,160 +213,183 @@ def colorize_depth(depth_map, mask=None):
     valid_depths = depth_normalized[valid_mask]
     p5 = np.percentile(valid_depths, 5)
     p95 = np.percentile(valid_depths, 95)
+    if p95 - p5 > 0:
+        depth_normalized[valid_mask] = (
+            depth_normalized[valid_mask] - p5
+        ) / (p95 - p5)
+
+    import matplotlib.pyplot as plt

     colormap = plt.cm.turbo_r
+    colored = colormap(depth_normalized)
     colored = (colored[:, :, :3] * 255).astype(np.uint8)
     colored[~valid_mask] = [255, 255, 255]
     return colored
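The 5th/95th percentile window makes the colour mapping robust to depth outliers, which would otherwise compress the useful range into a sliver. A toy demonstration of the same normalisation on synthetic data (clipping added here for the standalone example):

```python
import numpy as np

depth = np.concatenate([np.linspace(1.0, 5.0, 98), [50.0, 0.01]])  # two outliers
p5, p95 = np.percentile(depth, 5), np.percentile(depth, 95)
normalized = np.clip((depth - p5) / (p95 - p5), 0.0, 1.0)

# The window tracks the bulk of the data (~1.2 to ~4.8), not 0.01 and 50,
# so the outliers saturate at 0/1 instead of crushing the colour range.
print(round(p5, 2), round(p95, 2))
```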
 def colorize_normal(normal_map, mask=None):
+    """Normal map -> RGB uint8 image."""
     if normal_map is None:
         return None

     normal_vis = normal_map.copy()
     if mask is not None:
         normal_vis[~mask] = [0, 0, 0]

     normal_vis = (normal_vis + 1.0) / 2.0
     normal_vis = (normal_vis * 255).astype(np.uint8)
     return normal_vis
+def process_predictions_for_visualization(
+    predictions, views, config, filter_black_bg=False, filter_white_bg=False
+):
+    """Extract per-view depth, normal, 3-D points and mask."""
+    processed_data = {}
+
+    for view_idx, view in enumerate(views):
+        image = rgb(view["img"], norm_type=config["data_norm_type"])
+        pred_pts3d = predictions["world_points"][view_idx]
+
+        view_data = {
+            "image": image[0],
+            "points3d": pred_pts3d,
+            "depth": None,
+            "normal": None,
+            "mask": None,
+        }
+
+        mask = predictions["final_mask"][view_idx].copy()
+
+        if filter_black_bg:
+            view_colors = image[0] * 255 if image[0].max() <= 1.0 else image[0]
+            mask = mask & (view_colors.sum(axis=2) >= 16)
+
+        if filter_white_bg:
+            view_colors = image[0] * 255 if image[0].max() <= 1.0 else image[0]
+            mask = mask & ~(
+                (view_colors[:, :, 0] > 240)
+                & (view_colors[:, :, 1] > 240)
+                & (view_colors[:, :, 2] > 240)
+            )
+
+        view_data["mask"] = mask
+        view_data["depth"] = predictions["depth"][view_idx].squeeze()
+
+        normals, _ = points_to_normals(pred_pts3d, mask=view_data["mask"])
+        view_data["normal"] = normals
+
+        processed_data[view_idx] = view_data
+
+    return processed_data
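The two background filters are simple per-pixel colour thresholds: near-black pixels fail a channel-sum test, near-white pixels fail an all-channels-bright test. A toy example of both masks on a synthetic image (values in 0-255):

```python
import numpy as np

img = np.zeros((2, 2, 3), dtype=np.float32)
img[0, 0] = [0, 5, 5]        # near-black -> removed by the black filter
img[0, 1] = [250, 250, 250]  # near-white -> removed by the white filter
img[1, :] = [120, 80, 200]   # ordinary colours survive both

black_ok = img.sum(axis=2) >= 16  # channel sum below 16 counts as black
white_ok = ~((img[:, :, 0] > 240) & (img[:, :, 1] > 240) & (img[:, :, 2] > 240))
print(black_ok)  # [[False  True] [ True  True]]
print(white_ok)  # [[ True False] [ True  True]]
```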
+# ============================================================================
+# View Navigation & Update Functions
+# ============================================================================
+
+
 def update_view_selectors(processed_data):
+    """Return three Dropdown updates matching the number of views."""
     if processed_data is None or len(processed_data) == 0:
         choices = ["View 1"]
     else:
+        choices = [f"View {i + 1}" for i in range(len(processed_data))]
     return (
+        gr.Dropdown(choices=choices, value=choices[0]),
+        gr.Dropdown(choices=choices, value=choices[0]),
+        gr.Dropdown(choices=choices, value=choices[0]),
     )


+def _view_data(processed_data, view_index):
+    """Safe accessor."""
     if processed_data is None or len(processed_data) == 0:
         return None
+    keys = list(processed_data.keys())
+    idx = max(0, min(view_index, len(keys) - 1))
+    return processed_data[keys[idx]]
 def update_depth_view(processed_data, view_index):
+    vd = _view_data(processed_data, view_index)
+    if vd is None or vd["depth"] is None:
         return None
+    return colorize_depth(vd["depth"], mask=vd.get("mask"))


 def update_normal_view(processed_data, view_index):
+    vd = _view_data(processed_data, view_index)
+    if vd is None or vd["normal"] is None:
         return None
+    return colorize_normal(vd["normal"], mask=vd.get("mask"))


 def update_measure_view(processed_data, view_index):
+    """Return (image_with_mask_overlay, empty_points_list)."""
+    vd = _view_data(processed_data, view_index)
+    if vd is None:
         return None, []

+    image = vd["image"].copy()
     if image.dtype != np.uint8:
+        image = (
+            (image * 255).astype(np.uint8)
+            if image.max() <= 1.0
+            else image.astype(np.uint8)
+        )

+    if vd["mask"] is not None:
+        invalid = ~vd["mask"]
+        if invalid.any():
+            overlay = np.array([255, 220, 220], dtype=np.uint8)
             alpha = 0.5
             for c in range(3):
                 image[:, :, c] = np.where(
+                    invalid,
+                    (1 - alpha) * image[:, :, c] + alpha * overlay[c],
                     image[:, :, c],
                 ).astype(np.uint8)

     return image, []


+# -- Navigation helpers -------------------------------------------------------
+def _navigate(processed_data, current_selector, direction, update_fn):
+    """Generic prev / next navigation."""
     if processed_data is None or len(processed_data) == 0:
+        return ("View 1",) + (None,) * (3 if update_fn == update_measure_view else 1)
+
     try:
+        cur = int(current_selector.split()[1]) - 1
     except Exception:
+        cur = 0

+    nv = (cur + direction) % len(processed_data)
+    sel = f"View {nv + 1}"
+    result = update_fn(processed_data, nv)

+    if isinstance(result, tuple):
+        return (sel,) + result
+    return sel, result


+def navigate_depth_view(pd, cs, d):
+    return _navigate(pd, cs, d, update_depth_view)


+def navigate_normal_view(pd, cs, d):
+    return _navigate(pd, cs, d, update_normal_view)


+def navigate_measure_view(pd, cs, d):
+    return _navigate(pd, cs, d, update_measure_view)
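Navigation wraps around via the modulo, so "previous" from the first view lands on the last one rather than stopping at a boundary. For example, assuming four views:

```python
num_views = 4
cur = 0                         # currently on "View 1"
prev = (cur - 1) % num_views    # -> 3, i.e. "View 4" (wraps backwards)
nxt = (cur + 1) % num_views     # -> 1, i.e. "View 2"
print(f"View {prev + 1}", f"View {nxt + 1}")
```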
 def populate_visualization_tabs(processed_data):
+    """Initial population after reconstruction."""
     if processed_data is None or len(processed_data) == 0:
         return None, None, None, []
     depth_vis = update_depth_view(processed_data, 0)

@@ -412,66 +398,67 @@ def populate_visualization_tabs(processed_data):
     return depth_vis, normal_vis, measure_img, []


+# ============================================================================
+# Measurement Function
+# ============================================================================
+
 def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
+    """Click handler for the Measure tab image."""
     try:
         if processed_data is None or len(processed_data) == 0:
             return None, [], "No data available"

         try:
+            view_idx = int(current_view_selector.split()[1]) - 1
         except Exception:
+            view_idx = 0
+        view_idx = max(0, min(view_idx, len(processed_data) - 1))

+        keys = list(processed_data.keys())
+        current_view = processed_data[keys[view_idx]]
         if current_view is None:
             return None, [], "No view data available"

+        point2d = (event.index[0], event.index[1])

         # Reject clicks on masked (invalid) areas
         if (
             current_view["mask"] is not None
             and 0 <= point2d[1] < current_view["mask"].shape[0]
             and 0 <= point2d[0] < current_view["mask"].shape[1]
+            and not current_view["mask"][point2d[1], point2d[0]]
         ):
+            img_masked, _ = update_measure_view(processed_data, view_idx)
+            return (
+                img_masked,
+                measure_points,
+                '<span style="color:red;font-weight:bold;">'
+                "Cannot measure on masked areas (shown in grey)</span>",
+            )

         measure_points.append(point2d)

+        image, _ = update_measure_view(processed_data, view_idx)
         if image is None:
             return None, [], "No image available"
         image = image.copy()

         if image.dtype != np.uint8:
+            image = (
+                (image * 255).astype(np.uint8)
+                if image.max() <= 1.0
+                else image.astype(np.uint8)
+            )

+        points3d = current_view["points3d"]

+        # Draw circles
         for p in measure_points:
             if 0 <= p[0] < image.shape[1] and 0 <= p[1] < image.shape[0]:
                 image = cv2.circle(image, p, radius=5, color=(255, 0, 0), thickness=2)

+        # Depth text
         depth_text = ""
         for i, p in enumerate(measure_points):
             if (

@@ -489,40 +476,29 @@ def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
             z = points3d[p[1], p[0], 2]
             depth_text += f"- **P{i + 1} Z-coord: {z:.2f}m.**\n"

         if len(measure_points) == 2:
+            p1, p2 = measure_points
             if (
+                0 <= p1[0] < image.shape[1]
+                and 0 <= p1[1] < image.shape[0]
+                and 0 <= p2[0] < image.shape[1]
+                and 0 <= p2[1] < image.shape[0]
             ):
+                image = cv2.line(image, p1, p2, color=(255, 0, 0), thickness=2)

             distance_text = "- **Distance: Unable to compute**"
             if (
                 points3d is not None
+                and 0 <= p1[1] < points3d.shape[0]
+                and 0 <= p1[0] < points3d.shape[1]
+                and 0 <= p2[1] < points3d.shape[0]
+                and 0 <= p2[0] < points3d.shape[1]
             ):
+                dist = np.linalg.norm(points3d[p1[1], p1[0]] - points3d[p2[1], p2[0]])
+                distance_text = f"- **Distance: {dist:.2f}m**"

+            measure_points = []  # reset after two-point measurement
+            return [image, measure_points, depth_text + distance_text]
         else:
             return [image, measure_points, depth_text]
@@ -531,20 +507,13 @@ def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
         return None, [], f"Measure error: {e}"
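The only subtlety in the distance computation above is indexing: `gr.SelectData.index` yields (x, y), while the point map is indexed [row, col]. A toy sketch with a synthetic point map and made-up click coordinates:

```python
import numpy as np

points3d = np.random.rand(480, 640, 3).astype(np.float32)  # (H, W, 3) point map
p1, p2 = (100, 200), (300, 250)  # clicks as (x, y)

# Swap to [y, x] when indexing the array
d = np.linalg.norm(points3d[p1[1], p1[0]] - points3d[p2[1], p2[0]])
print(f"- **Distance: {d:.2f}m**")
```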
 # ============================================================================
+# File Handling (Image-only)
 # ============================================================================


 def handle_uploads(input_images):
+    """Copy uploaded images into a unique target_dir/images folder."""
     start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()

@@ -562,45 +531,48 @@ def handle_uploads(input_images):
     if input_images is not None:
         for file_data in input_images:
+            file_path = (
+                file_data["name"]
+                if isinstance(file_data, dict) and "name" in file_data
+                else str(file_data)
+            )
             file_ext = os.path.splitext(file_path)[1].lower()

             if file_ext in [".heic", ".heif"]:
                 try:
                     with Image.open(file_path) as img:
                         if img.mode not in ("RGB", "L"):
                             img = img.convert("RGB")
+                        base = os.path.splitext(os.path.basename(file_path))[0]
+                        dst = os.path.join(target_dir_images, f"{base}.jpg")
+                        img.save(dst, "JPEG", quality=95)
+                        image_paths.append(dst)
                 except Exception as e:
                     print(f"Error converting HEIC: {e}")
+                    dst = os.path.join(target_dir_images, os.path.basename(file_path))
+                    shutil.copy(file_path, dst)
+                    image_paths.append(dst)
             else:
+                dst = os.path.join(target_dir_images, os.path.basename(file_path))
+                shutil.copy(file_path, dst)
+                image_paths.append(dst)

     image_paths = sorted(image_paths)
+    print(
+        f"Files copied to {target_dir_images}; "
+        f"took {time.time() - start_time:.3f}s"
+    )
     return target_dir, image_paths
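`register_heif_opener()` (called once at import time above) is what lets plain `PIL.Image.open` read HEIC files at all; the conversion branch then reduces to standard Pillow code. A minimal standalone sketch (file names illustrative):

```python
from PIL import Image
from pillow_heif import register_heif_opener

register_heif_opener()  # registers .heic/.heif with Pillow's plugin system

with Image.open("photo.heic") as img:  # works only after registration
    if img.mode not in ("RGB", "L"):
        img = img.convert("RGB")
    img.save("photo.jpg", "JPEG", quality=95)
```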
 def update_gallery_on_upload(input_images):
+    """Fired whenever the file input changes."""
     if not input_images:
+        return None, None, None, None, None
     target_dir, image_paths = handle_uploads(input_images)
     return (
+        None,  # clear raw_3d_output
+        None,  # clear view_3d_output
         target_dir,
         image_paths,
         "Upload complete. Click 'Start Reconstruction' to begin 3D processing.",

@@ -608,9 +580,10 @@ def update_gallery_on_upload(input_images):

 # ============================================================================
+# Main Reconstruction
 # ============================================================================

+
@spaces.GPU(duration=120)
|
| 588 |
def gradio_demo(
|
| 589 |
target_dir,
|
|
|
|
| 591 |
show_cam=True,
|
| 592 |
filter_black_bg=False,
|
| 593 |
filter_white_bg=False,
|
| 594 |
+
conf_thres=0.0,
|
| 595 |
apply_mask=True,
|
| 596 |
show_mesh=True,
|
| 597 |
):
|
| 598 |
+
"""Run reconstruction, produce both Raw-3D and 3D-View GLBs, plus per-view data."""
|
| 599 |
+
empty = (
|
| 600 |
+
None, None, "Please upload files first",
|
| 601 |
+
gr.Dropdown(choices=["All"], value="All"),
|
| 602 |
+
None, None, None, None, "",
|
| 603 |
+
gr.Dropdown(choices=["View 1"], value="View 1"),
|
| 604 |
+
gr.Dropdown(choices=["View 1"], value="View 1"),
|
| 605 |
+
gr.Dropdown(choices=["View 1"], value="View 1"),
|
| 606 |
+
)
|
| 607 |
+
if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
|
| 608 |
+
return empty
|
| 609 |
|
| 610 |
start_time = time.time()
|
| 611 |
gc.collect()
|
| 612 |
torch.cuda.empty_cache()
|
| 613 |
|
| 614 |
target_dir_images = os.path.join(target_dir, "images")
|
| 615 |
+
all_files = (
|
| 616 |
+
sorted(os.listdir(target_dir_images))
|
| 617 |
+
if os.path.isdir(target_dir_images)
|
| 618 |
+
else []
|
| 619 |
+
)
|
| 620 |
+
all_files_display = [f"{i}: {fn}" for i, fn in enumerate(all_files)]
|
| 621 |
frame_filter_choices = ["All"] + all_files_display
|
| 622 |
|
| 623 |
+
# ββ Model inference ββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 624 |
print("Running MapAnything model...")
|
| 625 |
with torch.no_grad():
|
| 626 |
+
predictions, processed_data = run_model(
|
| 627 |
+
target_dir, apply_mask, filter_black_bg, filter_white_bg
|
| 628 |
+
)
|
| 629 |
|
| 630 |
+
# Save predictions for later re-visualisation
|
| 631 |
+
np.savez(os.path.join(target_dir, "predictions.npz"), **predictions)
|
|
|
|
| 632 |
|
| 633 |
if frame_filter is None:
|
| 634 |
frame_filter = "All"
|
| 635 |
|
| 636 |
+
ff_safe = frame_filter.replace(".", "").replace(":", "").replace(" ", "")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 637 |
|
| 638 |
+
# ββ Raw 3D GLB (viz module, with conf_percentile) ββββββββββββββββββββ
|
| 639 |
+
raw_glb_path = os.path.join(target_dir, f"raw_{ff_safe}_cam{show_cam}_mesh{show_mesh}.glb")
|
| 640 |
+
raw_scene = predictions_to_glb_raw(
|
| 641 |
predictions,
|
| 642 |
filter_by_frames=frame_filter,
|
| 643 |
show_cam=show_cam,
|
|
|
|
| 646 |
as_mesh=show_mesh,
|
| 647 |
conf_percentile=conf_thres,
|
| 648 |
)
|
| 649 |
+
raw_scene.export(file_obj=raw_glb_path)
|
| 650 |
|
| 651 |
+
# ββ 3D View GLB (visual_util module, no conf_percentile) βββββββββββββ
|
| 652 |
+
view_glb_path = os.path.join(target_dir, f"view_{ff_safe}_cam{show_cam}_mesh{show_mesh}.glb")
|
| 653 |
+
view_scene = predictions_to_glb_view(
|
| 654 |
+
predictions,
|
| 655 |
+
filter_by_frames=frame_filter,
|
| 656 |
+
show_cam=show_cam,
|
| 657 |
+
mask_black_bg=filter_black_bg,
|
| 658 |
+
mask_white_bg=filter_white_bg,
|
| 659 |
+
as_mesh=show_mesh,
|
| 660 |
)
|
| 661 |
+
view_scene.export(file_obj=view_glb_path)
|
| 662 |
+
|
| 663 |
+
# ββ Populate per-view tabs βββββββββββββββββββββββββββββββββββββββββββ
|
| 664 |
depth_vis, normal_vis, measure_img, _ = populate_visualization_tabs(processed_data)
|
| 665 |
+
depth_sel, normal_sel, measure_sel = update_view_selectors(processed_data)
|
| 666 |
|
| 667 |
# Cleanup
|
| 668 |
del predictions
|
| 669 |
gc.collect()
|
| 670 |
torch.cuda.empty_cache()
|
| 671 |
|
| 672 |
+
elapsed = time.time() - start_time
|
| 673 |
+
print(f"Total time elapsed: {elapsed:.2f}s")
|
| 674 |
+
log_msg = f"β
Reconstruction successful ({len(all_files)} frames, {elapsed:.1f}s)"
|
| 675 |
|
| 676 |
return (
|
| 677 |
+
raw_glb_path, # raw_3d_output
|
| 678 |
+
view_glb_path, # view_3d_output
|
| 679 |
+
log_msg, # log_output
|
| 680 |
+
gr.Dropdown(choices=frame_filter_choices, value=frame_filter,
|
| 681 |
+
interactive=True), # frame_filter
|
| 682 |
+
processed_data, # processed_data_state
|
| 683 |
+
depth_vis, # depth_map
|
| 684 |
+
normal_vis, # normal_map
|
| 685 |
+
measure_img, # measure_image
|
| 686 |
+
"", # measure_text
|
| 687 |
+
depth_sel, # depth_view_selector
|
| 688 |
+
normal_sel, # normal_view_selector
|
| 689 |
+
measure_sel, # measure_view_selector
|
| 690 |
)
|
| 691 |
|
| 692 |
|
| 693 |
# ============================================================================
|
| 694 |
+
# Live Re-visualisation (from saved predictions)
|
| 695 |
# ============================================================================
|
| 696 |
|
| 697 |
+
|
| 698 |
def clear_fields():
|
| 699 |
+
return None, None
|
|
|
|
| 700 |
|
| 701 |
|
| 702 |
def update_log():
|
| 703 |
+
return "Loading and reconstructingβ¦"
|
|
|
|
def update_raw_3d_visualization(
    target_dir, frame_filter, show_cam, is_example,
    conf_thres=0.0, filter_black_bg=False, filter_white_bg=False, show_mesh=True,
):
    """Re-build the Raw 3D GLB from saved predictions."""
    if is_example == "True":
        return gr.update(), "No reconstruction available."
    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return gr.update(), "No reconstruction available."

    pred_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(pred_path):
        return gr.update(), "Run 'Start Reconstruction' first."

    predictions = dict(np.load(pred_path, allow_pickle=True))

    ff_safe = frame_filter.replace(".", "").replace(":", "").replace(" ", "")
    glb = os.path.join(
        target_dir,
        f"raw_{ff_safe}_cam{show_cam}_mesh{show_mesh}_conf{conf_thres}"
        f"_b{filter_black_bg}_w{filter_white_bg}.glb",
    )

    scene = predictions_to_glb_raw(
        predictions,
        filter_by_frames=frame_filter,
        show_cam=show_cam,
        # …
        as_mesh=show_mesh,
        conf_percentile=conf_thres,
    )
    scene.export(file_obj=glb)
    return glb, "Raw 3D updated."
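

# Note: the GLB filename encodes every visualisation setting, but the scene
# is still re-exported on each call even when that file already exists.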


def update_3d_view_visualization(
    target_dir, frame_filter, show_cam, is_example,
    filter_black_bg=False, filter_white_bg=False, show_mesh=True,
):
    """Re-build the 3D View GLB from saved predictions."""
    if is_example == "True":
        return gr.update(), "No reconstruction available."
    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return gr.update(), "No reconstruction available."

    pred_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(pred_path):
        return gr.update(), "Run 'Start Reconstruction' first."

    predictions = dict(np.load(pred_path, allow_pickle=True))

    ff_safe = frame_filter.replace(".", "").replace(":", "").replace(" ", "")
    glb = os.path.join(
        target_dir,
        f"view_{ff_safe}_cam{show_cam}_mesh{show_mesh}"
        f"_b{filter_black_bg}_w{filter_white_bg}.glb",
    )

    scene = predictions_to_glb_view(
        predictions,
        filter_by_frames=frame_filter,
        show_cam=show_cam,
        mask_black_bg=filter_black_bg,
        mask_white_bg=filter_white_bg,
        as_mesh=show_mesh,
    )
    scene.export(file_obj=glb)
    return glb, "3D View updated."


def update_all_views_on_filter_change(
    target_dir, filter_black_bg, filter_white_bg,
    processed_data,
    depth_view_selector, normal_view_selector, measure_view_selector,
):
    """Re-process per-view data when background filters change."""
    if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
        return processed_data, None, None, None, []

    pred_path = os.path.join(target_dir, "predictions.npz")
    if not os.path.exists(pred_path):
        return processed_data, None, None, None, []

    try:
        predictions = dict(np.load(pred_path, allow_pickle=True))
        views = load_images(os.path.join(target_dir, "images"))

        new_pd = process_predictions_for_visualization(
            predictions, views, high_level_config, filter_black_bg, filter_white_bg
        )

        def _idx(sel):
            # Map a selector label such as "View 3" to a zero-based index.
            try:
                return int(sel.split()[1]) - 1
            except Exception:
                return 0

        d_vis = update_depth_view(new_pd, _idx(depth_view_selector))
        n_vis = update_normal_view(new_pd, _idx(normal_view_selector))
        m_img, _ = update_measure_view(new_pd, _idx(measure_view_selector))
        return new_pd, d_vis, n_vis, m_img, []

    except Exception as e:
        print(f"Error updating views: {e}")
        return processed_data, None, None, None, []


# ============================================================================
# Example Scenes
# ============================================================================


def get_scene_info(examples_dir):
    """Scan examples_dir for scene folders containing images."""
    import glob

    scenes = []
    if not os.path.exists(examples_dir):
        return scenes

    for folder in sorted(os.listdir(examples_dir)):
        path = os.path.join(examples_dir, folder)
        if not os.path.isdir(path):
            continue
        exts = [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif"]
        files = []
        for ext in exts:
            files.extend(glob.glob(os.path.join(path, f"*{ext}")))
            files.extend(glob.glob(os.path.join(path, f"*{ext.upper()}")))
        if files:
            files = sorted(files)
            scenes.append(
                {
                    "name": folder,
                    "path": path,
                    "thumbnail": files[0],
                    "num_images": len(files),
                    "image_files": files,
                }
            )
    return scenes
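
# Each entry is a plain dict, e.g. (with a hypothetical "kitchen" folder):
#   {"name": "kitchen", "path": "examples/kitchen",
#    "thumbnail": "examples/kitchen/img_0.jpg",
#    "num_images": 12, "image_files": [...]}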


def load_example_scene(scene_name, examples_dir="examples"):
    """Stage a bundled example scene's images for reconstruction."""
    scenes = get_scene_info(examples_dir)
    selected = next((s for s in scenes if s["name"] == scene_name), None)
    if selected is None:
        return None, None, None, None, "Scene not found"
    target_dir, image_paths = handle_uploads(selected["image_files"])
    return (
        None,  # raw_3d_output (cleared)
        None,  # view_3d_output (cleared)
        target_dir,
        image_paths,
        f"Loaded scene '{scene_name}' ({selected['num_images']} images). "
        "Click 'Start Reconstruction' to begin.",
    )


# ============================================================================
# Gradio UI
# ============================================================================

theme = get_gradio_theme()

APP_CSS = GRADIO_CSS + """
|
| 872 |
+
.gradio-container { max-width: 100% !important; }
|
| 873 |
+
.gallery-container { max-height: 350px !important; overflow-y: auto !important; }
|
| 874 |
+
.file-preview { max-height: 200px !important; overflow-y: auto !important; }
|
| 875 |
+
.tab-content { min-height: 550px !important; }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 876 |
"""

with gr.Blocks(theme=theme, css=APP_CSS) as demo:

    # ── Hidden state ────────────────────────────────────────────────────
    is_example = gr.Textbox(label="is_example", visible=False, value="None")
    target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")
    processed_data_state = gr.State(value=None)
    measure_points_state = gr.State(value=[])

    # ── Layout ──────────────────────────────────────────────────────────
    with gr.Row(equal_height=False):

        # ---- Left column: inputs ----
        with gr.Column(scale=1, min_width=300):
            gr.Markdown("### 📤 Input")
            # …
                scale=1,
            )

        # ---- Right column: outputs ----
        with gr.Column(scale=2, min_width=600):
            gr.Markdown("### 🎯 Output")

            with gr.Tabs():
                # ── Tab 1: Raw 3D ────────────────────────────────────────
                with gr.Tab("🏛️ Raw 3D"):
                    raw_3d_output = gr.Model3D(
                        height=550,
                        zoom_speed=0.5,
                        pan_speed=0.5,
                        clear_color=[0.0, 0.0, 0.0, 0.0],
                    )

                # ── Tab 2: 3D View ───────────────────────────────────────
                with gr.Tab("🌐 3D View"):
                    view_3d_output = gr.Model3D(
                        height=550,
                        zoom_speed=0.5,
                        pan_speed=0.5,
                        clear_color=[0.0, 0.0, 0.0, 0.0],
                    )

                # ── Tab 3: Depth ─────────────────────────────────────────
                with gr.Tab("📏 Depth"):
                    with gr.Row():
                        prev_depth_btn = gr.Button("← Previous", size="sm", scale=1)
                        depth_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            # …
                        interactive=False,
                    )

                # ── Tab 4: Normal ────────────────────────────────────────
                with gr.Tab("🧭 Normal"):
                    with gr.Row():
                        prev_normal_btn = gr.Button("← Previous", size="sm", scale=1)
                        normal_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            # …
                        interactive=False,
                    )

                # ── Tab 5: Measure ───────────────────────────────────────
                with gr.Tab("📐 Measure"):
                    gr.Markdown(MEASURE_INSTRUCTIONS_HTML)
                    with gr.Row():
                        prev_measure_btn = gr.Button("← Previous", size="sm", scale=1)
                        measure_view_selector = gr.Dropdown(
                            choices=["View 1"],
                            value="View 1",
                            # …
                        sources=[],
                    )
                    gr.Markdown(
                        "**Note:** Light-grey areas indicate regions with no "
                        "depth information where measurements cannot be taken."
                    )
                    measure_text = gr.Markdown("")
            # …
                max_lines=1,
            )

    # ── Advanced Options ────────────────────────────────────────────────
    with gr.Accordion("⚙️ Advanced Options", open=False):
        with gr.Row(equal_height=False):
            with gr.Column(scale=1, min_width=300):
                gr.Markdown("#### Visualisation Parameters")
                frame_filter = gr.Dropdown(
                    choices=["All"], value="All", label="Display Frame"
                )
                # …
                    maximum=100,
                    value=0,
                    step=0.1,
                    label="Confidence Threshold – Percentile (Raw 3D only)",
                )
                show_cam = gr.Checkbox(label="Show Camera", value=True)
                show_mesh = gr.Checkbox(label="Show Mesh", value=True)
                # …
                filter_white_bg = gr.Checkbox(
                    label="Filter White Background", value=False
                )
            with gr.Column(scale=1, min_width=300):
                gr.Markdown("#### Reconstruction Parameters")
                apply_mask_checkbox = gr.Checkbox(
                    label="Apply Depth Mask", value=True
                )

    # ── Example Scenes ──────────────────────────────────────────────────
    with gr.Accordion("🖼️ Example Scenes", open=False):
        scenes = get_scene_info("examples")
        if scenes:
            for i in range(0, len(scenes), 4):
                with gr.Row(equal_height=True):
                    for j in range(4):
                        idx = i + j
                        if idx < len(scenes):
                            scene = scenes[idx]
                            with gr.Column(scale=1, min_width=150):
                                scene_img = gr.Image(
                                    value=scene["thumbnail"],
                                    # …
                                    container=False,
                                )
                                gr.Markdown(
                                    f"{scene['name']} ({scene['num_images']} images)"
                                )
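                                # Bind the scene name as a lambda default so
                                # each thumbnail loads its own scene; a bare
                                # closure would capture the loop variable and
                                # always load the last one.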
                                scene_img.select(
                                    fn=lambda name=scene["name"]: load_example_scene(
                                        name
                                    ),
                                    outputs=[
                                        raw_3d_output,
                                        view_3d_output,
                                        target_dir_output,
                                        image_gallery,
                                        log_output,
                                    ],
                                )

    # ── Optional acknowledgements ───────────────────────────────────────
    if get_acknowledgements_html is not None:
        gr.HTML(get_acknowledgements_html())

    # ====================================================================
    # EVENT BINDINGS
    # ====================================================================

    # ── Upload ──────────────────────────────────────────────────────────
    input_images.change(
        fn=update_gallery_on_upload,
        inputs=[input_images],
        outputs=[
            raw_3d_output,
            view_3d_output,
            target_dir_output,
            image_gallery,
            log_output,
        ],
    )

    # ── Reconstruct ─────────────────────────────────────────────────────
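    # Three chained steps: blank the 3D viewers, post an interim log message,
    # then run the reconstruction and populate every output.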
    submit_btn.click(
        fn=clear_fields,
        outputs=[raw_3d_output, view_3d_output],
    ).then(
        fn=update_log,
        outputs=[log_output],
    # …
            show_mesh,
        ],
        outputs=[
            raw_3d_output,
            view_3d_output,
            log_output,
            frame_filter,
            processed_data_state,
    # …
        outputs=[is_example],
    )

    # ── Clear button ────────────────────────────────────────────────────
    clear_btn.add([raw_3d_output, view_3d_output, log_output])

    # ── Live updates: frame_filter / show_cam / show_mesh ───────────────
    # These affect both Raw 3D and 3D View.
    for component in [frame_filter, show_cam, show_mesh]:
        component.change(
            fn=update_raw_3d_visualization,
            inputs=[
                target_dir_output, frame_filter, show_cam, is_example,
                conf_thres, filter_black_bg, filter_white_bg, show_mesh,
            ],
            outputs=[raw_3d_output, log_output],
        ).then(
            fn=update_3d_view_visualization,
            inputs=[
                target_dir_output, frame_filter, show_cam, is_example,
                filter_black_bg, filter_white_bg, show_mesh,
            ],
            outputs=[view_3d_output, log_output],
        )

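    # Listing components in `inputs` hands each handler their current values,
    # so one shared callback serves every control in the loop.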

    # ── Live update: conf_thres (Raw 3D only) ───────────────────────────
    conf_thres.change(
        fn=update_raw_3d_visualization,
        inputs=[
            target_dir_output, frame_filter, show_cam, is_example,
            conf_thres, filter_black_bg, filter_white_bg, show_mesh,
        ],
        outputs=[raw_3d_output, log_output],
    )

    # ── Live updates: background filters ────────────────────────────────
    # These affect Raw 3D, 3D View, AND the Depth/Normal/Measure tabs.
    for bg_filter in [filter_black_bg, filter_white_bg]:
        bg_filter.change(
            fn=update_raw_3d_visualization,
            inputs=[
                target_dir_output, frame_filter, show_cam, is_example,
                conf_thres, filter_black_bg, filter_white_bg, show_mesh,
            ],
            outputs=[raw_3d_output, log_output],
        ).then(
            fn=update_3d_view_visualization,
            inputs=[
                target_dir_output, frame_filter, show_cam, is_example,
                filter_black_bg, filter_white_bg, show_mesh,
            ],
            outputs=[view_3d_output, log_output],
        ).then(
            fn=update_all_views_on_filter_change,
            inputs=[
            # …
            ],
        )

    # ── Depth tab navigation ────────────────────────────────────────────
    prev_depth_btn.click(
        fn=lambda pd, cs: navigate_depth_view(pd, cs, -1),
        inputs=[processed_data_state, depth_view_selector],
    # …
        outputs=[depth_map],
    )
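    # The -1 offset steps back one view; the Normal and Measure tabs reuse
    # the same navigation pattern below.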

    # ── Normal tab navigation ───────────────────────────────────────────
    prev_normal_btn.click(
        fn=lambda pd, cs: navigate_normal_view(pd, cs, -1),
        inputs=[processed_data_state, normal_view_selector],
    # …
        outputs=[normal_map],
    )

    # ── Measure tab navigation ──────────────────────────────────────────
    prev_measure_btn.click(
        fn=lambda pd, cs: navigate_measure_view(pd, cs, -1),
        inputs=[processed_data_state, measure_view_selector],
    # …
        outputs=[measure_image, measure_points_state],
    )

    # ── Measure click ───────────────────────────────────────────────────
    measure_image.select(
        fn=measure,
        inputs=[processed_data_state, measure_points_state, measure_view_selector],