Spaces:
Running on Zero
Update app.py
app.py
CHANGED
@@ -14,34 +14,20 @@ import spaces
 import torch
 from PIL import Image
 from pillow_heif import register_heif_opener
-import matplotlib.pyplot as plt
 
⋮
-    from mapanything.utils.hf_utils.viz import predictions_to_glb
-except ImportError:
-    from mapanything.utils.hf_utils.visual_util import predictions_to_glb
 
 from mapanything.utils.geometry import depthmap_to_world_frame, points_to_normals
-from mapanything.utils.hf_utils.css_and_html import (
-    get_gradio_theme,
-    GRADIO_CSS,
-)
-# Use a default string if MEASURE_INSTRUCTIONS_HTML is missing in older versions
-try:
-    from mapanything.utils.hf_utils.css_and_html import MEASURE_INSTRUCTIONS_HTML
-except ImportError:
-    MEASURE_INSTRUCTIONS_HTML = "**Measurement:** Click any two points on the image to measure the 3D distance between them."
-
 from mapanything.utils.hf_utils.hf_helpers import initialize_mapanything_model
 from mapanything.utils.image import load_images, rgb
 
-register_heif_opener()
-sys.path.append("mapanything/")
-
 # ============================================================================
 # Global Configuration
 # ============================================================================
 high_level_config = {
     "path": "configs/train.yaml",
     "hf_model_name": "facebook/map-anything",
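The import block removed in this hunk wrapped predictions_to_glb in Python's standard try/except ImportError fallback so the app kept working against older mapanything layouts where the helper lived in visual_util instead of viz. A minimal sketch of the idiom, with module names taken from the hunk above:

try:
    from mapanything.utils.hf_utils.viz import predictions_to_glb
except ImportError:
    # fall back to the module path used by older releases
    from mapanything.utils.hf_utils.visual_util import predictions_to_glb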
@@ -64,16 +50,28 @@ high_level_config = {
 model = None
 
 # ============================================================================
-#
 # ============================================================================
 @spaces.GPU(duration=120)
-def run_model(
⋮
-):
-    """Run the MapAnything model and prepare output formats."""
     global model
     import torch
 
@@ -101,7 +99,10 @@ def run_model(
 
     print("Running 3D reconstruction...")
     outputs = model.infer(
-        views,
     )
 
     predictions = {}
@@ -152,20 +153,20 @@ def run_model(
     predictions["images"] = np.stack(images_list, axis=0)
     predictions["final_mask"] = np.stack(final_mask_list, axis=0)
 
-    # Prepare data for new tabs
-    processed_data = process_predictions_for_visualization(
-        predictions, views, high_level_config, filter_black_bg, filter_white_bg
-    )
-
     torch.cuda.empty_cache()
-    return predictions
 
 
 # ============================================================================
-#
 # ============================================================================
 def colorize_depth(depth_map, mask=None):
-
     depth_normalized = depth_map.copy()
     valid_mask = depth_normalized > 0
 
@@ -176,34 +177,47 @@ def colorize_depth(depth_map, mask=None):
         valid_depths = depth_normalized[valid_mask]
         p5 = np.percentile(valid_depths, 5)
         p95 = np.percentile(valid_depths, 95)
-        depth_normalized[valid_mask] = (depth_normalized[valid_mask] - p5) / (
-
     colormap = plt.cm.turbo_r
     colored = colormap(depth_normalized)
     colored = (colored[:, :, :3] * 255).astype(np.uint8)
     colored[~valid_mask] = [255, 255, 255]
     return colored
 
 
 def colorize_normal(normal_map, mask=None):
-
     normal_vis = normal_map.copy()
     if mask is not None:
-
-
     normal_vis = (normal_vis + 1.0) / 2.0
     normal_vis = (normal_vis * 255).astype(np.uint8)
     return normal_vis
 
-
     processed_data = {}
     for view_idx, view in enumerate(views):
-        image = rgb(view["img"], norm_type=
         pred_pts3d = predictions["world_points"][view_idx]
 
         view_data = {
             "image": image[0],
             "points3d": pred_pts3d,
-            "depth":
             "normal": None,
             "mask": None,
         }
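Both the old and the rewritten colorize_depth clip depths to the 5th-95th percentile before applying the colormap, so a handful of very distant points cannot compress the rest of the range. A minimal NumPy-only sketch of that normalization step (function and variable names here are illustrative, not from app.py):

import numpy as np

def normalize_depth_for_display(depth):
    # depth: (H, W) array, zeros mark invalid pixels
    valid = depth > 0
    lo, hi = np.percentile(depth[valid], [5, 95])
    out = np.zeros_like(depth, dtype=np.float32)
    out[valid] = np.clip((depth[valid] - lo) / (hi - lo + 1e-8), 0.0, 1.0)
    return out  # values in [0, 1], ready for a matplotlib colormap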
@@ -211,136 +225,240 @@ def process_predictions_for_visualization(predictions, views, high_level_config,
         mask = predictions["final_mask"][view_idx].copy()
 
         if filter_black_bg:
-
-
-            mask = mask & black_bg_mask
 
         if filter_white_bg:
⋮
 
         view_data["mask"] = mask
-
         view_data["normal"] = normals
         processed_data[view_idx] = view_data
-
     return processed_data
 
⋮
 
 def get_view_data_by_index(processed_data, view_index):
-    if processed_data is None or len(processed_data) == 0:
⋮
 
 def update_depth_view(processed_data, view_index):
-
-    if
-
 
 def update_normal_view(processed_data, view_index):
-
-    if
-
 
 def update_measure_view(processed_data, view_index):
⋮
     if image.dtype != np.uint8:
-        image = (
 
-    if
-
-        if
-
     alpha = 0.5
     for c in range(3):
-            image[:, :, c] = np.where(
     return image, []
 
-def navigate_view(processed_data, current_selector_value, direction, update_fn):
-    if processed_data is None or len(processed_data) == 0: return "View 1", None
-    try: current_view = int(current_selector_value.split()[1]) - 1
-    except: current_view = 0
-    new_view = (current_view + direction) % len(processed_data)
-    new_selector_value = f"View {new_view + 1}"
-    return new_selector_value, update_fn(processed_data, new_view)
 
⋮
 
 def populate_visualization_tabs(processed_data):
     if processed_data is None or len(processed_data) == 0:
         return None, None, None, []
⋮
 
-def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
-    if processed_data is None or len(processed_data) == 0: return None, [], "No data available"
-    try: current_view_index = int(current_view_selector.split()[1]) - 1
-    except: current_view_index = 0
 
-
-    current_view = processed_data[list(processed_data.keys())[current_view_index]]
-    point2d = event.index[0], event.index[1]
 
-    if current_view["mask"] is not None and 0 <= point2d[1] < current_view["mask"].shape[0] and 0 <= point2d[0] < current_view["mask"].shape[1]:
-        if not current_view["mask"][point2d[1], point2d[0]]:
-            masked_image, _ = update_measure_view(processed_data, current_view_index)
-            return masked_image, measure_points, '<span style="color: red; font-weight: bold;">Cannot measure on masked areas (shown in grey)</span>'
 
⋮
 
-    if image.dtype != np.uint8:
-        image = (image * 255).astype(np.uint8) if image.max() <= 1.0 else image.astype(np.uint8)
 
⋮
 
⋮
-        distance_text = f"- **Distance: {distance:.2f}m**"
-
-        return image, [], depth_text + distance_text
-    return image, measure_points, depth_text
 
 
 # ============================================================================
-#
 # ============================================================================
 def handle_uploads(input_images):
     start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()
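Both the measure() handler removed above and its replacement later in this diff compute the reported distance the same way: look up the two clicked pixels in the per-view world-points array and take the Euclidean norm. A small illustrative sketch, assuming points3d is an (H, W, 3) array in metres and clicks arrive as (x, y) pixel coordinates as in gr.SelectData.index:

import numpy as np

def distance_between_clicks(points3d, p1, p2):
    # gr.SelectData gives (x, y); NumPy indexing is [row, col], i.e. [y, x]
    a = points3d[p1[1], p1[0]]
    b = points3d[p2[1], p2[0]]
    return float(np.linalg.norm(a - b))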
@@ -349,41 +467,71 @@ def handle_uploads(input_images):
     target_dir = f"input_images_{timestamp}"
     target_dir_images = os.path.join(target_dir, "images")
 
-    if os.path.exists(target_dir):
     os.makedirs(target_dir)
     os.makedirs(target_dir_images)
 
     image_paths = []
     if input_images is not None:
         for file_data in input_images:
-            file_path =
⋮
             try:
-                with Image.open(file_path) as
-                    if
⋮
             except Exception as e:
⋮
             else:
⋮
 
     image_paths = sorted(image_paths)
-    print(
     return target_dir, image_paths
 
 
 def update_gallery_on_upload(input_images):
-
     target_dir, image_paths = handle_uploads(input_images)
-    return
 
 
 @spaces.GPU(duration=120)
 def gradio_demo(
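handle_uploads converts HEIC/HEIF uploads to JPEG so the downstream loader only ever sees formats PIL reads natively; the register_heif_opener() call from pillow_heif is what lets Image.open decode the HEIC file in the first place. A standalone sketch of the same conversion, with made-up file names:

from PIL import Image
from pillow_heif import register_heif_opener

register_heif_opener()  # registers HEIC/HEIF support with PIL

with Image.open("photo.heic") as im:
    if im.mode not in ("RGB", "L"):
        im = im.convert("RGB")
    im.save("photo.jpg", "JPEG", quality=95)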
@@ -396,33 +544,45 @@ def gradio_demo(
     apply_mask=True,
     show_mesh=True,
 ):
     if not os.path.isdir(target_dir) or target_dir == "None":
-        return
 
     start_time = time.time()
     gc.collect()
     torch.cuda.empty_cache()
 
     target_dir_images = os.path.join(target_dir, "images")
-    all_files =
-
 
     print("Running MapAnything model...")
     with torch.no_grad():
-        predictions
-            target_dir, apply_mask, filter_black_bg, filter_white_bg
-        )
 
     prediction_save_path = os.path.join(target_dir, "predictions.npz")
     np.savez(prediction_save_path, **predictions)
 
-    if frame_filter is None:
 
-
         target_dir,
-
     )
-
     glbscene = predictions_to_glb(
         predictions,
         filter_by_frames=frame_filter,
@@ -432,298 +592,632 @@ def gradio_demo(
         as_mesh=show_mesh,
         conf_percentile=conf_thres,
     )
-    glbscene.export(file_obj=
 
     del predictions
     gc.collect()
     torch.cuda.empty_cache()
 
⋮
-    depth_vis, normal_vis, measure_img, _ = populate_visualization_tabs(processed_data)
-    depth_selector, normal_selector, measure_selector = update_view_selectors(processed_data)
 
     return (
⋮
         log_msg,
-        gr.Dropdown(
         processed_data,
         depth_vis,
         normal_vis,
         measure_img,
-        "",
⋮
     )
 
⋮
 
 def update_visualization(
-    target_dir,
⋮
 ):
⋮
 
-    loaded = np.load(
-    predictions = {
 
⋮
-        f"
     )
 
⋮
 
-    return glbfile, glbfile, "Visualization updated."
 
 def update_all_views_on_filter_change(
-    target_dir,
-
 ):
     if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
         return processed_data, None, None, None, []
 
-
-    if not os.path.exists(
         return processed_data, None, None, None, []
 
     try:
-        loaded = np.load(
-        predictions = {
-
-        views = load_images(image_folder_path)
 
⋮
             predictions, views, high_level_config, filter_black_bg, filter_white_bg
         )
 
⋮
-        except: normal_view_idx = 0
-        try: measure_view_idx = int(measure_view_selector.split()[1]) - 1 if measure_view_selector else 0
-        except: measure_view_idx = 0
 
⋮
     except Exception as e:
-        print(f"
         return processed_data, None, None, None, []
 
 # ============================================================================
-# Example Scenes
 # ============================================================================
 def get_scene_info(examples_dir):
     import glob
     scenes = []
-    if not os.path.exists(examples_dir):
⋮
     return scenes
 
 def load_example_scene(scene_name, examples_dir="examples"):
     scenes = get_scene_info(examples_dir)
-
-    if
 
-    target_dir, image_paths = handle_uploads(
     return (
-        None,
-
     )
 
 # ============================================================================
-# Gradio UI
 # ============================================================================
 theme = get_gradio_theme()
-
 .gradio-container { max-width: 100% !important; }
 .gallery-container { max-height: 350px !important; overflow-y: auto !important; }
-.file-preview
 .textbox-container { max-height: 100px !important; }
 .tab-content { min-height: 550px !important; }
 """
 
 with gr.Blocks(theme=theme, css=APP_CSS) as demo:
-
     processed_data_state = gr.State(value=None)
     measure_points_state = gr.State(value=[])
-    target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")
 
     with gr.Row(equal_height=False):
-
         with gr.Column(scale=1, min_width=300):
             gr.Markdown("### 📤 Input")
             input_images = gr.File(
-                file_count="multiple",
-
             )
             image_gallery = gr.Gallery(
-                label="Image Preview",
             )
             with gr.Row():
-                submit_btn = gr.Button(
⋮
-            apply_mask_checkbox = gr.Checkbox(label="Apply Depth Mask", value=True)
-
-            with gr.Accordion("🖼️ Example Scenes", open=False):
-                scenes = get_scene_info("examples")
-                if scenes:
-                    for i in range(0, len(scenes), 4):
-                        with gr.Row(equal_height=True):
-                            for j in range(4):
-                                if (i + j) < len(scenes):
-                                    scene = scenes[i + j]
-                                    with gr.Column(scale=1, min_width=150):
-                                        scene_img = gr.Image(value=scene["thumbnail"], height=150, interactive=False, show_label=False)
-                                        gr.Markdown(f"{scene['name']} ({scene['num_images']} images)", elem_classes=["text-center"])
-                                        scene_img.select(
-                                            fn=lambda n=scene["name"]: load_example_scene(n),
-                                            outputs=[gr.Model3D(), gr.Model3D(), target_dir_output, image_gallery, gr.Textbox()]
-                                        )
-
-            # Right Side: Visualization Tabs
         with gr.Column(scale=2, min_width=600):
             gr.Markdown("### 🎯 Output")
-
-                value="📌 Please upload images, then click 'Start Reconstruction'",
-                label="Status Information", interactive=False, lines=1, max_lines=1
-            )
-
             with gr.Tabs():
                 with gr.Tab("🏗️ Raw 3D"):
-                    raw_3d_output = gr.Model3D(
⋮
                 measure_text = gr.Markdown("")
 
⋮
     input_images.change(
-        fn=update_gallery_on_upload,
-
     )
 
     submit_btn.click(
-        fn=clear_fields,
     ).then(
-        fn=update_log,
     ).then(
         fn=gradio_demo,
         inputs=[
-            target_dir_output,
-
         ],
         outputs=[
-            raw_3d_output,
⋮
     ).then(
-        fn=lambda: "False",
     )
-
     clear_btn.add([raw_3d_output, view_3d_output, log_output])
 
-    #
-    for
-
         fn=update_visualization,
-            inputs=[
-
     )
 
-    #
-    for
-
         fn=update_visualization,
-            inputs=[
-
     ).then(
         fn=update_all_views_on_filter_change,
-            inputs=[
-
     )
 
-    #
-
-        fn=
-
     )
 
-    #
⋮
 
⋮
 
-    prev_measure_btn.click(lambda p, c: navigate_measure_view(p, c, -1), inputs=[processed_data_state, measure_view_selector], outputs=[measure_view_selector, measure_image, measure_points_state])
-    next_measure_btn.click(lambda p, c: navigate_measure_view(p, c, 1), inputs=[processed_data_state, measure_view_selector], outputs=[measure_view_selector, measure_image, measure_points_state])
-    measure_view_selector.change(lambda p, s: update_measure_view(p, int(s.split()[1])-1) if s else (None, []), inputs=[processed_data_state, measure_view_selector], outputs=[measure_image, measure_points_state])
 
-demo.queue(max_size=20).launch(
| 14 |
import torch
|
| 15 |
from PIL import Image
|
| 16 |
from pillow_heif import register_heif_opener
|
|
|
|
| 17 |
|
| 18 |
+
register_heif_opener()
|
| 19 |
+
sys.path.append("mapanything/")
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
from mapanything.utils.geometry import depthmap_to_world_frame, points_to_normals
|
| 22 |
+
from mapanything.utils.hf_utils.css_and_html import GRADIO_CSS, get_gradio_theme
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
from mapanything.utils.hf_utils.hf_helpers import initialize_mapanything_model
|
| 24 |
+
from mapanything.utils.hf_utils.viz import predictions_to_glb
|
| 25 |
from mapanything.utils.image import load_images, rgb
|
| 26 |
|
|
|
|
|
|
|
|
|
|
| 27 |
# ============================================================================
|
| 28 |
# Global Configuration
|
| 29 |
# ============================================================================
|
| 30 |
+
|
| 31 |
high_level_config = {
|
| 32 |
"path": "configs/train.yaml",
|
| 33 |
"hf_model_name": "facebook/map-anything",
|
|
|
|
| 50 |
model = None
|
| 51 |
|
| 52 |
# ============================================================================
|
| 53 |
+
# Measure Instructions (inline definition)
|
| 54 |
+
# ============================================================================
|
| 55 |
+
|
| 56 |
+
MEASURE_INSTRUCTIONS = """
|
| 57 |
+
**📏 How to Measure:**
|
| 58 |
+
1. **Click** on the image to place **Point 1** — its depth will be shown.
|
| 59 |
+
2. **Click** again to place **Point 2** — the 3D Euclidean distance between the two points is computed automatically.
|
| 60 |
+
3. After measuring, both points reset so you can measure again.
|
| 61 |
+
- Grey-overlay areas have no valid depth — clicks there are ignored.
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
# ============================================================================
|
| 65 |
+
# Core Model Inference (Raw 3D — kept exactly as original)
|
| 66 |
# ============================================================================
|
| 67 |
+
|
| 68 |
+
|
| 69 |
@spaces.GPU(duration=120)
|
| 70 |
+
def run_model(target_dir, apply_mask=True):
|
| 71 |
+
"""
|
| 72 |
+
Run the MapAnything model.
|
| 73 |
+
Returns predictions dict (numpy arrays).
|
| 74 |
+
"""
|
|
|
|
|
|
|
| 75 |
global model
|
| 76 |
import torch
|
| 77 |
|
|
|
|
| 99 |
|
| 100 |
print("Running 3D reconstruction...")
|
| 101 |
outputs = model.infer(
|
| 102 |
+
views,
|
| 103 |
+
apply_mask=apply_mask,
|
| 104 |
+
mask_edges=True,
|
| 105 |
+
memory_efficient_inference=False,
|
| 106 |
)
|
| 107 |
|
| 108 |
predictions = {}
|
|
|
|
| 153 |
predictions["images"] = np.stack(images_list, axis=0)
|
| 154 |
predictions["final_mask"] = np.stack(final_mask_list, axis=0)
|
| 155 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 156 |
torch.cuda.empty_cache()
|
| 157 |
+
return predictions
|
| 158 |
|
| 159 |
|
| 160 |
# ============================================================================
|
| 161 |
+
# Visualization Helpers — Depth / Normal / Measure
|
| 162 |
# ============================================================================
|
| 163 |
+
|
| 164 |
+
|
| 165 |
def colorize_depth(depth_map, mask=None):
|
| 166 |
+
"""Convert a depth map to a turbo-coloured uint8 image."""
|
| 167 |
+
if depth_map is None:
|
| 168 |
+
return None
|
| 169 |
+
|
| 170 |
depth_normalized = depth_map.copy()
|
| 171 |
valid_mask = depth_normalized > 0
|
| 172 |
|
|
|
|
| 177 |
valid_depths = depth_normalized[valid_mask]
|
| 178 |
p5 = np.percentile(valid_depths, 5)
|
| 179 |
p95 = np.percentile(valid_depths, 95)
|
| 180 |
+
depth_normalized[valid_mask] = (depth_normalized[valid_mask] - p5) / (
|
| 181 |
+
p95 - p5 + 1e-8
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
import matplotlib.pyplot as plt
|
| 185 |
+
|
| 186 |
colormap = plt.cm.turbo_r
|
| 187 |
colored = colormap(depth_normalized)
|
| 188 |
colored = (colored[:, :, :3] * 255).astype(np.uint8)
|
| 189 |
colored[~valid_mask] = [255, 255, 255]
|
| 190 |
return colored
|
| 191 |
|
| 192 |
+
|
| 193 |
def colorize_normal(normal_map, mask=None):
|
| 194 |
+
"""Map normals from [-1,1] → [0,255] uint8; masked areas become grey."""
|
| 195 |
+
if normal_map is None:
|
| 196 |
+
return None
|
| 197 |
+
|
| 198 |
normal_vis = normal_map.copy()
|
| 199 |
if mask is not None:
|
| 200 |
+
normal_vis[~mask] = [0, 0, 0]
|
| 201 |
+
|
| 202 |
normal_vis = (normal_vis + 1.0) / 2.0
|
| 203 |
normal_vis = (normal_vis * 255).astype(np.uint8)
|
| 204 |
return normal_vis
|
| 205 |
|
| 206 |
+
|
| 207 |
+
def process_predictions_for_visualization(
|
| 208 |
+
predictions, views, config, filter_black_bg=False, filter_white_bg=False
|
| 209 |
+
):
|
| 210 |
+
"""Build per-view dicts with image / depth / normal / mask / points3d."""
|
| 211 |
processed_data = {}
|
| 212 |
+
|
| 213 |
for view_idx, view in enumerate(views):
|
| 214 |
+
image = rgb(view["img"], norm_type=config["data_norm_type"])
|
| 215 |
pred_pts3d = predictions["world_points"][view_idx]
|
| 216 |
|
| 217 |
view_data = {
|
| 218 |
"image": image[0],
|
| 219 |
"points3d": pred_pts3d,
|
| 220 |
+
"depth": None,
|
| 221 |
"normal": None,
|
| 222 |
"mask": None,
|
| 223 |
}
|
|
|
|
| 225 |
mask = predictions["final_mask"][view_idx].copy()
|
| 226 |
|
| 227 |
if filter_black_bg:
|
| 228 |
+
vc = image[0] * 255 if image[0].max() <= 1.0 else image[0]
|
| 229 |
+
mask = mask & (vc.sum(axis=2) >= 16)
|
|
|
|
| 230 |
|
| 231 |
if filter_white_bg:
|
| 232 |
+
vc = image[0] * 255 if image[0].max() <= 1.0 else image[0]
|
| 233 |
+
mask = mask & ~(
|
| 234 |
+
(vc[:, :, 0] > 240) & (vc[:, :, 1] > 240) & (vc[:, :, 2] > 240)
|
| 235 |
+
)
|
| 236 |
|
| 237 |
view_data["mask"] = mask
|
| 238 |
+
view_data["depth"] = predictions["depth"][view_idx].squeeze()
|
| 239 |
+
|
| 240 |
+
normals, _ = points_to_normals(pred_pts3d, mask=mask)
|
| 241 |
view_data["normal"] = normals
|
| 242 |
+
|
| 243 |
processed_data[view_idx] = view_data
|
| 244 |
+
|
| 245 |
return processed_data
|
| 246 |
|
| 247 |
+
|
| 248 |
+
# ── per-view accessors ────────────────────────────────────────────────────
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _view_index_from_selector(selector_value):
|
| 252 |
+
try:
|
| 253 |
+
return int(selector_value.split()[1]) - 1
|
| 254 |
+
except Exception:
|
| 255 |
+
return 0
|
| 256 |
+
|
| 257 |
|
| 258 |
def get_view_data_by_index(processed_data, view_index):
|
| 259 |
+
if processed_data is None or len(processed_data) == 0:
|
| 260 |
+
return None
|
| 261 |
+
keys = list(processed_data.keys())
|
| 262 |
+
view_index = max(0, min(view_index, len(keys) - 1))
|
| 263 |
+
return processed_data[keys[view_index]]
|
| 264 |
+
|
| 265 |
|
| 266 |
def update_depth_view(processed_data, view_index):
|
| 267 |
+
vd = get_view_data_by_index(processed_data, view_index)
|
| 268 |
+
if vd is None or vd["depth"] is None:
|
| 269 |
+
return None
|
| 270 |
+
return colorize_depth(vd["depth"], mask=vd.get("mask"))
|
| 271 |
+
|
| 272 |
|
| 273 |
def update_normal_view(processed_data, view_index):
|
| 274 |
+
vd = get_view_data_by_index(processed_data, view_index)
|
| 275 |
+
if vd is None or vd["normal"] is None:
|
| 276 |
+
return None
|
| 277 |
+
return colorize_normal(vd["normal"], mask=vd.get("mask"))
|
| 278 |
+
|
| 279 |
|
| 280 |
def update_measure_view(processed_data, view_index):
|
| 281 |
+
"""Return (image_with_mask_overlay, empty_points_list)."""
|
| 282 |
+
vd = get_view_data_by_index(processed_data, view_index)
|
| 283 |
+
if vd is None:
|
| 284 |
+
return None, []
|
| 285 |
+
|
| 286 |
+
image = vd["image"].copy()
|
| 287 |
if image.dtype != np.uint8:
|
| 288 |
+
image = (
|
| 289 |
+
(image * 255).astype(np.uint8)
|
| 290 |
+
if image.max() <= 1.0
|
| 291 |
+
else image.astype(np.uint8)
|
| 292 |
+
)
|
| 293 |
|
| 294 |
+
if vd["mask"] is not None:
|
| 295 |
+
inv = ~vd["mask"]
|
| 296 |
+
if inv.any():
|
| 297 |
+
overlay = np.array([255, 220, 220], dtype=np.uint8)
|
| 298 |
alpha = 0.5
|
| 299 |
for c in range(3):
|
| 300 |
+
image[:, :, c] = np.where(
|
| 301 |
+
inv,
|
| 302 |
+
(1 - alpha) * image[:, :, c] + alpha * overlay[c],
|
| 303 |
+
image[:, :, c],
|
| 304 |
+
).astype(np.uint8)
|
| 305 |
+
|
| 306 |
return image, []
|
| 307 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 308 |
|
| 309 |
+
# ── view‑selector helpers ─────────────────────────────────────────────────
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def update_view_selectors(processed_data):
|
| 313 |
+
n = len(processed_data) if processed_data else 1
|
| 314 |
+
choices = [f"View {i + 1}" for i in range(n)]
|
| 315 |
+
return (
|
| 316 |
+
gr.Dropdown(choices=choices, value=choices[0]),
|
| 317 |
+
gr.Dropdown(choices=choices, value=choices[0]),
|
| 318 |
+
gr.Dropdown(choices=choices, value=choices[0]),
|
| 319 |
+
)
|
| 320 |
+
|
| 321 |
|
| 322 |
def populate_visualization_tabs(processed_data):
|
| 323 |
if processed_data is None or len(processed_data) == 0:
|
| 324 |
return None, None, None, []
|
| 325 |
+
return (
|
| 326 |
+
update_depth_view(processed_data, 0),
|
| 327 |
+
update_normal_view(processed_data, 0),
|
| 328 |
+
update_measure_view(processed_data, 0)[0],
|
| 329 |
+
[],
|
| 330 |
+
)
|
| 331 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 332 |
|
| 333 |
+
# ── navigation (prev / next) ─────────────────────────────────────────────
|
|
|
|
|
|
|
| 334 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 335 |
|
| 336 |
+
def navigate_depth_view(processed_data, cur, direction):
|
| 337 |
+
if not processed_data:
|
| 338 |
+
return "View 1", None
|
| 339 |
+
idx = (_view_index_from_selector(cur) + direction) % len(processed_data)
|
| 340 |
+
return f"View {idx + 1}", update_depth_view(processed_data, idx)
|
| 341 |
|
|
|
|
|
|
|
| 342 |
|
| 343 |
+
def navigate_normal_view(processed_data, cur, direction):
|
| 344 |
+
if not processed_data:
|
| 345 |
+
return "View 1", None
|
| 346 |
+
idx = (_view_index_from_selector(cur) + direction) % len(processed_data)
|
| 347 |
+
return f"View {idx + 1}", update_normal_view(processed_data, idx)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def navigate_measure_view(processed_data, cur, direction):
|
| 351 |
+
if not processed_data:
|
| 352 |
+
return "View 1", None, []
|
| 353 |
+
idx = (_view_index_from_selector(cur) + direction) % len(processed_data)
|
| 354 |
+
img, pts = update_measure_view(processed_data, idx)
|
| 355 |
+
return f"View {idx + 1}", img, pts
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
# ── measure click handler ────────────────────────────────────────────────
|
| 359 |
+
|
| 360 |
|
| 361 |
+
def measure(processed_data, measure_points, current_view_selector, event: gr.SelectData):
|
| 362 |
+
"""Two-click measurement: first click → depth, second click → depth + distance."""
|
| 363 |
+
try:
|
| 364 |
+
if processed_data is None or len(processed_data) == 0:
|
| 365 |
+
return None, [], "No data available"
|
| 366 |
+
|
| 367 |
+
vi = _view_index_from_selector(current_view_selector)
|
| 368 |
+
vi = max(0, min(vi, len(processed_data) - 1))
|
| 369 |
+
keys = list(processed_data.keys())
|
| 370 |
+
cv = processed_data[keys[vi]]
|
| 371 |
+
if cv is None:
|
| 372 |
+
return None, [], "No view data"
|
| 373 |
+
|
| 374 |
+
px, py = event.index[0], event.index[1]
|
| 375 |
+
|
| 376 |
+
# reject clicks on masked areas
|
| 377 |
+
if cv["mask"] is not None:
|
| 378 |
+
if 0 <= py < cv["mask"].shape[0] and 0 <= px < cv["mask"].shape[1]:
|
| 379 |
+
if not cv["mask"][py, px]:
|
| 380 |
+
img, _ = update_measure_view(processed_data, vi)
|
| 381 |
+
return (
|
| 382 |
+
img,
|
| 383 |
+
measure_points,
|
| 384 |
+
'<span style="color:red;font-weight:bold;">'
|
| 385 |
+
"Cannot measure on masked areas (grey overlay)</span>",
|
| 386 |
+
)
|
| 387 |
+
|
| 388 |
+
measure_points.append((px, py))
|
| 389 |
+
|
| 390 |
+
img, _ = update_measure_view(processed_data, vi)
|
| 391 |
+
if img is None:
|
| 392 |
+
return None, [], "No image"
|
| 393 |
+
img = img.copy()
|
| 394 |
+
pts3d = cv["points3d"]
|
| 395 |
+
|
| 396 |
+
if img.dtype != np.uint8:
|
| 397 |
+
img = (
|
| 398 |
+
(img * 255).astype(np.uint8)
|
| 399 |
+
if img.max() <= 1.0
|
| 400 |
+
else img.astype(np.uint8)
|
| 401 |
+
)
|
| 402 |
|
| 403 |
+
for p in measure_points:
|
| 404 |
+
if 0 <= p[0] < img.shape[1] and 0 <= p[1] < img.shape[0]:
|
| 405 |
+
cv2.circle(img, p, radius=5, color=(255, 0, 0), thickness=2)
|
| 406 |
+
|
| 407 |
+
depth_text = ""
|
| 408 |
+
for i, p in enumerate(measure_points):
|
| 409 |
+
if (
|
| 410 |
+
cv["depth"] is not None
|
| 411 |
+
and 0 <= p[1] < cv["depth"].shape[0]
|
| 412 |
+
and 0 <= p[0] < cv["depth"].shape[1]
|
| 413 |
+
):
|
| 414 |
+
d = cv["depth"][p[1], p[0]]
|
| 415 |
+
depth_text += f"- **P{i+1} depth: {d:.2f}m.**\n"
|
| 416 |
+
elif (
|
| 417 |
+
pts3d is not None
|
| 418 |
+
and 0 <= p[1] < pts3d.shape[0]
|
| 419 |
+
and 0 <= p[0] < pts3d.shape[1]
|
| 420 |
+
):
|
| 421 |
+
z = pts3d[p[1], p[0], 2]
|
| 422 |
+
depth_text += f"- **P{i+1} Z-coord: {z:.2f}m.**\n"
|
| 423 |
+
|
| 424 |
+
if len(measure_points) == 2:
|
| 425 |
+
p1, p2 = measure_points
|
| 426 |
+
if (
|
| 427 |
+
0 <= p1[0] < img.shape[1]
|
| 428 |
+
and 0 <= p1[1] < img.shape[0]
|
| 429 |
+
and 0 <= p2[0] < img.shape[1]
|
| 430 |
+
and 0 <= p2[1] < img.shape[0]
|
| 431 |
+
):
|
| 432 |
+
cv2.line(img, p1, p2, color=(255, 0, 0), thickness=2)
|
| 433 |
+
|
| 434 |
+
dist_text = "- **Distance: Unable to compute**"
|
| 435 |
+
if (
|
| 436 |
+
pts3d is not None
|
| 437 |
+
and 0 <= p1[1] < pts3d.shape[0]
|
| 438 |
+
and 0 <= p1[0] < pts3d.shape[1]
|
| 439 |
+
and 0 <= p2[1] < pts3d.shape[0]
|
| 440 |
+
and 0 <= p2[0] < pts3d.shape[1]
|
| 441 |
+
):
|
| 442 |
+
d3 = np.linalg.norm(pts3d[p1[1], p1[0]] - pts3d[p2[1], p2[0]])
|
| 443 |
+
dist_text = f"- **Distance: {d3:.2f}m**"
|
| 444 |
+
|
| 445 |
+
measure_points = []
|
| 446 |
+
return img, measure_points, depth_text + dist_text
|
| 447 |
+
else:
|
| 448 |
+
return img, measure_points, depth_text
|
| 449 |
|
| 450 |
+
except Exception as e:
|
| 451 |
+
print(f"Measure error: {e}")
|
| 452 |
+
return None, [], f"Error: {e}"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 453 |
|
| 454 |
|
| 455 |
# ============================================================================
|
| 456 |
+
# File Handling (images only — kept as original)
|
| 457 |
# ============================================================================
|
| 458 |
+
|
| 459 |
+
|
| 460 |
def handle_uploads(input_images):
|
| 461 |
+
"""Copy uploaded images into a timestamped target_dir/images folder."""
|
| 462 |
start_time = time.time()
|
| 463 |
gc.collect()
|
| 464 |
torch.cuda.empty_cache()
|
|
|
|
| 467 |
target_dir = f"input_images_{timestamp}"
|
| 468 |
target_dir_images = os.path.join(target_dir, "images")
|
| 469 |
|
| 470 |
+
if os.path.exists(target_dir):
|
| 471 |
+
shutil.rmtree(target_dir)
|
| 472 |
os.makedirs(target_dir)
|
| 473 |
os.makedirs(target_dir_images)
|
| 474 |
|
| 475 |
image_paths = []
|
| 476 |
+
|
| 477 |
if input_images is not None:
|
| 478 |
for file_data in input_images:
|
| 479 |
+
file_path = (
|
| 480 |
+
file_data["name"]
|
| 481 |
+
if isinstance(file_data, dict) and "name" in file_data
|
| 482 |
+
else str(file_data)
|
| 483 |
+
)
|
| 484 |
+
|
| 485 |
+
ext = os.path.splitext(file_path)[1].lower()
|
| 486 |
+
if ext in [".heic", ".heif"]:
|
| 487 |
try:
|
| 488 |
+
with Image.open(file_path) as im:
|
| 489 |
+
if im.mode not in ("RGB", "L"):
|
| 490 |
+
im = im.convert("RGB")
|
| 491 |
+
base = os.path.splitext(os.path.basename(file_path))[0]
|
| 492 |
+
dst = os.path.join(target_dir_images, f"{base}.jpg")
|
| 493 |
+
im.save(dst, "JPEG", quality=95)
|
| 494 |
+
image_paths.append(dst)
|
| 495 |
except Exception as e:
|
| 496 |
+
print(f"HEIC convert error: {e}")
|
| 497 |
+
dst = os.path.join(
|
| 498 |
+
target_dir_images, os.path.basename(file_path)
|
| 499 |
+
)
|
| 500 |
+
shutil.copy(file_path, dst)
|
| 501 |
+
image_paths.append(dst)
|
| 502 |
else:
|
| 503 |
+
dst = os.path.join(
|
| 504 |
+
target_dir_images, os.path.basename(file_path)
|
| 505 |
+
)
|
| 506 |
+
shutil.copy(file_path, dst)
|
| 507 |
+
image_paths.append(dst)
|
| 508 |
|
| 509 |
image_paths = sorted(image_paths)
|
| 510 |
+
print(
|
| 511 |
+
f"Files copied to {target_dir_images}; "
|
| 512 |
+
f"took {time.time() - start_time:.3f}s"
|
| 513 |
+
)
|
| 514 |
return target_dir, image_paths
|
| 515 |
|
| 516 |
+
|
| 517 |
def update_gallery_on_upload(input_images):
|
| 518 |
+
"""Handle new uploads — clear viewers, update gallery."""
|
| 519 |
+
if not input_images:
|
| 520 |
+
return None, None, None, None, None
|
| 521 |
target_dir, image_paths = handle_uploads(input_images)
|
| 522 |
+
return (
|
| 523 |
+
None, # clear Raw 3D
|
| 524 |
+
None, # clear 3D View
|
| 525 |
+
target_dir,
|
| 526 |
+
image_paths,
|
| 527 |
+
"Upload complete. Click 'Start Reconstruction' to begin 3D processing.",
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
|
| 531 |
+
# ============================================================================
|
| 532 |
+
# Main Reconstruction
|
| 533 |
+
# ============================================================================
|
| 534 |
+
|
| 535 |
|
| 536 |
@spaces.GPU(duration=120)
|
| 537 |
def gradio_demo(
|
|
|
|
| 544 |
apply_mask=True,
|
| 545 |
show_mesh=True,
|
| 546 |
):
|
| 547 |
+
"""Run reconstruction → produce GLBs + depth / normal / measure data."""
|
| 548 |
if not os.path.isdir(target_dir) or target_dir == "None":
|
| 549 |
+
return (
|
| 550 |
+
None, None,
|
| 551 |
+
"Please upload files first.",
|
| 552 |
+
None, None, None, None, None, "", None, None, None,
|
| 553 |
+
)
|
| 554 |
|
| 555 |
start_time = time.time()
|
| 556 |
gc.collect()
|
| 557 |
torch.cuda.empty_cache()
|
| 558 |
|
| 559 |
target_dir_images = os.path.join(target_dir, "images")
|
| 560 |
+
all_files = (
|
| 561 |
+
sorted(os.listdir(target_dir_images))
|
| 562 |
+
if os.path.isdir(target_dir_images)
|
| 563 |
+
else []
|
| 564 |
+
)
|
| 565 |
+
all_files_display = [f"{i}: {f}" for i, f in enumerate(all_files)]
|
| 566 |
+
frame_filter_choices = ["All"] + all_files_display
|
| 567 |
|
| 568 |
+
# ── run model (Raw 3D logic — unchanged) ──────────────────────────────
|
| 569 |
print("Running MapAnything model...")
|
| 570 |
with torch.no_grad():
|
| 571 |
+
predictions = run_model(target_dir, apply_mask)
|
|
|
|
|
|
|
| 572 |
|
| 573 |
prediction_save_path = os.path.join(target_dir, "predictions.npz")
|
| 574 |
np.savez(prediction_save_path, **predictions)
|
| 575 |
|
| 576 |
+
if frame_filter is None:
|
| 577 |
+
frame_filter = "All"
|
| 578 |
|
| 579 |
+
# ── GLB for Raw 3D tab (unchanged) ────────────────────────────────────
|
| 580 |
+
raw_glb_path = os.path.join(
|
| 581 |
target_dir,
|
| 582 |
+
"raw_"
|
| 583 |
+
+ f"glbscene_{frame_filter.replace('.','').replace(':','').replace(' ','')}"
|
| 584 |
+
+ f"_cam{show_cam}_mesh{show_mesh}.glb",
|
| 585 |
)
|
|
|
|
| 586 |
glbscene = predictions_to_glb(
|
| 587 |
predictions,
|
| 588 |
filter_by_frames=frame_filter,
|
|
|
|
| 592 |
as_mesh=show_mesh,
|
| 593 |
conf_percentile=conf_thres,
|
| 594 |
)
|
| 595 |
+
glbscene.export(file_obj=raw_glb_path)
|
| 596 |
+
|
| 597 |
+
# ── GLB for 3D View tab (same scene, separate file) ──────────────────
|
| 598 |
+
view_glb_path = os.path.join(
|
| 599 |
+
target_dir,
|
| 600 |
+
"view_"
|
| 601 |
+
+ f"glbscene_{frame_filter.replace('.','').replace(':','').replace(' ','')}"
|
| 602 |
+
+ f"_cam{show_cam}_mesh{show_mesh}.glb",
|
| 603 |
+
)
|
| 604 |
+
glbscene.export(file_obj=view_glb_path)
|
| 605 |
+
|
| 606 |
+
# ── process for Depth / Normal / Measure tabs ─────────────────────────
|
| 607 |
+
views = load_images(os.path.join(target_dir, "images"))
|
| 608 |
+
processed_data = process_predictions_for_visualization(
|
| 609 |
+
predictions, views, high_level_config, filter_black_bg, filter_white_bg
|
| 610 |
+
)
|
| 611 |
+
depth_vis, normal_vis, measure_img, _ = populate_visualization_tabs(
|
| 612 |
+
processed_data
|
| 613 |
+
)
|
| 614 |
+
depth_sel, normal_sel, measure_sel = update_view_selectors(processed_data)
|
| 615 |
|
| 616 |
del predictions
|
| 617 |
gc.collect()
|
| 618 |
torch.cuda.empty_cache()
|
| 619 |
|
| 620 |
+
elapsed = time.time() - start_time
|
| 621 |
+
print(f"Total time elapsed: {elapsed:.2f}s")
|
| 622 |
+
log_msg = f"✅ Reconstruction successful ({len(all_files)} frames, {elapsed:.1f}s)"
|
|
|
|
|
|
|
| 623 |
|
| 624 |
return (
|
| 625 |
+
raw_glb_path,
|
| 626 |
+
view_glb_path,
|
| 627 |
log_msg,
|
| 628 |
+
gr.Dropdown(
|
| 629 |
+
choices=frame_filter_choices,
|
| 630 |
+
value=frame_filter,
|
| 631 |
+
interactive=True,
|
| 632 |
+
),
|
| 633 |
processed_data,
|
| 634 |
depth_vis,
|
| 635 |
normal_vis,
|
| 636 |
measure_img,
|
| 637 |
+
"",
|
| 638 |
+
depth_sel,
|
| 639 |
+
normal_sel,
|
| 640 |
+
measure_sel,
|
| 641 |
)
|
| 642 |
|
| 643 |
+
|
| 644 |
+
# ============================================================================
|
| 645 |
+
# UI Helpers
|
| 646 |
+
# ============================================================================
|
| 647 |
+
|
| 648 |
+
|
| 649 |
+
def clear_fields():
|
| 650 |
+
"""Clear both 3D viewers."""
|
| 651 |
+
return None, None
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def update_log():
|
| 655 |
+
return "⏳ Loading and reconstructing…"
|
| 656 |
+
|
| 657 |
|
| 658 |
def update_visualization(
|
| 659 |
+
target_dir,
|
| 660 |
+
frame_filter,
|
| 661 |
+
show_cam,
|
| 662 |
+
is_example,
|
| 663 |
+
conf_thres=None,
|
| 664 |
+
filter_black_bg=False,
|
| 665 |
+
filter_white_bg=False,
|
| 666 |
+
show_mesh=True,
|
| 667 |
):
|
| 668 |
+
"""Re-render the GLB from saved predictions (live parameter tweaks)."""
|
| 669 |
+
if is_example == "True":
|
| 670 |
+
return gr.update(), gr.update(), "No reconstruction available."
|
| 671 |
|
| 672 |
+
if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
|
| 673 |
+
return gr.update(), gr.update(), "No reconstruction available."
|
| 674 |
+
|
| 675 |
+
ppath = os.path.join(target_dir, "predictions.npz")
|
| 676 |
+
if not os.path.exists(ppath):
|
| 677 |
+
return (
|
| 678 |
+
gr.update(),
|
| 679 |
+
gr.update(),
|
| 680 |
+
"Run 'Start Reconstruction' first.",
|
| 681 |
+
)
|
| 682 |
|
| 683 |
+
loaded = np.load(ppath, allow_pickle=True)
|
| 684 |
+
predictions = {k: loaded[k] for k in loaded}
|
| 685 |
|
| 686 |
+
tag = (
|
| 687 |
+
f"{frame_filter.replace('.','').replace(':','').replace(' ','')}"
|
| 688 |
+
f"_cam{show_cam}_mesh{show_mesh}"
|
| 689 |
+
f"_black{filter_black_bg}_white{filter_white_bg}"
|
| 690 |
)
|
| 691 |
|
| 692 |
+
raw_glb = os.path.join(target_dir, f"raw_glbscene_{tag}.glb")
|
| 693 |
+
view_glb = os.path.join(target_dir, f"view_glbscene_{tag}.glb")
|
| 694 |
+
|
| 695 |
+
glbscene = predictions_to_glb(
|
| 696 |
+
predictions,
|
| 697 |
+
filter_by_frames=frame_filter,
|
| 698 |
+
show_cam=show_cam,
|
| 699 |
+
mask_black_bg=filter_black_bg,
|
| 700 |
+
mask_white_bg=filter_white_bg,
|
| 701 |
+
as_mesh=show_mesh,
|
| 702 |
+
conf_percentile=conf_thres,
|
| 703 |
+
)
|
| 704 |
+
glbscene.export(file_obj=raw_glb)
|
| 705 |
+
glbscene.export(file_obj=view_glb)
|
| 706 |
+
|
| 707 |
+
return raw_glb, view_glb, "Visualization updated."
|
| 708 |
|
|
|
|
| 709 |
|
| 710 |
def update_all_views_on_filter_change(
|
| 711 |
+
target_dir,
|
| 712 |
+
filter_black_bg,
|
| 713 |
+
filter_white_bg,
|
| 714 |
+
processed_data,
|
| 715 |
+
depth_sel,
|
| 716 |
+
normal_sel,
|
| 717 |
+
measure_sel,
|
| 718 |
):
|
| 719 |
+
"""Regenerate per-view data when background-filter checkboxes change."""
|
| 720 |
if not target_dir or target_dir == "None" or not os.path.isdir(target_dir):
|
| 721 |
return processed_data, None, None, None, []
|
| 722 |
|
| 723 |
+
ppath = os.path.join(target_dir, "predictions.npz")
|
| 724 |
+
if not os.path.exists(ppath):
|
| 725 |
return processed_data, None, None, None, []
|
| 726 |
|
| 727 |
try:
|
| 728 |
+
loaded = np.load(ppath, allow_pickle=True)
|
| 729 |
+
predictions = {k: loaded[k] for k in loaded}
|
| 730 |
+
views = load_images(os.path.join(target_dir, "images"))
|
|
|
|
| 731 |
|
| 732 |
+
new_pd = process_predictions_for_visualization(
|
| 733 |
predictions, views, high_level_config, filter_black_bg, filter_white_bg
|
| 734 |
)
|
| 735 |
|
| 736 |
+
di = _view_index_from_selector(depth_sel) if depth_sel else 0
|
| 737 |
+
ni = _view_index_from_selector(normal_sel) if normal_sel else 0
|
| 738 |
+
mi = _view_index_from_selector(measure_sel) if measure_sel else 0
|
|
|
|
|
|
|
|
|
|
| 739 |
|
| 740 |
+
return (
|
| 741 |
+
new_pd,
|
| 742 |
+
update_depth_view(new_pd, di),
|
| 743 |
+
update_normal_view(new_pd, ni),
|
| 744 |
+
update_measure_view(new_pd, mi)[0],
|
| 745 |
+
[],
|
| 746 |
+
)
|
| 747 |
except Exception as e:
|
| 748 |
+
print(f"Filter-change error: {e}")
|
| 749 |
return processed_data, None, None, None, []
|
| 750 |
|
| 751 |
+
|
| 752 |
# ============================================================================
|
| 753 |
+
# Example Scenes
|
| 754 |
# ============================================================================
|
| 755 |
+
|
| 756 |
+
|
| 757 |
def get_scene_info(examples_dir):
|
| 758 |
import glob
|
| 759 |
+
|
| 760 |
scenes = []
|
| 761 |
+
if not os.path.exists(examples_dir):
|
| 762 |
+
return scenes
|
| 763 |
+
|
| 764 |
+
for folder in sorted(os.listdir(examples_dir)):
|
| 765 |
+
spath = os.path.join(examples_dir, folder)
|
| 766 |
+
if not os.path.isdir(spath):
|
| 767 |
+
continue
|
| 768 |
+
imgs = []
|
| 769 |
+
for ext in [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif"]:
|
| 770 |
+
imgs.extend(glob.glob(os.path.join(spath, f"*{ext}")))
|
| 771 |
+
imgs.extend(glob.glob(os.path.join(spath, f"*{ext.upper()}")))
|
| 772 |
+
if imgs:
|
| 773 |
+
imgs = sorted(imgs)
|
| 774 |
+
scenes.append(
|
| 775 |
+
{
|
| 776 |
+
"name": folder,
|
| 777 |
+
"path": spath,
|
| 778 |
+
"thumbnail": imgs[0],
|
| 779 |
+
"num_images": len(imgs),
|
| 780 |
+
"image_files": imgs,
|
| 781 |
+
}
|
| 782 |
+
)
|
| 783 |
return scenes
|
| 784 |
|
| 785 |
+
|
| 786 |
def load_example_scene(scene_name, examples_dir="examples"):
|
| 787 |
scenes = get_scene_info(examples_dir)
|
| 788 |
+
sel = next((s for s in scenes if s["name"] == scene_name), None)
|
| 789 |
+
if sel is None:
|
| 790 |
+
return None, None, None, None, "Scene not found"
|
| 791 |
|
| 792 |
+
target_dir, image_paths = handle_uploads(sel["image_files"])
|
| 793 |
return (
|
| 794 |
+
None,
|
| 795 |
+
None,
|
| 796 |
+
target_dir,
|
| 797 |
+
image_paths,
|
| 798 |
+
f"Loaded '{scene_name}' ({sel['num_images']} images). "
|
| 799 |
+
"Click 'Start Reconstruction' to begin.",
|
| 800 |
)
|
| 801 |
|
| 802 |
+
|
| 803 |
# ============================================================================
|
| 804 |
+
# Gradio UI
|
| 805 |
# ============================================================================
|
| 806 |
+
|
| 807 |
theme = get_gradio_theme()
|
| 808 |
+
|
| 809 |
+
APP_CSS = (
|
| 810 |
+
GRADIO_CSS
|
| 811 |
+
+ """
|
| 812 |
.gradio-container { max-width: 100% !important; }
|
| 813 |
.gallery-container { max-height: 350px !important; overflow-y: auto !important; }
|
| 814 |
+
.file-preview { max-height: 200px !important; overflow-y: auto !important; }
|
| 815 |
.textbox-container { max-height: 100px !important; }
|
| 816 |
.tab-content { min-height: 550px !important; }
|
| 817 |
"""
|
| 818 |
+
)
|
| 819 |
|
| 820 |
with gr.Blocks(theme=theme, css=APP_CSS) as demo:
|
| 821 |
+
|
| 822 |
+
# ── hidden / state ────────────────────────────────────────────────────
|
| 823 |
+
is_example = gr.Textbox(visible=False, value="None")
|
| 824 |
+
target_dir_output = gr.Textbox(visible=False, value="None")
|
| 825 |
processed_data_state = gr.State(value=None)
|
| 826 |
measure_points_state = gr.State(value=[])
|
|
|
|
| 827 |
|
| 828 |
with gr.Row(equal_height=False):
|
| 829 |
+
|
| 830 |
+
# ── LEFT: upload ──────────────────────────────────────────────────
|
| 831 |
with gr.Column(scale=1, min_width=300):
|
| 832 |
gr.Markdown("### 📤 Input")
|
| 833 |
+
|
| 834 |
input_images = gr.File(
|
| 835 |
+
file_count="multiple",
|
| 836 |
+
label="Upload images (3-10 recommended)",
|
| 837 |
+
interactive=True,
|
| 838 |
+
height=200,
|
| 839 |
)
|
| 840 |
+
|
| 841 |
image_gallery = gr.Gallery(
|
| 842 |
+
label="Image Preview",
|
| 843 |
+
columns=3,
|
| 844 |
+
height=350,
|
| 845 |
+
object_fit="contain",
|
| 846 |
+
preview=True,
|
| 847 |
)
|
| 848 |
+
|
| 849 |
with gr.Row():
|
| 850 |
+
submit_btn = gr.Button(
|
| 851 |
+
"🚀 Start Reconstruction", variant="primary", scale=2
|
| 852 |
+
)
|
| 853 |
+
clear_btn = gr.ClearButton(
|
| 854 |
+
[input_images, target_dir_output, image_gallery],
|
| 855 |
+
value="🗑️ Clear",
|
| 856 |
+
scale=1,
|
| 857 |
+
)
|
| 858 |
+
|
| 859 |
+
# ── RIGHT: output tabs ────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 860 |
with gr.Column(scale=2, min_width=600):
|
| 861 |
gr.Markdown("### 🎯 Output")
|
| 862 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
| 863 |
with gr.Tabs():
|
| 864 |
+
|
| 865 |
+
# ---- Tab 1: Raw 3D (unchanged) ----
|
| 866 |
with gr.Tab("🏗️ Raw 3D"):
|
| 867 |
+
raw_3d_output = gr.Model3D(
|
| 868 |
+
height=550,
|
| 869 |
+
zoom_speed=0.5,
|
| 870 |
+
pan_speed=0.5,
|
| 871 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
| 872 |
+
)
|
| 873 |
+
|
| 874 |
+
# ---- Tab 2: 3D View (unchanged) ----
|
| 875 |
+
with gr.Tab("🔮 3D View"):
|
| 876 |
+
view_3d_output = gr.Model3D(
|
| 877 |
+
height=550,
|
| 878 |
+
zoom_speed=0.5,
|
| 879 |
+
pan_speed=0.5,
|
| 880 |
+
clear_color=[0.0, 0.0, 0.0, 0.0],
|
| 881 |
+
)
|
| 882 |
+
|
| 883 |
+
# ---- Tab 3: Depth ----
|
| 884 |
+
with gr.Tab("🌊 Depth"):
|
| 885 |
+
with gr.Row():
|
| 886 |
+
prev_depth_btn = gr.Button(
|
| 887 |
+
"◀ Previous", size="sm", scale=1
|
| 888 |
+
)
|
| 889 |
+
depth_view_selector = gr.Dropdown(
|
| 890 |
+
choices=["View 1"],
|
| 891 |
+
value="View 1",
|
| 892 |
+
label="Select View",
|
| 893 |
+
scale=2,
|
| 894 |
+
interactive=True,
|
| 895 |
+
allow_custom_value=True,
|
| 896 |
+
)
|
| 897 |
+
next_depth_btn = gr.Button(
|
| 898 |
+
"Next ▶", size="sm", scale=1
|
| 899 |
+
)
|
| 900 |
+
depth_map = gr.Image(
|
| 901 |
+
type="numpy",
|
| 902 |
+
label="Colorized Depth Map",
|
| 903 |
+
format="png",
|
| 904 |
+
interactive=False,
|
| 905 |
+
)
|
| 906 |
+
|
| 907 |
+
# ---- Tab 4: Normal ----
|
| 908 |
+
with gr.Tab("🧭 Normal"):
|
| 909 |
+
with gr.Row():
|
| 910 |
+
prev_normal_btn = gr.Button(
|
| 911 |
+
"◀ Previous", size="sm", scale=1
|
| 912 |
+
)
|
| 913 |
+
normal_view_selector = gr.Dropdown(
|
| 914 |
+
choices=["View 1"],
|
| 915 |
+
value="View 1",
|
| 916 |
+
label="Select View",
|
| 917 |
+
scale=2,
|
| 918 |
+
interactive=True,
|
| 919 |
+
allow_custom_value=True,
|
| 920 |
+
)
|
| 921 |
+
next_normal_btn = gr.Button(
|
| 922 |
+
"Next ▶", size="sm", scale=1
|
| 923 |
+
)
|
| 924 |
+
normal_map = gr.Image(
|
| 925 |
+
type="numpy",
|
| 926 |
+
label="Normal Map",
|
| 927 |
+
format="png",
|
| 928 |
+
interactive=False,
|
| 929 |
+
)
|
| 930 |
+
|
| 931 |
+
# ---- Tab 5: Measure ----
|
| 932 |
+
with gr.Tab("📏 Measure"):
|
| 933 |
+
gr.Markdown(MEASURE_INSTRUCTIONS)
|
| 934 |
+
with gr.Row():
|
| 935 |
+
prev_measure_btn = gr.Button(
|
| 936 |
+
"◀ Previous", size="sm", scale=1
|
| 937 |
+
)
|
| 938 |
+
measure_view_selector = gr.Dropdown(
|
| 939 |
+
choices=["View 1"],
|
| 940 |
+
value="View 1",
|
| 941 |
+
label="Select View",
|
| 942 |
+
scale=2,
|
| 943 |
+
interactive=True,
|
| 944 |
+
allow_custom_value=True,
|
| 945 |
+
)
|
| 946 |
+
next_measure_btn = gr.Button(
|
| 947 |
+
"Next ▶", size="sm", scale=1
|
| 948 |
+
)
|
| 949 |
+
measure_image = gr.Image(
|
| 950 |
+
type="numpy",
|
| 951 |
+
show_label=False,
|
| 952 |
+
format="webp",
|
| 953 |
+
interactive=False,
|
| 954 |
+
sources=[],
|
| 955 |
+
)
|
| 956 |
+
gr.Markdown(
|
| 957 |
+
"**Note:** Light-grey areas have no valid depth — "
|
| 958 |
+
"measurements cannot be taken there."
|
| 959 |
+
)
|
| 960 |
measure_text = gr.Markdown("")
|
| 961 |
|
| 962 |
+
log_output = gr.Textbox(
|
| 963 |
+
value="📌 Upload images, then click 'Start Reconstruction'.",
|
| 964 |
+
label="Status",
|
| 965 |
+
interactive=False,
|
| 966 |
+
lines=1,
|
| 967 |
+
max_lines=1,
|
| 968 |
+
)
|
| 969 |
+
|
| 970 |
+
# ── Advanced Options ──────────────────────────────────────────────────
|
| 971 |
+
with gr.Accordion("⚙️ Advanced Options", open=False):
|
| 972 |
+
with gr.Row(equal_height=False):
|
| 973 |
+
with gr.Column(scale=1, min_width=300):
|
| 974 |
+
gr.Markdown("#### Visualization Parameters")
|
| 975 |
+
frame_filter = gr.Dropdown(
|
| 976 |
+
choices=["All"], value="All", label="Display Frame"
|
| 977 |
+
)
|
| 978 |
+
conf_thres = gr.Slider(
|
| 979 |
+
minimum=0,
|
| 980 |
+
maximum=100,
|
| 981 |
+
value=0,
|
| 982 |
+
step=0.1,
|
| 983 |
+
label="Confidence Threshold (Percentile)",
|
| 984 |
+
)
|
| 985 |
+
show_cam = gr.Checkbox(label="Show Camera", value=True)
|
| 986 |
+
show_mesh = gr.Checkbox(label="Show Mesh", value=True)
|
| 987 |
+
filter_black_bg = gr.Checkbox(
|
| 988 |
+
label="Filter Black Background", value=False
|
| 989 |
+
)
|
| 990 |
+
filter_white_bg = gr.Checkbox(
|
| 991 |
+
label="Filter White Background", value=False
|
| 992 |
+
)
|
| 993 |
+
|
| 994 |
+
with gr.Column(scale=1, min_width=300):
|
| 995 |
+
gr.Markdown("#### Reconstruction Parameters")
|
| 996 |
+
apply_mask_checkbox = gr.Checkbox(
|
| 997 |
+
label="Apply Depth Mask", value=True
|
| 998 |
+
)
|
| 999 |
+
|
| 1000 |
+
# ── Example Scenes ────────────────────────────────────────────────────
|
| 1001 |
+
with gr.Accordion("🖼️ Example Scenes", open=False):
|
| 1002 |
+
scenes = get_scene_info("examples")
|
| 1003 |
+
if scenes:
|
| 1004 |
+
for i in range(0, len(scenes), 4):
|
| 1005 |
+
with gr.Row(equal_height=True):
|
| 1006 |
+
for j in range(4):
|
| 1007 |
+
si = i + j
|
| 1008 |
+
if si < len(scenes):
|
| 1009 |
+
sc = scenes[si]
|
| 1010 |
+
with gr.Column(scale=1, min_width=150):
|
| 1011 |
+
sc_img = gr.Image(
|
| 1012 |
+
value=sc["thumbnail"],
|
| 1013 |
+
height=150,
|
| 1014 |
+
interactive=False,
|
| 1015 |
+
show_label=False,
|
| 1016 |
+
sources=[],
|
| 1017 |
+
container=False,
|
| 1018 |
+
)
|
| 1019 |
+
gr.Markdown(
|
| 1020 |
+
f"{sc['name']} ({sc['num_images']} imgs)",
|
| 1021 |
+
elem_classes=["text-center"],
|
| 1022 |
+
)
|
| 1023 |
+
sc_img.select(
|
| 1024 |
+
fn=lambda n=sc["name"]: load_example_scene(n),
|
| 1025 |
+
outputs=[
|
| 1026 |
+
raw_3d_output,
|
| 1027 |
+
view_3d_output,
|
| 1028 |
+
target_dir_output,
|
| 1029 |
+
image_gallery,
|
| 1030 |
+
log_output,
|
| 1031 |
+
],
|
| 1032 |
+
)
|
| 1033 |
+
|
| 1034 |
+
# ======================================================================
|
| 1035 |
+
# EVENT BINDING
|
| 1036 |
+
# ======================================================================
|
| 1037 |
+
|
| 1038 |
+
# ── upload → gallery ──────────────────────────────────────────────────
|
| 1039 |
input_images.change(
|
| 1040 |
+
fn=update_gallery_on_upload,
|
| 1041 |
+
inputs=[input_images],
|
| 1042 |
+
outputs=[
|
| 1043 |
+
raw_3d_output,
|
| 1044 |
+
view_3d_output,
|
| 1045 |
+
target_dir_output,
|
| 1046 |
+
image_gallery,
|
| 1047 |
+
log_output,
|
| 1048 |
+
],
|
| 1049 |
)
|
| 1050 |
|
| 1051 |
+
# ── reconstruct ───────────────────────────────────────────────────────
|
| 1052 |
submit_btn.click(
|
| 1053 |
+
fn=clear_fields,
|
| 1054 |
+
outputs=[raw_3d_output, view_3d_output],
|
| 1055 |
).then(
|
| 1056 |
+
fn=update_log,
|
| 1057 |
+
outputs=[log_output],
|
| 1058 |
).then(
|
| 1059 |
fn=gradio_demo,
|
| 1060 |
inputs=[
|
| 1061 |
+
target_dir_output,
|
| 1062 |
+
frame_filter,
|
| 1063 |
+
show_cam,
|
| 1064 |
+
filter_black_bg,
|
| 1065 |
+
filter_white_bg,
|
| 1066 |
+
conf_thres,
|
| 1067 |
+
apply_mask_checkbox,
|
| 1068 |
+
show_mesh,
|
| 1069 |
],
|
| 1070 |
outputs=[
|
| 1071 |
+
raw_3d_output,
|
| 1072 |
+
view_3d_output,
|
| 1073 |
+
log_output,
|
| 1074 |
+
frame_filter,
|
| 1075 |
+
processed_data_state,
|
| 1076 |
+
depth_map,
|
| 1077 |
+
normal_map,
|
| 1078 |
+
measure_image,
|
| 1079 |
+
measure_text,
|
| 1080 |
+
depth_view_selector,
|
| 1081 |
+
normal_view_selector,
|
| 1082 |
+
measure_view_selector,
|
| 1083 |
+
],
|
| 1084 |
).then(
|
| 1085 |
+
fn=lambda: "False",
|
| 1086 |
+
outputs=[is_example],
|
| 1087 |
)
|
| 1088 |
+
|
| 1089 |
+
# ── clear ─────────────────────────────────────────────────────────────
|
| 1090 |
clear_btn.add([raw_3d_output, view_3d_output, log_output])
|
| 1091 |
|
| 1092 |
+
# ── live viz-parameter updates (3D viewers only) ──────────────────────
|
| 1093 |
+
for comp in [frame_filter, show_cam, conf_thres, show_mesh]:
|
| 1094 |
+
comp.change(
|
| 1095 |
fn=update_visualization,
|
| 1096 |
+
inputs=[
|
| 1097 |
+
target_dir_output,
|
| 1098 |
+
frame_filter,
|
| 1099 |
+
show_cam,
|
| 1100 |
+
is_example,
|
| 1101 |
+
conf_thres,
|
| 1102 |
+
filter_black_bg,
|
| 1103 |
+
filter_white_bg,
|
| 1104 |
+
show_mesh,
|
| 1105 |
+
],
|
| 1106 |
+
outputs=[raw_3d_output, view_3d_output, log_output],
|
| 1107 |
)
|
| 1108 |
|
| 1109 |
+
# background-filter changes → update BOTH 3D views AND per-view tabs
|
| 1110 |
+
for bg_comp in [filter_black_bg, filter_white_bg]:
|
| 1111 |
+
bg_comp.change(
|
| 1112 |
fn=update_visualization,
|
| 1113 |
+
inputs=[
|
| 1114 |
+
target_dir_output,
|
| 1115 |
+
frame_filter,
|
| 1116 |
+
show_cam,
|
| 1117 |
+
is_example,
|
| 1118 |
+
conf_thres,
|
| 1119 |
+
filter_black_bg,
|
| 1120 |
+
filter_white_bg,
|
| 1121 |
+
show_mesh,
|
| 1122 |
+
],
|
| 1123 |
+
outputs=[raw_3d_output, view_3d_output, log_output],
|
| 1124 |
).then(
|
| 1125 |
fn=update_all_views_on_filter_change,
|
| 1126 |
+
inputs=[
|
| 1127 |
+
target_dir_output,
|
| 1128 |
+
filter_black_bg,
|
| 1129 |
+
filter_white_bg,
|
| 1130 |
+
processed_data_state,
|
| 1131 |
+
depth_view_selector,
|
| 1132 |
+
normal_view_selector,
|
| 1133 |
+
measure_view_selector,
|
| 1134 |
+
],
|
| 1135 |
+
outputs=[
|
| 1136 |
+
processed_data_state,
|
| 1137 |
+
depth_map,
|
| 1138 |
+
normal_map,
|
| 1139 |
+
measure_image,
|
| 1140 |
+
measure_points_state,
|
| 1141 |
+
],
|
| 1142 |
)
|
| 1143 |
|
| 1144 |
+
# ── Depth navigation ─────────────────────────────────────────────────
|
| 1145 |
+
prev_depth_btn.click(
|
| 1146 |
+
fn=lambda pd, cs: navigate_depth_view(pd, cs, -1),
|
| 1147 |
+
inputs=[processed_data_state, depth_view_selector],
|
| 1148 |
+
outputs=[depth_view_selector, depth_map],
|
| 1149 |
+
)
|
| 1150 |
+
next_depth_btn.click(
|
| 1151 |
+
fn=lambda pd, cs: navigate_depth_view(pd, cs, 1),
|
| 1152 |
+
inputs=[processed_data_state, depth_view_selector],
|
| 1153 |
+
outputs=[depth_view_selector, depth_map],
|
| 1154 |
+
)
|
| 1155 |
+
depth_view_selector.change(
|
| 1156 |
+
fn=lambda pd, sv: (
|
| 1157 |
+
update_depth_view(pd, _view_index_from_selector(sv))
|
| 1158 |
+
if sv
|
| 1159 |
+
else None
|
| 1160 |
+
),
|
| 1161 |
+
inputs=[processed_data_state, depth_view_selector],
|
| 1162 |
+
outputs=[depth_map],
|
| 1163 |
)
|
| 1164 |
|
| 1165 |
+
# ── Normal navigation ─────────────────────────────────────────────────
|
| 1166 |
+
prev_normal_btn.click(
|
| 1167 |
+
fn=lambda pd, cs: navigate_normal_view(pd, cs, -1),
|
| 1168 |
+
inputs=[processed_data_state, normal_view_selector],
|
| 1169 |
+
outputs=[normal_view_selector, normal_map],
|
| 1170 |
+
)
|
| 1171 |
+
next_normal_btn.click(
|
| 1172 |
+
fn=lambda pd, cs: navigate_normal_view(pd, cs, 1),
|
| 1173 |
+
inputs=[processed_data_state, normal_view_selector],
|
| 1174 |
+
outputs=[normal_view_selector, normal_map],
|
| 1175 |
+
)
|
| 1176 |
+
normal_view_selector.change(
|
| 1177 |
+
fn=lambda pd, sv: (
|
| 1178 |
+
update_normal_view(pd, _view_index_from_selector(sv))
|
| 1179 |
+
if sv
|
| 1180 |
+
else None
|
| 1181 |
+
),
|
| 1182 |
+
inputs=[processed_data_state, normal_view_selector],
|
| 1183 |
+
outputs=[normal_map],
|
| 1184 |
+
)
|
| 1185 |
|
| 1186 |
+
# ── Measure navigation + click ────────────────────────────────────────
|
| 1187 |
+
prev_measure_btn.click(
|
| 1188 |
+
fn=lambda pd, cs: navigate_measure_view(pd, cs, -1),
|
| 1189 |
+
inputs=[processed_data_state, measure_view_selector],
|
| 1190 |
+
outputs=[measure_view_selector, measure_image, measure_points_state],
|
| 1191 |
+
)
|
| 1192 |
+
next_measure_btn.click(
|
| 1193 |
+
fn=lambda pd, cs: navigate_measure_view(pd, cs, 1),
|
| 1194 |
+
inputs=[processed_data_state, measure_view_selector],
|
| 1195 |
+
outputs=[measure_view_selector, measure_image, measure_points_state],
|
| 1196 |
+
)
|
| 1197 |
+
measure_view_selector.change(
|
| 1198 |
+
fn=lambda pd, sv: (
|
| 1199 |
+
update_measure_view(pd, _view_index_from_selector(sv))
|
| 1200 |
+
if sv
|
| 1201 |
+
else (None, [])
|
| 1202 |
+
),
|
| 1203 |
+
inputs=[processed_data_state, measure_view_selector],
|
| 1204 |
+
outputs=[measure_image, measure_points_state],
|
| 1205 |
+
)
|
| 1206 |
+
measure_image.select(
|
| 1207 |
+
fn=measure,
|
| 1208 |
+
inputs=[
|
| 1209 |
+
processed_data_state,
|
| 1210 |
+
measure_points_state,
|
| 1211 |
+
measure_view_selector,
|
| 1212 |
+
],
|
| 1213 |
+
outputs=[measure_image, measure_points_state, measure_text],
|
| 1214 |
+
)
|
| 1215 |
|
|
|
|
|
|
|
|
|
|
| 1216 |
|
| 1217 |
+
# ============================================================================
|
| 1218 |
+
# Launch
|
| 1219 |
+
# ============================================================================
|
| 1220 |
|
| 1221 |
+
demo.queue(max_size=20).launch(
|
| 1222 |
+
theme=theme, css=APP_CSS, show_error=True, share=True, ssr_mode=False
|
| 1223 |
+
)
|