jskvrna commited on
Commit
a70b55e
·
1 Parent(s): a341c73

Refactors COLMAP point projection for vertex detection

Browse files

Simplifies vertex detection by introducing dedicated functions for projecting COLMAP points into 2D image coordinates. This change enhances code readability and maintainability. The original combined function is separated into smaller, modular functions, each responsible for a specific task: collecting the visible COLMAP points, projecting them to 2D image coordinates, and extracting apex and eave-end vertex points.

By parameterizing the shared apex/eave-end logic into a single helper called for both vertex types, the refactor also removes the previously duplicated processing code.

Files changed (2) hide show
  1. predict.py +87 -167
  2. train.py +1 -1
predict.py CHANGED
@@ -413,10 +413,10 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
413
  gest_seg = gest.resize(depth_size)
414
  gest_seg_np = np.array(gest_seg).astype(np.uint8)
415
 
416
- vertices_ours, connections_ours, vertices_3d_ours = our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id, ade_seg, depth, K=K, R=R, t=t, frame=good_entry)
417
- #vertices, connections, vertices_3d = vertices_ours, connections_ours, vertices_3d_ours
418
  # Get 2D vertices and edges first
419
- vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=25.)
420
 
421
  #gt_verts = []
422
  #gt_verts, gt_connects, gt_verts3d = get_gt_vertices_and_edges(good_entry, i, depth, colmap_rec, K, R, t, img_id, ade_seg)
@@ -444,7 +444,7 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
444
  continue
445
 
446
  # Call the refactored function to get 3D points
447
- vertices_3d = create_3d_wireframe_single_image(vertices, connections, depth, colmap_rec, img_id, ade_seg, K, R, t)
448
  #vertices_3d = gt_verts3d
449
  # Store original 2D vertices, connections, and computed 3D points
450
 
@@ -491,28 +491,7 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
491
 
492
  return all_3d_vertices_clean, connections_3d_clean
493
 
494
-
495
- def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_seg, depth, K=None, R=None, t=None, ):
496
- """
497
- Identify apex and eave-end vertices, then detect lines for eave/ridge/rake/valley.
498
- Also find all COLMAP points that project into apex or eave_end masks.
499
- """
500
- #--------------------------------------------------------------------------------
501
- # Step A: Collect apex and eave_end vertices
502
- #--------------------------------------------------------------------------------
503
- if not isinstance(gest_seg_np, np.ndarray):
504
- gest_seg_np = np.array(gest_seg_np)
505
-
506
- # Apex
507
- apex_color = np.array(gestalt_color_mapping['apex'])
508
- apex_mask = cv2.inRange(gest_seg_np, apex_color-10., apex_color+10.)
509
-
510
- # Eave end
511
- eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
512
- eave_end_mask = cv2.inRange(gest_seg_np, eave_end_color-10, eave_end_color+10)
513
-
514
- H, W = gest_seg_np.shape[:2]
515
-
516
  # 1) Find the matching COLMAP image to get its associated 3D points
517
  # This part remains to identify which 3D points are relevant for this image view
518
  found_img = None
@@ -548,6 +527,9 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
548
  points_cam_h = (world_to_cam_mat @ points_xyz_world_h.T).T # (N, 4)
549
  points_cam = points_cam_h[:, :3] / points_cam_h[:, 3, np.newaxis] # (N, 3) in camera coordinates
550
 
 
 
 
551
  uv = []
552
  valid_indices = [] # Track which original points are valid
553
 
@@ -569,7 +551,12 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
569
  if 0 <= u_i_int < W and 0 <= v_i_int < H:
570
  uv.append((u_i_int, v_i_int))
571
  valid_indices.append(i) # Store original index
 
 
 
 
572
 
 
573
  uv_colmap = []
574
  valid_indices_colmap = []
575
  for i, xyz in enumerate(points_xyz_world):
@@ -583,31 +570,27 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
583
  uv_colmap.append((u_i, v_i))
584
  valid_indices_colmap.append(i) # Store original index
585
 
586
- if not uv:
587
- print(f"No points projected into image bounds for {img_id_substring} using K,R,t.")
588
- return [], [], []
589
 
590
- house_mask = get_house_mask(ade_seg)
591
-
592
- uv = np.array(uv, dtype=int)
593
- valid_indices = np.array(valid_indices)
 
 
 
594
 
595
- # Filter points that fall within the apex or eave_end masks
596
  filtered_points_xyz = []
597
  filtered_point_idxs = []
598
  filtered_points_color = []
599
  filtered_vertices_apex = []
600
  filtered_vertices_apex_uv = []
601
- filtered_vertices_eave_end = []
602
- filtered_vertices_eave_end_uv = []
603
 
604
- # Apex
605
- apex_color = np.array(gestalt_color_mapping['apex'])
606
- apex_mask = cv2.inRange(gest_seg_np, apex_color-10., apex_color+10.)
607
  if apex_mask.sum() > 0:
608
  output = cv2.connectedComponentsWithStats(apex_mask, 8, cv2.CV_32S)
609
  (numLabels, labels, stats, centroids) = output
610
- stats, centroids = stats[1:], centroids[1:]
611
  for i in range(1, numLabels):
612
  cur_mask = labels == i
613
  # Dilate the current mask to make it slightly larger
@@ -623,72 +606,7 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
623
  valid_points_mask = cur_mask[uv[:, 1], uv[:, 0]] & house_mask[uv[:, 1], uv[:, 0]]
624
  else:
625
  break
626
- #
627
- if np.any(valid_points_mask):
628
- # Get indices of valid points
629
- valid_point_indices = valid_indices[valid_points_mask]
630
-
631
- # Get 3D points in camera coordinates for depth filtering
632
- valid_world_points = points_xyz_world[valid_point_indices]
633
- valid_cam_points = points_cam[valid_point_indices]
634
-
635
- # Compute depths (Z coordinates in camera space)
636
- depths = valid_cam_points[:, 2]
637
-
638
- # Find minimum depth and filter points within min_depth + 2 meters
639
- if len(depths) > 0:
640
- min_depth = np.min(depths)
641
- depth_filter = depths <= (min_depth + 2.0)
642
-
643
- # Apply depth filter
644
- final_valid_indices = valid_point_indices[depth_filter]
645
-
646
- # Add corresponding points to filtered lists
647
- filtered_points_xyz.extend(points_xyz_world[final_valid_indices])
648
- filtered_point_idxs.extend(points_idxs[final_valid_indices])
649
- filtered_points_color.extend([color] * np.sum(depth_filter))
650
 
651
- # Find the point with lowest depth in the filtered points
652
- if len(final_valid_indices) > 0:
653
- lowest_depth_idx = np.argmin(depths[depth_filter])
654
- lowest_depth_point = final_valid_indices[lowest_depth_idx]
655
- filtered_vertices_apex.append(points_xyz_world[lowest_depth_point])
656
- filtered_points_xyz.append(points_xyz_world[lowest_depth_point])
657
- filtered_point_idxs.append(points_idxs[lowest_depth_point])
658
- filtered_points_color.append(np.array([1., 1., 0.]))
659
-
660
- # Project the lowest depth point back to image coordinates for visualization
661
- lowest_cam_point = points_cam[lowest_depth_point]
662
-
663
- # Project to image plane using K
664
- u_proj = (K[0, 0] * lowest_cam_point[0] / lowest_cam_point[2]) + K[0, 2]
665
- v_proj = (K[1, 1] * lowest_cam_point[1] / lowest_cam_point[2]) + K[1, 2]
666
-
667
- u_proj_int = int(round(u_proj))
668
- v_proj_int = int(round(v_proj))
669
-
670
- filtered_vertices_apex_uv.append((u_proj_int, v_proj_int))
671
-
672
- # Eave end
673
- eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
674
- eave_end_mask = cv2.inRange(gest_seg_np, eave_end_color-10, eave_end_color+10)
675
- if eave_end_mask.sum() > 0:
676
- output = cv2.connectedComponentsWithStats(eave_end_mask, 8, cv2.CV_32S)
677
- (numLabels, labels, stats, centroids) = output
678
- stats, centroids = stats[1:], centroids[1:]
679
- for i in range(1, numLabels):
680
- cur_mask = labels == i
681
- kernel = np.ones((5,5), np.uint8)
682
- cur_mask = cv2.dilate(cur_mask.astype(np.uint8), kernel, iterations=2).astype(bool)
683
- color = np.random.rand(3)
684
- valid_points_mask = cur_mask[uv[:, 1], uv[:, 0]] & house_mask[uv[:, 1], uv[:, 0]]
685
-
686
- for z in range(5):
687
- if np.sum(valid_points_mask) < 5:
688
- cur_mask = cv2.dilate(cur_mask.astype(np.uint8), kernel, iterations=1).astype(bool)
689
- valid_points_mask = cur_mask[uv[:, 1], uv[:, 0]] & house_mask[uv[:, 1], uv[:, 0]]
690
- else:
691
- break
692
  if np.any(valid_points_mask):
693
  # Get indices of valid points
694
  valid_point_indices = valid_indices[valid_points_mask]
@@ -717,38 +635,35 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
717
  if len(final_valid_indices) > 0:
718
  lowest_depth_idx = np.argmin(depths[depth_filter])
719
  lowest_depth_point = final_valid_indices[lowest_depth_idx]
720
- filtered_vertices_eave_end.append(points_xyz_world[lowest_depth_point])
 
721
  filtered_points_xyz.append(points_xyz_world[lowest_depth_point])
722
  filtered_point_idxs.append(points_idxs[lowest_depth_point])
723
  filtered_points_color.append(np.array([1., 1., 0.]))
 
724
 
725
- # Project the lowest depth point back to image coordinates for visualization
726
- lowest_cam_point = points_cam[lowest_depth_point]
727
-
728
- # Project to image plane using K
729
- u_proj = (K[0, 0] * lowest_cam_point[0] / lowest_cam_point[2]) + K[0, 2]
730
- v_proj = (K[1, 1] * lowest_cam_point[1] / lowest_cam_point[2]) + K[1, 2]
731
 
732
- u_proj_int = int(round(u_proj))
733
- v_proj_int = int(round(v_proj))
 
734
 
735
- filtered_vertices_eave_end_uv.append((u_proj_int, v_proj_int))
 
 
 
736
 
737
- '''
738
- for i, (u, v) in enumerate(uv):
739
- # Check if this projected point falls within the combined maskvalid_indices
740
- if combined_mask[v, u] > 0 and house_mask[v, u] > 0:
741
- original_idx = valid_indices[i] # Get original index
742
- filtered_points_xyz.append(points_xyz_world[original_idx])
743
- filtered_point_idxs.append(points_idxs[original_idx])
744
- '''
745
  filtered_points_xyz = np.array(filtered_points_xyz[::-1]) if filtered_points_xyz else np.empty((0, 3))
746
  filtered_point_idxs = np.array(filtered_point_idxs[::-1]) if filtered_point_idxs else np.empty((0,))
747
  filtered_points_color = np.array(filtered_points_color[::-1]) if filtered_points_color else np.empty((0, 3))
748
  filtered_vertices_apex = np.array(filtered_vertices_apex) if filtered_vertices_apex else np.empty((0, 3))
749
- filtered_vertices_eave_end = np.array(filtered_vertices_eave_end) if filtered_vertices_eave_end else np.empty((0, 3))
 
 
750
 
 
751
 
 
752
  connections = []
753
  edge_classes = ['eave', 'ridge', 'rake', 'valley']
754
  edge_th = 25.0 # threshold for proximity to line segments
@@ -765,7 +680,7 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
765
  vertex_types.append('apex')
766
 
767
  # Add eave_end vertices
768
- for i, (vertex_3d, vertex_uv) in enumerate(zip(filtered_vertices_eave_end, filtered_vertices_eave_end_uv)):
769
  all_vertices_3d.append(vertex_3d)
770
  all_vertices_uv.append(vertex_uv)
771
  vertex_types.append('eave_end')
@@ -840,42 +755,17 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
840
  if conn not in connections:
841
  connections.append(conn)
842
 
843
- '''
844
- depth_fitted, depth_sparse, _, col_img = get_fitted_dense_depth(depth, colmap_rec, img_id_substring, ade_seg, K, R, t)
845
-
846
- # Segment the depth_fitted to get points in apex/eave_end regions
847
- segmented_points_3d = []
848
-
849
- # Get coordinates where the combined mask is active
850
- mask_coords = np.where(combined_mask > 0)
851
- v_coords, u_coords = mask_coords
852
-
853
- # Also apply house mask for additional filtering
854
- house_coords = np.where(house_mask > 0)
855
- house_v, house_u = house_coords
856
-
857
- # Find intersection of combined_mask and house_mask
858
- valid_mask = np.logical_and(combined_mask > 0, house_mask > 0)
859
- valid_coords = np.where(valid_mask)
860
- v_valid, u_valid = valid_coords
861
 
862
- if len(v_valid) > 0:
863
- # Get depth values at these coordinates
864
- depth_values = depth_fitted[v_valid, u_valid]
865
-
866
- # Filter out zero or invalid depth values
867
- valid_depth_mask = depth_values > 0
868
- if np.any(valid_depth_mask):
869
- u_final = u_valid[valid_depth_mask]
870
- v_final = v_valid[valid_depth_mask]
871
- depth_final = depth_values[valid_depth_mask]
872
-
873
- # Create UV coordinates for backprojection
874
- uv_depth = np.column_stack((u_final, v_final))
875
-
876
- # Backproject to 3D world coordinates
877
- segmented_points_3d = project_vertices_to_3d(uv_depth, depth_final, col_img, K, R, t)
878
- '''
879
  segmented_points_3d = []
880
 
881
  # Visualize with the segmented depth points in blue
@@ -912,12 +802,42 @@ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_se
912
  geometries.append(pcd_depth)
913
 
914
  #o3d.visualization.draw_geometries(geometries, window_name=f"Combined Point Cloud - {img_id_substring}")
915
- # Convert all_vertices_uv and vertex_types to the required format
916
- vertices_formatted = []
917
- for uv, vertex_type in zip(all_vertices_uv, vertex_types):
918
- vertices_formatted.append({
919
- 'xy': np.array(uv, dtype=float),
920
- 'type': vertex_type
921
- })
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
922
 
923
  return vertices_formatted, connections, all_vertices_3d
 
413
  gest_seg = gest.resize(depth_size)
414
  gest_seg_np = np.array(gest_seg).astype(np.uint8)
415
 
416
+ vertices_ours, connections_ours, vertices_3d_ours = our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id, ade_seg, depth, K=K, R=R, t=t)
417
+ vertices, connections, vertices_3d = vertices_ours, connections_ours, vertices_3d_ours
418
  # Get 2D vertices and edges first
419
+ #vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=25.)
420
 
421
  #gt_verts = []
422
  #gt_verts, gt_connects, gt_verts3d = get_gt_vertices_and_edges(good_entry, i, depth, colmap_rec, K, R, t, img_id, ade_seg)
 
444
  continue
445
 
446
  # Call the refactored function to get 3D points
447
+ #vertices_3d = create_3d_wireframe_single_image(vertices, connections, depth, colmap_rec, img_id, ade_seg, K, R, t)
448
  #vertices_3d = gt_verts3d
449
  # Store original 2D vertices, connections, and computed 3D points
450
 
 
491
 
492
  return all_3d_vertices_clean, connections_3d_clean
493
 
494
+ def get_visible_points(colmap_rec, img_id_substring, R=None, t=None):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
495
  # 1) Find the matching COLMAP image to get its associated 3D points
496
  # This part remains to identify which 3D points are relevant for this image view
497
  found_img = None
 
527
  points_cam_h = (world_to_cam_mat @ points_xyz_world_h.T).T # (N, 4)
528
  points_cam = points_cam_h[:, :3] / points_cam_h[:, 3, np.newaxis] # (N, 3) in camera coordinates
529
 
530
+ return points_cam, points_xyz_world, points_idxs
531
+
532
+ def project_points_to_2d(points_cam, K, H, W):
533
  uv = []
534
  valid_indices = [] # Track which original points are valid
535
 
 
551
  if 0 <= u_i_int < W and 0 <= v_i_int < H:
552
  uv.append((u_i_int, v_i_int))
553
  valid_indices.append(i) # Store original index
554
+
555
+ uv = np.array(uv, dtype=int) # shape (M,2)
556
+ valid_indices = np.array(valid_indices) # shape (M,)
557
+ return uv, valid_indices
558
 
559
+ def project_points_to_2d_colmap(points_xyz_world, found_img, H, W):
560
  uv_colmap = []
561
  valid_indices_colmap = []
562
  for i, xyz in enumerate(points_xyz_world):
 
570
  uv_colmap.append((u_i, v_i))
571
  valid_indices_colmap.append(i) # Store original index
572
 
573
+ uv_colmap = np.array(uv_colmap, dtype=int)
574
+ valid_indices_colmap = np.array(valid_indices_colmap)
575
+ return uv_colmap, valid_indices_colmap
576
 
577
+ def get_apex_or_eave_points(apex, uv, gest_seg_np, house_mask, valid_indices, points_xyz_world, points_cam, points_idxs):
578
+ # Apex
579
+ if apex:
580
+ apex_color = np.array(gestalt_color_mapping['apex'])
581
+ else:
582
+ apex_color = np.array(gestalt_color_mapping['eave_end_point'])
583
+ apex_mask = cv2.inRange(gest_seg_np, apex_color-10., apex_color+10.)
584
 
 
585
  filtered_points_xyz = []
586
  filtered_point_idxs = []
587
  filtered_points_color = []
588
  filtered_vertices_apex = []
589
  filtered_vertices_apex_uv = []
 
 
590
 
 
 
 
591
  if apex_mask.sum() > 0:
592
  output = cv2.connectedComponentsWithStats(apex_mask, 8, cv2.CV_32S)
593
  (numLabels, labels, stats, centroids) = output
 
594
  for i in range(1, numLabels):
595
  cur_mask = labels == i
596
  # Dilate the current mask to make it slightly larger
 
606
  valid_points_mask = cur_mask[uv[:, 1], uv[:, 0]] & house_mask[uv[:, 1], uv[:, 0]]
607
  else:
608
  break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
609
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
610
  if np.any(valid_points_mask):
611
  # Get indices of valid points
612
  valid_point_indices = valid_indices[valid_points_mask]
 
635
  if len(final_valid_indices) > 0:
636
  lowest_depth_idx = np.argmin(depths[depth_filter])
637
  lowest_depth_point = final_valid_indices[lowest_depth_idx]
638
+
639
+ filtered_vertices_apex.append(points_xyz_world[lowest_depth_point])
640
  filtered_points_xyz.append(points_xyz_world[lowest_depth_point])
641
  filtered_point_idxs.append(points_idxs[lowest_depth_point])
642
  filtered_points_color.append(np.array([1., 1., 0.]))
643
+ filtered_vertices_apex_uv.append(centroids[i])
644
 
645
+ return filtered_points_xyz, filtered_point_idxs, filtered_points_color, filtered_vertices_apex, filtered_vertices_apex_uv
 
 
 
 
 
646
 
647
+ def get_vertexes(uv, gest_seg_np, house_mask, valid_indices, points_xyz_world, points_cam, points_idxs):
648
+ filtered_points_xyz_apex, filtered_point_idxs_apex, filtered_points_color_apex, filtered_vertices_apex, filtered_vertices_apex_uv = get_apex_or_eave_points(True, uv, gest_seg_np, house_mask, valid_indices, points_xyz_world, points_cam, points_idxs)
649
+ filtered_points_xyz_eave, filtered_point_idxs_eave, filtered_points_color_eave, filtered_vertices_eave, filtered_vertices_eave_uv = get_apex_or_eave_points(False, uv, gest_seg_np, house_mask, valid_indices, points_xyz_world, points_cam, points_idxs)
650
 
651
+ # Combine filtered points from both apex and eave_end
652
+ filtered_points_xyz = filtered_points_xyz_apex + filtered_points_xyz_eave
653
+ filtered_point_idxs = filtered_point_idxs_apex + filtered_point_idxs_eave
654
+ filtered_points_color = filtered_points_color_apex + filtered_points_color_eave
655
 
 
 
 
 
 
 
 
 
656
  filtered_points_xyz = np.array(filtered_points_xyz[::-1]) if filtered_points_xyz else np.empty((0, 3))
657
  filtered_point_idxs = np.array(filtered_point_idxs[::-1]) if filtered_point_idxs else np.empty((0,))
658
  filtered_points_color = np.array(filtered_points_color[::-1]) if filtered_points_color else np.empty((0, 3))
659
  filtered_vertices_apex = np.array(filtered_vertices_apex) if filtered_vertices_apex else np.empty((0, 3))
660
+ filtered_vertices_apex_uv = np.array(filtered_vertices_apex_uv) if filtered_vertices_apex_uv else np.empty((0, 2))
661
+ filtered_vertices_eave = np.array(filtered_vertices_eave) if filtered_vertices_eave else np.empty((0, 3))
662
+ filtered_vertices_eave_uv = np.array(filtered_vertices_eave_uv) if filtered_vertices_eave_uv else np.empty((0, 2))
663
 
664
+ return filtered_points_xyz, filtered_point_idxs, filtered_points_color, filtered_vertices_apex, filtered_vertices_apex_uv, filtered_vertices_eave, filtered_vertices_eave_uv
665
 
666
+ def get_connections(gest_seg_np, filtered_vertices_apex, filtered_vertices_eave, filtered_vertices_apex_uv, filtered_vertices_eave_uv):
667
  connections = []
668
  edge_classes = ['eave', 'ridge', 'rake', 'valley']
669
  edge_th = 25.0 # threshold for proximity to line segments
 
680
  vertex_types.append('apex')
681
 
682
  # Add eave_end vertices
683
+ for i, (vertex_3d, vertex_uv) in enumerate(zip(filtered_vertices_eave, filtered_vertices_eave_uv)):
684
  all_vertices_3d.append(vertex_3d)
685
  all_vertices_uv.append(vertex_uv)
686
  vertex_types.append('eave_end')
 
755
  if conn not in connections:
756
  connections.append(conn)
757
 
758
+ # Convert all_vertices_uv and vertex_types to the required format
759
+ vertices_formatted = []
760
+ for uv, vertex_type in zip(all_vertices_uv, vertex_types):
761
+ vertices_formatted.append({
762
+ 'xy': np.array(uv, dtype=float),
763
+ 'type': vertex_type
764
+ })
765
+
766
+ return vertices_formatted, connections, all_vertices_3d
 
 
 
 
 
 
 
 
 
767
 
768
+ def visualize_3d_wireframe(colmap_rec, filtered_points_xyz, filtered_points_color, vertices_3d, connections):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
769
  segmented_points_3d = []
770
 
771
  # Visualize with the segmented depth points in blue
 
802
  geometries.append(pcd_depth)
803
 
804
  #o3d.visualization.draw_geometries(geometries, window_name=f"Combined Point Cloud - {img_id_substring}")
805
+
806
+ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_seg, depth, K=None, R=None, t=None, ):
807
+ """
808
+ Identify apex and eave-end vertices, then detect lines for eave/ridge/rake/valley.
809
+ Also find all COLMAP points that project into apex or eave_end masks.
810
+ """
811
+ #--------------------------------------------------------------------------------
812
+ # Step A: Collect apex and eave_end vertices
813
+ #--------------------------------------------------------------------------------
814
+ if not isinstance(gest_seg_np, np.ndarray):
815
+ gest_seg_np = np.array(gest_seg_np)
816
+
817
+ # Apex
818
+ apex_color = np.array(gestalt_color_mapping['apex'])
819
+ apex_mask = cv2.inRange(gest_seg_np, apex_color-10., apex_color+10.)
820
+
821
+ # Eave end
822
+ eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
823
+ eave_end_mask = cv2.inRange(gest_seg_np, eave_end_color-10, eave_end_color+10)
824
+
825
+ H, W = gest_seg_np.shape[:2]
826
+
827
+ points_cam, points_xyz_world, points_idxs = get_visible_points(colmap_rec, img_id_substring, R=R, t=t)
828
+
829
+ uv, valid_indices = project_points_to_2d(points_cam, K, H, W)
830
+
831
+ if len(uv) == 0:
832
+ print(f"No points projected into image bounds for {img_id_substring} using K,R,t.")
833
+ return [], [], []
834
+
835
+ house_mask = get_house_mask(ade_seg)
836
+
837
+ filtered_points_xyz, filtered_point_idxs, filtered_points_color, filtered_vertices_apex, filtered_vertices_apex_uv, filtered_vertices_eave, filtered_vertices_eave_uv = get_vertexes(uv, gest_seg_np, house_mask, valid_indices, points_xyz_world, points_cam, points_idxs)
838
+
839
+ vertices_formatted, connections, all_vertices_3d = get_connections(gest_seg_np, filtered_vertices_apex, filtered_vertices_eave, filtered_vertices_apex_uv, filtered_vertices_eave_uv)
840
+
841
+ #visualize_3d_wireframe(colmap_rec, filtered_points_xyz, filtered_points_color, all_vertices_3d, connections)
842
 
843
  return vertices_formatted, connections, all_vertices_3d
train.py CHANGED
@@ -14,7 +14,7 @@ from hoho2025.metric_helper import hss
14
  from predict import predict_wireframe
15
 
16
  ds = load_dataset("usm3d/hoho25k", streaming=True, trust_remote_code=True)
17
-
18
  scores_hss = []
19
  scores_f1 = []
20
  scores_iou = []
 
14
  from predict import predict_wireframe
15
 
16
  ds = load_dataset("usm3d/hoho25k", streaming=True, trust_remote_code=True)
17
+
18
  scores_hss = []
19
  scores_f1 = []
20
  scores_iou = []