jskvrna commited on
Commit
a341c73
·
1 Parent(s): 919ccdd

Improves wireframe prediction with edge fitting

Browse files

Refactors the wireframe prediction pipeline to incorporate edge fitting based on segment proximity.

The updated approach extracts apex and eave-end vertices and fits lines to edge segments based on proximity within a given threshold. This enables the generation of connections between nearby vertices, refining the overall wireframe structure. Additionally, visualization tools were added for debugging purposes.

Files changed (1) hide show
  1. predict.py +177 -31
predict.py CHANGED
@@ -1,6 +1,6 @@
1
  import numpy as np
2
  from typing import Tuple, List
3
- from hoho2025.example_solutions import empty_solution, read_colmap_rec, get_vertices_and_edges_from_segmentation, get_house_mask, fit_scale_robust_median, get_uv_depth, merge_vertices_3d, prune_not_connected, prune_too_far
4
  from hoho2025.color_mappings import ade20k_color_mapping, gestalt_color_mapping
5
  from PIL import Image, ImageDraw
6
  from visu import save_gestalt_with_proj, draw_crosses_on_image
@@ -9,6 +9,7 @@ import pycolmap
9
  from PIL import Image as PImage
10
  import cv2
11
  import open3d as o3d
 
12
 
13
  def convert_entry_to_human_readable(entry):
14
  out = {}
@@ -412,15 +413,12 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
412
  gest_seg = gest.resize(depth_size)
413
  gest_seg_np = np.array(gest_seg).astype(np.uint8)
414
 
415
- pcloud_segmented, pcloud_idxs = extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id, ade_seg, depth, K=K, R=R, t=t)
416
- for idx, p3D in enumerate(colmap_rec.points3D.values()):
417
- if idx in pcloud_idxs:
418
- p3D.color = np.array([255, 0, 0])
419
-
420
  # Get 2D vertices and edges first
421
- vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=20.)
422
 
423
- gt_verts = []
424
  #gt_verts, gt_connects, gt_verts3d = get_gt_vertices_and_edges(good_entry, i, depth, colmap_rec, K, R, t, img_id, ade_seg)
425
  #vertices, connections = gt_verts, gt_connects
426
 
@@ -446,15 +444,24 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
446
  continue
447
 
448
  # Call the refactored function to get 3D points
449
- vertices_3d = create_3d_wireframe_single_image(
450
- vertices, connections, depth, colmap_rec, img_id, ade_seg, K, R, t
451
- )
452
  #vertices_3d = gt_verts3d
453
  # Store original 2D vertices, connections, and computed 3D points
 
 
 
 
 
 
 
 
 
 
 
454
  vert_edge_per_image[i] = vertices, connections, vertices_3d
455
 
456
  # Visualize colored COLMAP point cloud with Open3D
457
-
458
  # Create Open3D point cloud from COLMAP reconstruction
459
  pcd = o3d.geometry.PointCloud()
460
 
@@ -472,7 +479,7 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
472
 
473
  # Visualize the point cloud
474
  o3d.visualization.draw_geometries([pcd], window_name="COLMAP Point Cloud")
475
-
476
  # Merge vertices from all images
477
  all_3d_vertices, connections_3d = merge_vertices_3d(vert_edge_per_image, 0.5)
478
  all_3d_vertices_clean, connections_3d_clean = prune_not_connected(all_3d_vertices, connections_3d, keep_largest=False)
@@ -485,7 +492,7 @@ def predict_wireframe(entry) -> Tuple[np.ndarray, List[int]]:
485
  return all_3d_vertices_clean, connections_3d_clean
486
 
487
 
488
- def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg, depth, K=None, R=None, t=None):
489
  """
490
  Identify apex and eave-end vertices, then detect lines for eave/ridge/rake/valley.
491
  Also find all COLMAP points that project into apex or eave_end masks.
@@ -504,9 +511,6 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
504
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
505
  eave_end_mask = cv2.inRange(gest_seg_np, eave_end_color-10, eave_end_color+10)
506
 
507
- # Combined mask for apex and eave_end
508
- combined_mask = cv2.bitwise_or(apex_mask, eave_end_mask)
509
-
510
  H, W = gest_seg_np.shape[:2]
511
 
512
  # 1) Find the matching COLMAP image to get its associated 3D points
@@ -518,7 +522,7 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
518
  break
519
  if found_img is None:
520
  print(f"Image substring {img_id_substring} not found in COLMAP.")
521
- return np.zeros((H, W), dtype=np.float32), False, None
522
 
523
  # 2) Gather 3D points that this image sees (according to COLMAP)
524
  points_xyz_world = []
@@ -529,15 +533,11 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
529
  points_idxs.append(pid)
530
  if not points_xyz_world:
531
  print(f"No 3D points associated with {found_img.name} in COLMAP.")
532
- return np.zeros((H, W), dtype=np.float32), False, found_img # Return found_img for consistency
533
 
534
  points_xyz_world = np.array(points_xyz_world) # (N, 3)
535
  points_idxs = np.array(points_idxs) # (N,)
536
-
537
- # 3) Project points_xyz_world to camera coordinates using R, t
538
- # points_cam = R @ points_xyz_world.T + t.reshape(3,1)
539
- # points_cam = points_cam.T (N,3)
540
- # More robustly:
541
  points_xyz_world_h = np.hstack((points_xyz_world, np.ones((points_xyz_world.shape[0], 1)))) # (N, 4)
542
 
543
  # World to Camera transformation matrix
@@ -585,7 +585,7 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
585
 
586
  if not uv:
587
  print(f"No points projected into image bounds for {img_id_substring} using K,R,t.")
588
- return np.zeros((H, W), dtype=np.float32), False, found_img
589
 
590
  house_mask = get_house_mask(ade_seg)
591
 
@@ -596,6 +596,10 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
596
  filtered_points_xyz = []
597
  filtered_point_idxs = []
598
  filtered_points_color = []
 
 
 
 
599
 
600
  # Apex
601
  apex_color = np.array(gestalt_color_mapping['apex'])
@@ -644,6 +648,26 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
644
  filtered_point_idxs.extend(points_idxs[final_valid_indices])
645
  filtered_points_color.extend([color] * np.sum(depth_filter))
646
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
647
 
648
  # Eave end
649
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
@@ -689,6 +713,27 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
689
  filtered_point_idxs.extend(points_idxs[final_valid_indices])
690
  filtered_points_color.extend([color] * np.sum(depth_filter))
691
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
692
  '''
693
  for i, (u, v) in enumerate(uv):
694
  # Check if this projected point falls within the combined maskvalid_indices
@@ -697,9 +742,103 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
697
  filtered_points_xyz.append(points_xyz_world[original_idx])
698
  filtered_point_idxs.append(points_idxs[original_idx])
699
  '''
700
- filtered_points_xyz = np.array(filtered_points_xyz) if filtered_points_xyz else np.empty((0, 3))
701
- filtered_point_idxs = np.array(filtered_point_idxs) if filtered_point_idxs else np.empty((0,))
702
- filtered_points_color = np.array(filtered_points_color) if filtered_points_color else np.empty((0, 3))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
703
 
704
  '''
705
  depth_fitted, depth_sparse, _, col_img = get_fitted_dense_depth(depth, colmap_rec, img_id_substring, ade_seg, K, R, t)
@@ -765,13 +904,20 @@ def extract_segmented_pcloud(gest_seg_np, colmap_rec, img_id_substring, ade_seg,
765
  pcd_depth.points = o3d.utility.Vector3dVector(segmented_points_3d)
766
  pcd_depth.colors = o3d.utility.Vector3dVector(np.full((len(segmented_points_3d), 3), [0.0, 0.0, 1.0]))
767
 
768
- # Visualize all point clouds
769
  geometries = [pcd_all]
770
  if len(filtered_points_xyz) > 0:
771
  geometries.append(pcd_filtered)
772
  if len(segmented_points_3d) > 0:
773
  geometries.append(pcd_depth)
774
 
775
- o3d.visualization.draw_geometries(geometries, window_name=f"Combined Point Cloud - {img_id_substring}")
 
 
 
 
 
 
 
776
 
777
- return filtered_points_xyz, filtered_point_idxs
 
1
  import numpy as np
2
  from typing import Tuple, List
3
+ from hoho2025.example_solutions import empty_solution, read_colmap_rec, get_vertices_and_edges_from_segmentation, get_house_mask, fit_scale_robust_median, get_uv_depth, merge_vertices_3d, prune_not_connected, prune_too_far, point_to_segment_dist
4
  from hoho2025.color_mappings import ade20k_color_mapping, gestalt_color_mapping
5
  from PIL import Image, ImageDraw
6
  from visu import save_gestalt_with_proj, draw_crosses_on_image
 
9
  from PIL import Image as PImage
10
  import cv2
11
  import open3d as o3d
12
+ from visu import plot_reconstruction_local, plot_wireframe_local, plot_bpo_cameras_from_entry_local
13
 
14
  def convert_entry_to_human_readable(entry):
15
  out = {}
 
413
  gest_seg = gest.resize(depth_size)
414
  gest_seg_np = np.array(gest_seg).astype(np.uint8)
415
 
416
+ vertices_ours, connections_ours, vertices_3d_ours = our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id, ade_seg, depth, K=K, R=R, t=t, frame=good_entry)
417
+ #vertices, connections, vertices_3d = vertices_ours, connections_ours, vertices_3d_ours
 
 
 
418
  # Get 2D vertices and edges first
419
+ vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=25.)
420
 
421
+ #gt_verts = []
422
  #gt_verts, gt_connects, gt_verts3d = get_gt_vertices_and_edges(good_entry, i, depth, colmap_rec, K, R, t, img_id, ade_seg)
423
  #vertices, connections = gt_verts, gt_connects
424
 
 
444
  continue
445
 
446
  # Call the refactored function to get 3D points
447
+ vertices_3d = create_3d_wireframe_single_image(vertices, connections, depth, colmap_rec, img_id, ade_seg, K, R, t)
 
 
448
  #vertices_3d = gt_verts3d
449
  # Store original 2D vertices, connections, and computed 3D points
450
+
451
+ if False:
452
+ pcd, geometries = plot_reconstruction_local(None, colmap_rec, points=True, cameras=True, crop_outliers=True)
453
+ wireframe = plot_wireframe_local(None, good_entry['wf_vertices'], good_entry['wf_edges'], good_entry['wf_classifications'])
454
+ wireframe2 = plot_wireframe_local(None, vertices_3d_ours, connections_ours, None, color='rgb(255, 0, 0)')
455
+ wireframe3 = plot_wireframe_local(None, vertices_3d, connections, None, color='rgb(0, 0, 255)')
456
+ bpo_cams = plot_bpo_cameras_from_entry_local(None, good_entry)
457
+
458
+ visu_all = [pcd] + geometries + wireframe + bpo_cams + wireframe2 + wireframe3
459
+ #o3d.visualization.draw_geometries(visu_all, window_name="3D Reconstruction")
460
+
461
  vert_edge_per_image[i] = vertices, connections, vertices_3d
462
 
463
  # Visualize colored COLMAP point cloud with Open3D
464
+ '''
465
  # Create Open3D point cloud from COLMAP reconstruction
466
  pcd = o3d.geometry.PointCloud()
467
 
 
479
 
480
  # Visualize the point cloud
481
  o3d.visualization.draw_geometries([pcd], window_name="COLMAP Point Cloud")
482
+ '''
483
  # Merge vertices from all images
484
  all_3d_vertices, connections_3d = merge_vertices_3d(vert_edge_per_image, 0.5)
485
  all_3d_vertices_clean, connections_3d_clean = prune_not_connected(all_3d_vertices, connections_3d, keep_largest=False)
 
492
  return all_3d_vertices_clean, connections_3d_clean
493
 
494
 
495
+ def our_get_vertices_and_edges(gest_seg_np, colmap_rec, img_id_substring, ade_seg, depth, K=None, R=None, t=None, ):
496
  """
497
  Identify apex and eave-end vertices, then detect lines for eave/ridge/rake/valley.
498
  Also find all COLMAP points that project into apex or eave_end masks.
 
511
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
512
  eave_end_mask = cv2.inRange(gest_seg_np, eave_end_color-10, eave_end_color+10)
513
 
 
 
 
514
  H, W = gest_seg_np.shape[:2]
515
 
516
  # 1) Find the matching COLMAP image to get its associated 3D points
 
522
  break
523
  if found_img is None:
524
  print(f"Image substring {img_id_substring} not found in COLMAP.")
525
+ return [], [], []
526
 
527
  # 2) Gather 3D points that this image sees (according to COLMAP)
528
  points_xyz_world = []
 
533
  points_idxs.append(pid)
534
  if not points_xyz_world:
535
  print(f"No 3D points associated with {found_img.name} in COLMAP.")
536
+ return [], [], []
537
 
538
  points_xyz_world = np.array(points_xyz_world) # (N, 3)
539
  points_idxs = np.array(points_idxs) # (N,)
540
+
 
 
 
 
541
  points_xyz_world_h = np.hstack((points_xyz_world, np.ones((points_xyz_world.shape[0], 1)))) # (N, 4)
542
 
543
  # World to Camera transformation matrix
 
585
 
586
  if not uv:
587
  print(f"No points projected into image bounds for {img_id_substring} using K,R,t.")
588
+ return [], [], []
589
 
590
  house_mask = get_house_mask(ade_seg)
591
 
 
596
  filtered_points_xyz = []
597
  filtered_point_idxs = []
598
  filtered_points_color = []
599
+ filtered_vertices_apex = []
600
+ filtered_vertices_apex_uv = []
601
+ filtered_vertices_eave_end = []
602
+ filtered_vertices_eave_end_uv = []
603
 
604
  # Apex
605
  apex_color = np.array(gestalt_color_mapping['apex'])
 
648
  filtered_point_idxs.extend(points_idxs[final_valid_indices])
649
  filtered_points_color.extend([color] * np.sum(depth_filter))
650
 
651
+ # Find the point with lowest depth in the filtered points
652
+ if len(final_valid_indices) > 0:
653
+ lowest_depth_idx = np.argmin(depths[depth_filter])
654
+ lowest_depth_point = final_valid_indices[lowest_depth_idx]
655
+ filtered_vertices_apex.append(points_xyz_world[lowest_depth_point])
656
+ filtered_points_xyz.append(points_xyz_world[lowest_depth_point])
657
+ filtered_point_idxs.append(points_idxs[lowest_depth_point])
658
+ filtered_points_color.append(np.array([1., 1., 0.]))
659
+
660
+ # Project the lowest depth point back to image coordinates for visualization
661
+ lowest_cam_point = points_cam[lowest_depth_point]
662
+
663
+ # Project to image plane using K
664
+ u_proj = (K[0, 0] * lowest_cam_point[0] / lowest_cam_point[2]) + K[0, 2]
665
+ v_proj = (K[1, 1] * lowest_cam_point[1] / lowest_cam_point[2]) + K[1, 2]
666
+
667
+ u_proj_int = int(round(u_proj))
668
+ v_proj_int = int(round(v_proj))
669
+
670
+ filtered_vertices_apex_uv.append((u_proj_int, v_proj_int))
671
 
672
  # Eave end
673
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
 
713
  filtered_point_idxs.extend(points_idxs[final_valid_indices])
714
  filtered_points_color.extend([color] * np.sum(depth_filter))
715
 
716
+ # Find the point with lowest depth in the filtered points
717
+ if len(final_valid_indices) > 0:
718
+ lowest_depth_idx = np.argmin(depths[depth_filter])
719
+ lowest_depth_point = final_valid_indices[lowest_depth_idx]
720
+ filtered_vertices_eave_end.append(points_xyz_world[lowest_depth_point])
721
+ filtered_points_xyz.append(points_xyz_world[lowest_depth_point])
722
+ filtered_point_idxs.append(points_idxs[lowest_depth_point])
723
+ filtered_points_color.append(np.array([1., 1., 0.]))
724
+
725
+ # Project the lowest depth point back to image coordinates for visualization
726
+ lowest_cam_point = points_cam[lowest_depth_point]
727
+
728
+ # Project to image plane using K
729
+ u_proj = (K[0, 0] * lowest_cam_point[0] / lowest_cam_point[2]) + K[0, 2]
730
+ v_proj = (K[1, 1] * lowest_cam_point[1] / lowest_cam_point[2]) + K[1, 2]
731
+
732
+ u_proj_int = int(round(u_proj))
733
+ v_proj_int = int(round(v_proj))
734
+
735
+ filtered_vertices_eave_end_uv.append((u_proj_int, v_proj_int))
736
+
737
  '''
738
  for i, (u, v) in enumerate(uv):
739
  # Check if this projected point falls within the combined maskvalid_indices
 
742
  filtered_points_xyz.append(points_xyz_world[original_idx])
743
  filtered_point_idxs.append(points_idxs[original_idx])
744
  '''
745
+ filtered_points_xyz = np.array(filtered_points_xyz[::-1]) if filtered_points_xyz else np.empty((0, 3))
746
+ filtered_point_idxs = np.array(filtered_point_idxs[::-1]) if filtered_point_idxs else np.empty((0,))
747
+ filtered_points_color = np.array(filtered_points_color[::-1]) if filtered_points_color else np.empty((0, 3))
748
+ filtered_vertices_apex = np.array(filtered_vertices_apex) if filtered_vertices_apex else np.empty((0, 3))
749
+ filtered_vertices_eave_end = np.array(filtered_vertices_eave_end) if filtered_vertices_eave_end else np.empty((0, 3))
750
+
751
+
752
+ connections = []
753
+ edge_classes = ['eave', 'ridge', 'rake', 'valley']
754
+ edge_th = 25.0 # threshold for proximity to line segments
755
+
756
+ # Combine apex and eave_end vertices and their UV coordinates
757
+ all_vertices_3d = []
758
+ all_vertices_uv = []
759
+ vertex_types = []
760
+
761
+ # Add apex vertices
762
+ for i, (vertex_3d, vertex_uv) in enumerate(zip(filtered_vertices_apex, filtered_vertices_apex_uv)):
763
+ all_vertices_3d.append(vertex_3d)
764
+ all_vertices_uv.append(vertex_uv)
765
+ vertex_types.append('apex')
766
+
767
+ # Add eave_end vertices
768
+ for i, (vertex_3d, vertex_uv) in enumerate(zip(filtered_vertices_eave_end, filtered_vertices_eave_end_uv)):
769
+ all_vertices_3d.append(vertex_3d)
770
+ all_vertices_uv.append(vertex_uv)
771
+ vertex_types.append('eave_end')
772
+
773
+ all_vertices_3d = np.array(all_vertices_3d)
774
+ all_vertices_uv = np.array(all_vertices_uv)
775
+
776
+ if len(all_vertices_uv) < 2:
777
+ return [], [], []
778
+
779
+ for edge_class in edge_classes:
780
+ edge_color = np.array(gestalt_color_mapping[edge_class])
781
+ mask_raw = cv2.inRange(gest_seg_np, edge_color-10, edge_color+10)
782
+ # Morphological operations to clean up the mask
783
+ kernel = np.ones((5, 5), np.uint8)
784
+ mask = cv2.morphologyEx(mask_raw, cv2.MORPH_CLOSE, kernel)
785
+ if mask.sum() == 0:
786
+ continue
787
+
788
+ # Connected components
789
+ output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
790
+ (numLabels, labels, stats, centroids) = output
791
+ # Skip the background
792
+ stats, centroids = stats[1:], centroids[1:]
793
+ label_indices = range(1, numLabels)
794
+
795
+ # For each connected component, do a line fit
796
+ for lbl in label_indices:
797
+ ys, xs = np.where(labels == lbl)
798
+ if len(xs) < 2:
799
+ continue
800
+
801
+ # Fit a line using cv2.fitLine
802
+ pts_for_fit = np.column_stack([xs, ys]).astype(np.float32)
803
+ line_params = cv2.fitLine(pts_for_fit, distType=cv2.DIST_L2,
804
+ param=0, reps=0.01, aeps=0.01)
805
+ vx, vy, x0, y0 = line_params.ravel()
806
+
807
+ # Find line segment endpoints by projecting points onto the line
808
+ proj = ((xs - x0)*vx + (ys - y0)*vy)
809
+ proj_min, proj_max = proj.min(), proj.max()
810
+ p1 = np.array([x0 + proj_min*vx, y0 + proj_min*vy])
811
+ p2 = np.array([x0 + proj_max*vx, y0 + proj_max*vy])
812
+
813
+ # Find vertices that are close to this line segment
814
+ if len(all_vertices_uv) < 2:
815
+ continue
816
+
817
+ # Calculate distance from each vertex UV to the line segment
818
+ dists = []
819
+ for vertex_uv in all_vertices_uv:
820
+ dist = point_to_segment_dist(vertex_uv, p1, p2)
821
+ dists.append(dist)
822
+
823
+ dists = np.array(dists)
824
+
825
+ # Find vertices that are near this line segment
826
+ near_mask = (dists <= edge_th)
827
+ near_indices = np.where(near_mask)[0]
828
+
829
+ if len(near_indices) < 2:
830
+ continue
831
+
832
+ # Connect each pair among these near vertices
833
+ for i in range(len(near_indices)):
834
+ for j in range(i+1, len(near_indices)):
835
+ idx_a = near_indices[i]
836
+ idx_b = near_indices[j]
837
+
838
+ # Create connection tuple (using sorted indices for consistency)
839
+ conn = tuple(sorted((idx_a, idx_b)))
840
+ if conn not in connections:
841
+ connections.append(conn)
842
 
843
  '''
844
  depth_fitted, depth_sparse, _, col_img = get_fitted_dense_depth(depth, colmap_rec, img_id_substring, ade_seg, K, R, t)
 
904
  pcd_depth.points = o3d.utility.Vector3dVector(segmented_points_3d)
905
  pcd_depth.colors = o3d.utility.Vector3dVector(np.full((len(segmented_points_3d), 3), [0.0, 0.0, 1.0]))
906
 
907
+ # Visualize all point clouds and spheres
908
  geometries = [pcd_all]
909
  if len(filtered_points_xyz) > 0:
910
  geometries.append(pcd_filtered)
911
  if len(segmented_points_3d) > 0:
912
  geometries.append(pcd_depth)
913
 
914
+ #o3d.visualization.draw_geometries(geometries, window_name=f"Combined Point Cloud - {img_id_substring}")
915
+ # Convert all_vertices_uv and vertex_types to the required format
916
+ vertices_formatted = []
917
+ for uv, vertex_type in zip(all_vertices_uv, vertex_types):
918
+ vertices_formatted.append({
919
+ 'xy': np.array(uv, dtype=float),
920
+ 'type': vertex_type
921
+ })
922
 
923
+ return vertices_formatted, connections, all_vertices_3d