Denys Rozumnyi committed on
Commit
a0cd1aa
·
1 Parent(s): 725639c
Files changed (4) hide show
  1. geom_solver.py +26 -20
  2. helpers.py +1 -1
  3. my_solution.py +1 -2
  4. testing.ipynb +0 -0
geom_solver.py CHANGED
@@ -16,12 +16,14 @@ def my_empty_solution():
16
 
17
  class GeomSolver(object):
18
 
19
- def __init__(self, entry):
20
  self.min_vertices = 18
 
 
 
21
 
22
- human_entry = convert_entry_to_human_readable(entry)
23
- self.human_entry = human_entry
24
-
25
  col_cams = [hoho.Rt_to_eye_target(human_entry['ade20k'][0], to_K(*human_entry['cameras'][1].params), quaternion_to_rotation_matrix(colmap_img.qvec), colmap_img.tvec) for colmap_img in human_entry['images'].values()]
26
  eye, target, up, fov = col_cams[0]
27
 
@@ -35,23 +37,19 @@ class GeomSolver(object):
35
  gestalt_to_colmap_cams = [colmap_cameras_tf[np.argmin(((gcam - col_camcet)**2).sum(1)**0.5)] for gcam in gestalt_camcet]
36
  broken_cams = np.array([np.min(((gcam - col_camcet)**2).sum(1)**0.5) for gcam in gestalt_camcet]) > 300
37
 
38
- # def get_vertices(self):
39
- clr_th = 2.5
40
- device = 'cuda:0'
41
  height = cameras[1].height
42
  width = cameras[1].width
43
  N = len(gestalt_to_colmap_cams)
44
  K = to_K(*human_entry['cameras'][1].params)[None].repeat(N, 0)
45
- # print(gestalt_to_colmap_cams, N, human_entry['images'])
46
- # print(sorted(human_entry['images'].keys()))
47
  R = np.stack([quaternion_to_rotation_matrix(human_entry['images'][gestalt_to_colmap_cams[ind]].qvec) for ind in range(N)])
48
  T = np.stack([human_entry['images'][gestalt_to_colmap_cams[ind]].tvec for ind in range(N)])
49
 
50
  R = np.linalg.inv(R)
51
- image_size=torch.Tensor([height, width]).repeat(N, 1)
52
- pyt_cameras = PerspectiveCameras(device=device, R=R, T=T, in_ndc=False, focal_length=K[:, 0, :1], principal_point=K[:, :2, 2], image_size=image_size)
53
 
54
- verts = torch.from_numpy(xyz.astype(np.float32)).to(device)
55
 
56
  apex_color = np.array(gestalt_color_mapping['apex'])
57
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
@@ -66,8 +64,8 @@ class GeomSolver(object):
66
  cki = gestalt_to_colmap_cams[ki]
67
 
68
  gest = np.array(human_entry['gestalt'][ki])
69
- apex_mask = cv2.inRange(gest, apex_color-clr_th, apex_color+clr_th)
70
- eave_end_mask = cv2.inRange(gest, eave_end_color-clr_th, eave_end_color+clr_th)
71
  vert_mask = apex_mask + eave_end_mask
72
  vert_mask = (vert_mask > 0).astype(np.uint8)
73
 
@@ -99,16 +97,15 @@ class GeomSolver(object):
99
  criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.3)
100
  flags = cv2.KMEANS_RANDOM_CENTERS
101
  centers = None
102
- kmeans_th = 150
103
- for tempi in range(1,11):
104
  retval, bestLabels, temp_centers = cv2.kmeans(xyz[selected_points][dense_pnts].astype(np.float32), tempi, None, criteria, 200,flags)
105
  cpnts = torch.from_numpy(temp_centers.astype(np.float32))[None]
106
  bdists, inds, nn = ball_query(cpnts, cpnts, K=1, radius=100)
107
  if bdists.max() > 0:
108
  closest_nn = (bdists[bdists>0].min()**0.5).item()
109
  else:
110
- closest_nn = kmeans_th
111
- if closest_nn < kmeans_th:
112
  break
113
  centers = temp_centers
114
  if centers is None:
@@ -123,8 +120,6 @@ class GeomSolver(object):
123
 
124
  self.vertices = centers
125
 
126
-
127
- def get_vertices(self, visualize=False):
128
  if self.with_broken_cams:
129
  vertices = self.vertices
130
  print("There are broken cams.")
@@ -134,9 +129,20 @@ class GeomSolver(object):
134
  added_one = self.wf_center
135
  added = added_one[None].repeat(self.min_vertices - nvert,0)
136
  vertices = np.concatenate((self.vertices, added))
 
 
 
 
 
 
 
 
137
  if visualize:
138
  from hoho.viz3d import plot_estimate_and_gt
139
  plot_estimate_and_gt(vertices, [(0,0)], self.human_entry['wf_vertices'], self.human_entry['wf_edges'])
 
140
  return vertices
141
 
 
 
142
 
 
16
 
17
  class GeomSolver(object):
18
 
19
+ def __init__(self):
20
  self.min_vertices = 18
21
+ self.kmeans_th = 150
22
+ self.clr_th = 2.5
23
+ self.device = 'cuda:0'
24
 
25
+ def process_vertices(self):
26
+ human_entry = self.human_entry
 
27
  col_cams = [hoho.Rt_to_eye_target(human_entry['ade20k'][0], to_K(*human_entry['cameras'][1].params), quaternion_to_rotation_matrix(colmap_img.qvec), colmap_img.tvec) for colmap_img in human_entry['images'].values()]
28
  eye, target, up, fov = col_cams[0]
29
 
 
37
  gestalt_to_colmap_cams = [colmap_cameras_tf[np.argmin(((gcam - col_camcet)**2).sum(1)**0.5)] for gcam in gestalt_camcet]
38
  broken_cams = np.array([np.min(((gcam - col_camcet)**2).sum(1)**0.5) for gcam in gestalt_camcet]) > 300
39
 
40
+
 
 
41
  height = cameras[1].height
42
  width = cameras[1].width
43
  N = len(gestalt_to_colmap_cams)
44
  K = to_K(*human_entry['cameras'][1].params)[None].repeat(N, 0)
 
 
45
  R = np.stack([quaternion_to_rotation_matrix(human_entry['images'][gestalt_to_colmap_cams[ind]].qvec) for ind in range(N)])
46
  T = np.stack([human_entry['images'][gestalt_to_colmap_cams[ind]].tvec for ind in range(N)])
47
 
48
  R = np.linalg.inv(R)
49
+ image_size = torch.Tensor([height, width]).repeat(N, 1)
50
+ pyt_cameras = PerspectiveCameras(device=self.device, R=R, T=T, in_ndc=False, focal_length=K[:, 0, :1], principal_point=K[:, :2, 2], image_size=image_size)
51
 
52
+ verts = torch.from_numpy(xyz.astype(np.float32)).to(self.device)
53
 
54
  apex_color = np.array(gestalt_color_mapping['apex'])
55
  eave_end_color = np.array(gestalt_color_mapping['eave_end_point'])
 
64
  cki = gestalt_to_colmap_cams[ki]
65
 
66
  gest = np.array(human_entry['gestalt'][ki])
67
+ apex_mask = cv2.inRange(gest, apex_color-self.clr_th, apex_color+self.clr_th)
68
+ eave_end_mask = cv2.inRange(gest, eave_end_color-self.clr_th, eave_end_color+self.clr_th)
69
  vert_mask = apex_mask + eave_end_mask
70
  vert_mask = (vert_mask > 0).astype(np.uint8)
71
 
 
97
  criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.3)
98
  flags = cv2.KMEANS_RANDOM_CENTERS
99
  centers = None
100
+ for tempi in range(1, 20):
 
101
  retval, bestLabels, temp_centers = cv2.kmeans(xyz[selected_points][dense_pnts].astype(np.float32), tempi, None, criteria, 200,flags)
102
  cpnts = torch.from_numpy(temp_centers.astype(np.float32))[None]
103
  bdists, inds, nn = ball_query(cpnts, cpnts, K=1, radius=100)
104
  if bdists.max() > 0:
105
  closest_nn = (bdists[bdists>0].min()**0.5).item()
106
  else:
107
+ closest_nn = self.kmeans_th
108
+ if closest_nn < self.kmeans_th:
109
  break
110
  centers = temp_centers
111
  if centers is None:
 
120
 
121
  self.vertices = centers
122
 
 
 
123
  if self.with_broken_cams:
124
  vertices = self.vertices
125
  print("There are broken cams.")
 
129
  added_one = self.wf_center
130
  added = added_one[None].repeat(self.min_vertices - nvert,0)
131
  vertices = np.concatenate((self.vertices, added))
132
+ self.vertices_aug = vertices
133
+
134
+ def solve(self, entry, visualize=False):
135
+ human_entry = convert_entry_to_human_readable(entry)
136
+ self.human_entry = human_entry
137
+ self.process_vertices()
138
+ vertices = self.vertices_aug
139
+
140
  if visualize:
141
  from hoho.viz3d import plot_estimate_and_gt
142
  plot_estimate_and_gt(vertices, [(0,0)], self.human_entry['wf_vertices'], self.human_entry['wf_edges'])
143
+
144
  return vertices
145
 
146
+
147
+
148
 
helpers.py CHANGED
@@ -119,7 +119,7 @@ def my_compute_WED(pd_vertices, pd_edges, gt_vertices, gt_edges, cv_ins=-1/2, cv
119
  # Step 5: Calculation of WED
120
  WED = translation_costs + deletion_costs + insertion_costs + deletion_edge_costs + insertion_edge_costs
121
  print(translation_costs, deletion_costs, insertion_costs, deletion_edge_costs, insertion_edge_costs)
122
-
123
  if normalized:
124
  total_length_of_gt_edges = np.linalg.norm((gt_vertices[gt_edges[:, 0]] - gt_vertices[gt_edges[:, 1]]), axis=1).sum()
125
  WED = WED / total_length_of_gt_edges
 
119
  # Step 5: Calculation of WED
120
  WED = translation_costs + deletion_costs + insertion_costs + deletion_edge_costs + insertion_edge_costs
121
  print(translation_costs, deletion_costs, insertion_costs, deletion_edge_costs, insertion_edge_costs)
122
+
123
  if normalized:
124
  total_length_of_gt_edges = np.linalg.norm((gt_vertices[gt_edges[:, 0]] - gt_vertices[gt_edges[:, 1]]), axis=1).sum()
125
  WED = WED / total_length_of_gt_edges
my_solution.py CHANGED
@@ -36,8 +36,7 @@ def predict(entry, visualize=False) -> Tuple[np.ndarray, List[int]]:
36
  # return (entry['__key__'], *my_empty_solution())
37
  vertices0, edges0 = my_empty_solution()
38
  try:
39
- solver = GeomSolver(entry)
40
- vertices = solver.get_vertices()
41
  edges = edges0
42
  except:
43
  vertices, edges = vertices0, edges0
 
36
  # return (entry['__key__'], *my_empty_solution())
37
  vertices0, edges0 = my_empty_solution()
38
  try:
39
+ vertices = GeomSolver().solve(entry)
 
40
  edges = edges0
41
  except:
42
  vertices, edges = vertices0, edges0
testing.ipynb CHANGED
The diff for this file is too large to render. See raw diff