Denys Rozumnyi committed
Commit 87a5a37 · 1 parent: 7258769
update
Browse files:
- geom_solver.py +93 -13
- my_solution.py +1 -2
- testing.ipynb +0 -0
geom_solver.py
CHANGED
@@ -18,10 +18,11 @@ class GeomSolver(object):
     def __init__(self):
         self.min_vertices = 18
         self.kmeans_th = 150
+        self.clr_th = 2.5
         self.device = 'cuda:0'
 
 
-    def cluster_points(self, point_types, clr_th = 2.5):
+    def cluster_points(self, point_types):
         point_colors = []
         for point_type in point_types:
             point_colors.append(np.array(gestalt_color_mapping[point_type]))
@@ -38,7 +39,7 @@ class GeomSolver(object):
             gest = self.gests[ki]
             vert_mask = 0
             for point_color in point_colors:
-                my_mask = cv2.inRange(gest, point_color-clr_th, point_color+clr_th)
+                my_mask = cv2.inRange(gest, point_color-self.clr_th, point_color+self.clr_th)
                 vert_mask = vert_mask + my_mask
             vert_mask = (vert_mask > 0).astype(np.uint8)
 
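The vertex masks above come from thresholding the gestalt segmentation image around each class color. A minimal standalone sketch of the same idea; class_mask is a hypothetical helper and the color values are placeholders, since the real ones come from hoho's gestalt_color_mapping:

import cv2
import numpy as np

# Hypothetical stand-ins for gestalt_color_mapping entries (not the real values).
color_mapping = {'apex': (235, 88, 59), 'eave_end_point': (248, 130, 228)}

def class_mask(seg_img, classes, clr_th=2.5):
    # Union of per-class masks: pixels within +/- clr_th of each class color.
    mask = np.zeros(seg_img.shape[:2], dtype=np.uint8)
    for name in classes:
        color = np.array(color_mapping[name])
        mask |= cv2.inRange(seg_img, color - clr_th, color + clr_th)
    return (mask > 0).astype(np.uint8)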
@@ -66,10 +67,11 @@ class GeomSolver(object):
         criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.3)
         flags = cv2.KMEANS_RANDOM_CENTERS
         centers = np.zeros((0, 3))
+        assigned_points = []
         if len(self.xyz[selected_points][dense_pnts]) == 0:
-            return centers
+            return centers, assigned_points
         for tempi in range(1, 20):
-            retval, bestLabels, temp_centers = cv2.kmeans(self.xyz[selected_points][dense_pnts].astype(np.float32), tempi, None, criteria, 200,flags)
+            retval, temp_bestLabels, temp_centers = cv2.kmeans(self.xyz[selected_points][dense_pnts].astype(np.float32), tempi, None, criteria, 200,flags)
             cpnts = torch.from_numpy(temp_centers.astype(np.float32))[None]
             bdists, inds, nn = ball_query(cpnts, cpnts, K=1, radius=100)
             if bdists.max() > 0:
@@ -78,11 +80,16 @@ class GeomSolver(object):
                 closest_nn = self.kmeans_th
             if closest_nn < self.kmeans_th:
                 break
-            centers = temp_centers
+            centers, bestLabels = temp_centers, temp_bestLabels
         if centers.shape[0] == 0:
-            centers = temp_centers
+            centers, bestLabels = temp_centers, temp_bestLabels
 
-        return centers
+        point_inds = np.arange(self.xyz.shape[0])
+        for ci in range(centers.shape[0]):
+            assigned_inds = point_inds[selected_points][dense_pnts][bestLabels[:,0] == ci]
+            assigned_points.append(assigned_inds)
+
+        return centers, assigned_points
 
 
     def process_vertices(self):
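cluster_points picks the number of k-means clusters adaptively: it keeps increasing k until two centers come closer than self.kmeans_th, then keeps the previous clustering. A simplified sketch of that loop; adaptive_kmeans is a hypothetical helper, and torch.cdist stands in for pytorch3d's ball_query so the snippet is self-contained:

import cv2
import numpy as np
import torch

def adaptive_kmeans(points, sep_th=150.0, max_k=20):
    # Grow k until two centers fall within sep_th of each other,
    # then return the last clustering whose centers stayed apart.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.3)
    centers, labels = np.zeros((0, 3)), None
    for k in range(1, max_k):
        _, tmp_labels, tmp_centers = cv2.kmeans(points.astype(np.float32), k, None,
                                                criteria, 200, cv2.KMEANS_RANDOM_CENTERS)
        if k > 1:
            d = torch.cdist(torch.from_numpy(tmp_centers), torch.from_numpy(tmp_centers))
            d.fill_diagonal_(float('inf'))
            if d.min() < sep_th:  # centers too close: stop growing k
                break
        centers, labels = tmp_centers, tmp_labels
    return centers, labels

The commit additionally records which input points belong to each surviving center (assigned_points), which the new process_edges uses to decide which COLMAP images can see a given vertex.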
@@ -112,12 +119,12 @@ class GeomSolver(object):
         self.pyt_cameras = PerspectiveCameras(device=self.device, R=R, T=T, in_ndc=False, focal_length=K[:, 0, :1], principal_point=K[:, :2, 2], image_size=image_size)
         self.verts = torch.from_numpy(self.xyz.astype(np.float32)).to(self.device)
 
-        centers_apex = self.cluster_points(['apex'])
-        centers_eave = self.cluster_points(['eave_end_point'])
+        centers_apex, assigned_apex = self.cluster_points(['apex'])
+        centers_eave, assigned_eave = self.cluster_points(['eave_end_point'])
         centers = np.concatenate((centers_apex, centers_eave))
-
-
-
+        self.assigned_points = assigned_apex + assigned_eave
+        self.is_apex = np.zeros((centers.shape[0], )).astype(int)
+        self.is_apex[:centers_apex.shape[0]] = 1
 
         z_th = centers[:,-1].min() - 50
         self.wf_center = self.xyz[self.xyz[:,-1] > z_th].mean(0)
@@ -135,17 +142,90 @@ class GeomSolver(object):
         vertices = np.concatenate((self.vertices, added))
         self.vertices_aug = vertices
 
+
+    def process_edges(self):
+        N = len(self.gests)
+        image_ids = np.array([p.id for p in self.points3D.values()])
+        center_visibility = [set(np.concatenate([self.points3D[image_ids[pind]].image_ids for pind in ass_item])) for ass_item in self.assigned_points]
+
+        pyt_centers = torch.from_numpy(self.vertices.astype(np.float32)).to(self.device)
+
+        edge_dists = []
+        uvs = []
+        edge_types = {0 : ['eave'], 1 : ['rake', 'valley'], 2 : ['ridge']}
+        for ki in range(N):
+            gest = self.gests[ki]
+            edge_masks = {}
+            per_type_dists = {}
+            for etype in edge_types:
+                edge_mask = 0
+                for edge_class in edge_types[etype]:
+                    edge_color = np.array(gestalt_color_mapping[edge_class])
+                    mask = cv2.morphologyEx(cv2.inRange(gest,
+                                                        edge_color-self.clr_th,
+                                                        edge_color+self.clr_th),
+                                            cv2.MORPH_DILATE, np.ones((3, 3)))
+                    edge_mask += mask
+                edge_mask = (edge_mask > 0).astype(np.uint8)
+                edge_masks[etype] = edge_mask
+                dist = cv2.distanceTransform(1-edge_mask, cv2.DIST_L2, 3)
+                per_type_dists[etype] = dist
+            edge_dists.append(per_type_dists)
+
+            uv = torch.round(self.pyt_cameras[ki].transform_points(pyt_centers)[:, :2]).cpu().numpy().astype(int)
+            uv_inl = (uv[:, 0] >= 0) * (uv[:, 1] >= 0) * (uv[:, 0] < self.width) * (uv[:, 1] < self.height)
+            uv = uv[uv_inl]
+            uvs.append(uv)
+
+        edges = []
+        thresholds_min_mean = {0 : [5, 7], 1 : [15, 25], 2: [30, 1000]}
+        for i in range(pyt_centers.shape[0]):
+            for j in range(i+1, pyt_centers.shape[0]):
+                etype = (self.is_apex[i] + self.is_apex[j])
+
+                points_inter = pyt_centers[i][None] + torch.linspace(0, 1, 20)[:, None].to(self.device) * (pyt_centers[j][None] - pyt_centers[i][None])
+                min_mean_dist = 1000
+                all_dists = []
+                best_ki = -1
+                best_uvi = -1
+                for ki in range(N):
+                    cki = self.gestalt_to_colmap_cams[ki]
+
+                    if not ( (cki in center_visibility[i]) or (cki in center_visibility[j]) ):
+                        continue
+
+                    uvi = torch.round(self.pyt_cameras[ki].transform_points(points_inter)[:, :2]).cpu().numpy().astype(int)
+                    if (uvi <= 0).any() or (uvi[:,0] >= self.width).any() or (uvi[:,1] >= self.height).any():
+                        continue
+                    mean_dist = edge_dists[ki][etype][uvi[:,1], uvi[:,0]].mean()
+                    all_dists.append(mean_dist)
+                    if mean_dist < min_mean_dist:
+                        min_mean_dist = mean_dist
+                        best_ki = ki
+                        best_uvi = uvi
+
+                if best_ki == -1:
+                    continue
+                ths = thresholds_min_mean[etype]
+                if min_mean_dist < ths[0] and np.mean(all_dists) < ths[1]:
+                    edges.append((i,j))
+        if len(edges) == 0:
+            edges.append((0, 0))
+        return edges
+
+
     def solve(self, entry, visualize=False):
         human_entry = convert_entry_to_human_readable(entry)
         self.human_entry = human_entry
         self.process_vertices()
         vertices = self.vertices_aug
+        edges = self.process_edges()
 
         if visualize:
             from hoho.viz3d import plot_estimate_and_gt
             plot_estimate_and_gt(vertices, [(0,0)], self.human_entry['wf_vertices'], self.human_entry['wf_edges'])
 
-        return vertices
+        return vertices, edges
 
 
 
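process_edges scores a candidate edge between vertices i and j by sampling 20 points along the 3D segment, projecting them into each view with the PyTorch3D cameras, and averaging a distance transform of the matching edge-class mask at those pixels; a small average means the reprojected segment hugs detected edge pixels. A condensed sketch of the per-view scoring, where segment_support is a hypothetical helper and dist_map, uv_a, uv_b are assumed inputs (the precomputed cv2.distanceTransform for the edge type and the two projected endpoints as (x, y) arrays):

import numpy as np

def segment_support(dist_map, uv_a, uv_b, n_samples=20):
    # Mean distance-transform value along the straight segment uv_a -> uv_b.
    t = np.linspace(0.0, 1.0, n_samples)[:, None]
    uv = np.round(uv_a[None] + t * (uv_b[None] - uv_a[None])).astype(int)
    h, w = dist_map.shape
    if (uv < 0).any() or (uv[:, 0] >= w).any() or (uv[:, 1] >= h).any():
        return np.inf  # segment leaves the image; skip this view
    return float(dist_map[uv[:, 1], uv[:, 0]].mean())

A pair (i, j) is kept when the best view's mean distance and the average over all candidate views both fall under the per-type thresholds in thresholds_min_mean; the edge type (eave, rake/valley, ridge) is chosen by how many of the two endpoints are apexes.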
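With this commit, solve returns both the estimated vertices and the inferred edges. A hypothetical call site, assuming an entry as provided by the hoho dataset loader:

solver = GeomSolver()
vertices, edges = solver.solve(entry)
# vertices: (N, 3) float array of wireframe vertex estimates
# edges: list of (i, j) index pairs into vertices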
my_solution.py
CHANGED
@@ -36,8 +36,7 @@ def predict(entry, visualize=False) -> Tuple[np.ndarray, List[int]]:
     # return (entry['__key__'], *my_empty_solution())
     vertices0, edges0 = my_empty_solution()
     try:
-        vertices = GeomSolver().solve(entry)
-        edges = edges0
+        vertices, edges = GeomSolver().solve(entry)
     except:
         print('ERROR')
         vertices, edges = vertices0, edges0
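One caveat with the fallback in predict: the bare except also catches KeyboardInterrupt and SystemExit and hides the actual failure. A slightly narrower variant, as a sketch:

try:
    vertices, edges = GeomSolver().solve(entry)
except Exception as e:  # keep the fallback, but don't swallow SystemExit/KeyboardInterrupt
    print(f'ERROR: {e}')
    vertices, edges = vertices0, edges0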
testing.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.