ADE category statistics
Browse files- handcrafted_solution.py +104 -6
handcrafted_solution.py
CHANGED
|
@@ -121,6 +121,94 @@ def get_uv_depth(vertices, depth):
|
|
| 121 |
vertex_depth = depth[(uv_int[:, 1] , uv_int[:, 0])]
|
| 122 |
return uv, vertex_depth
|
| 123 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
def get_uv_dept_category(vertices, depth, ade_seg):
|
| 125 |
'''Get the depth of the vertices from the depth image'''
|
| 126 |
uv = []
|
|
@@ -133,11 +221,19 @@ def get_uv_dept_category(vertices, depth, ade_seg):
|
|
| 133 |
uv_int[:, 1] = np.clip( uv_int[:, 1], 0, H-1)
|
| 134 |
vertex_depth = depth[(uv_int[:, 1] , uv_int[:, 0])]
|
| 135 |
vertex_category = ade_seg[(uv_int[:, 1] , uv_int[:, 0])]
|
| 136 |
-
|
| 137 |
-
filter_ind = [i for i, ele in enumerate(vertex_category) if ele in
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
print(f'retain {len(filter_ind)} idx')
|
| 139 |
print(vertex_category[filter_ind])
|
| 140 |
-
|
|
|
|
|
|
|
|
|
|
| 141 |
|
| 142 |
def merge_vertices_3d(vert_edge_per_image, th=0.1):
|
| 143 |
'''Merge vertices that are close to each other in 3D space and are of same types'''
|
|
@@ -232,13 +328,15 @@ def predict(entry, visualize=False) -> Tuple[np.ndarray, List[int]]:
|
|
| 232 |
gest_seg_np = np.array(gest_seg).astype(np.uint8)
|
| 233 |
# Metric3D
|
| 234 |
depth_np = np.array(depth) / 2.5 # 2.5 is the scale estimation coefficient
|
| 235 |
-
vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th = 20.)
|
|
|
|
|
|
|
| 236 |
if (len(vertices) < 2) or (len(connections) < 1):
|
| 237 |
print (f'Not enough vertices or connections in image {i}')
|
| 238 |
vert_edge_per_image[i] = np.empty((0, 2)), [], np.empty((0, 3))
|
| 239 |
continue
|
| 240 |
-
|
| 241 |
-
uv, depth_vert, ade_category_vert = get_uv_dept_category(vertices, depth_np, ade_seg_np)
|
| 242 |
# Normalize the uv to the camera intrinsics
|
| 243 |
xy_local = np.ones((len(uv), 3))
|
| 244 |
xy_local[:, 0] = (uv[:, 0] - K[0,2]) / K[0,0]
|
|
|
|
| 121 |
vertex_depth = depth[(uv_int[:, 1] , uv_int[:, 0])]
|
| 122 |
return uv, vertex_depth
|
| 123 |
|
| 124 |
+
def get_vertices_and_edges_from_two_segmentations(ade_seg_np, gest_seg_np, edge_th=50.0):
    '''Get roof vertices and edge connectivity from the gestalt segmentation,
    keeping only vertices that fall inside roof-like regions of the ADE20k
    segmentation.

    Parameters
    ----------
    ade_seg_np : np.ndarray
        HxWx3 uint8 ADE20k color segmentation of the image.
    gest_seg_np : np.ndarray
        HxWx3 uint8 gestalt color segmentation (colors per `gestalt_color_mapping`).
    edge_th : float
        Max pixel distance between a vertex and an edge endpoint for them to
        be considered connected.

    Returns
    -------
    (vertices, connections) : (list[dict], list[tuple[int, int]])
        `vertices` are {"xy": centroid, "type": "apex"|"eave_end_point"} dicts;
        `connections` are index pairs into `vertices`.
    '''
    vertices = []
    connections = []

    # Roof-region mask from the ADE segmentation. NOTE(review): the original
    # also computed a mask for color (0,0,0) (background) but never used it;
    # that dead computation is dropped here.
    ade_mask = _ade_roof_mask(ade_seg_np)

    # Vertex candidates: apex and eave-end blobs restricted to the roof mask.
    vertices.extend(_masked_vertex_centroids(gest_seg_np, ade_mask, 'apex'))
    vertices.extend(_masked_vertex_centroids(gest_seg_np, ade_mask, 'eave_end_point'))
    print(f'{len(vertices)} vertices detected')

    # All vertex centroids, in list order, for distance queries below.
    apex_pts = np.array([v['xy'] for v in vertices])

    # An edge segment connects the two vertices nearest to its endpoints.
    for edge_class in ['eave', 'ridge', 'rake', 'valley']:
        edge_color = np.array(gestalt_color_mapping[edge_class])
        # Dilate so that slightly-broken edge strokes merge into one component.
        mask = cv2.morphologyEx(
            cv2.inRange(gest_seg_np, edge_color - 0.5, edge_color + 0.5),
            cv2.MORPH_DILATE, np.ones((11, 11)))
        if mask.sum() == 0:
            continue

        numLabels, labels, _stats, _centroids = cv2.connectedComponentsWithStats(
            mask, 8, cv2.CV_32S)
        edges = []
        for i in range(1, numLabels):  # label 0 is background
            y, x = np.where(labels == i)
            # Approximate the segment by its leftmost and rightmost pixels.
            xleft_idx = np.argmin(x)
            xright_idx = np.argmax(x)
            edges.append((x[xleft_idx], y[xleft_idx], x[xright_idx], y[xright_idx]))
        edges = np.array(edges)
        if (len(apex_pts) < 2) or len(edges) < 1:
            continue

        # Distance from each vertex to the nearer endpoint of each edge.
        pts_to_edges_dist = np.minimum(cdist(apex_pts, edges[:, :2]),
                                       cdist(apex_pts, edges[:, 2:]))
        connectivity_mask = pts_to_edges_dist <= edge_th
        edge_connects = connectivity_mask.sum(axis=0)
        for edge_idx, n_connected in enumerate(edge_connects):
            if n_connected >= 2:
                # Connect every pair of vertices attached to this edge.
                connected_verts = np.where(connectivity_mask[:, edge_idx])[0]
                for a_i, a in enumerate(connected_verts):
                    for b in connected_verts[a_i + 1:]:
                        connections.append((a, b))
    return vertices, connections


def _ade_roof_mask(ade_seg_np):
    '''OR together the ADE20k color masks that correspond to roof-like regions
    (wall/building/house colors 120,120,120 / 180,120,120 / 255,9,224).'''
    roof_mask = None
    for color in ([120, 120, 120], [180, 120, 120], [255, 9, 224]):
        c = np.array(color)
        m = cv2.inRange(ade_seg_np, c - 0.5, c + 0.5)
        roof_mask = m if roof_mask is None else cv2.bitwise_or(roof_mask, m)
    return roof_mask


def _masked_vertex_centroids(gest_seg_np, region_mask, vertex_type):
    '''Return {"xy", "type"} vertex dicts for the centroids of the connected
    components of the `vertex_type` gestalt color, restricted to `region_mask`.'''
    color = np.array(gestalt_color_mapping[vertex_type])
    mask = cv2.inRange(gest_seg_np, color - 0.5, color + 0.5)
    mask = cv2.bitwise_and(mask, region_mask)
    verts = []
    if mask.sum() > 0:
        _numLabels, _labels, _stats, centroids = cv2.connectedComponentsWithStats(
            mask, 8, cv2.CV_32S)
        # Skip centroid 0: it belongs to the background component.
        for centroid in centroids[1:]:
            verts.append({"xy": centroid, "type": vertex_type})
    return verts
|
| 211 |
+
|
| 212 |
def get_uv_dept_category(vertices, depth, ade_seg):
|
| 213 |
'''Get the depth of the vertices from the depth image'''
|
| 214 |
uv = []
|
|
|
|
| 221 |
uv_int[:, 1] = np.clip( uv_int[:, 1], 0, H-1)
|
| 222 |
vertex_depth = depth[(uv_int[:, 1] , uv_int[:, 0])]
|
| 223 |
vertex_category = ade_seg[(uv_int[:, 1] , uv_int[:, 0])]
|
| 224 |
+
target_color = set([(120,120,120), (180, 120, 120), (255,9,224)])
|
| 225 |
+
#filter_ind = [i for i, ele in enumerate(vertex_category) if tuple(ele) in target_color]
|
| 226 |
+
filter_ind = []
|
| 227 |
+
for i, ele in enumerate(vertex_category):
|
| 228 |
+
if tuple(ele) in target_color:
|
| 229 |
+
filter_ind.append(i)
|
| 230 |
+
|
| 231 |
print(f'retain {len(filter_ind)} idx')
|
| 232 |
print(vertex_category[filter_ind])
|
| 233 |
+
#print(vertices)
|
| 234 |
+
#print(filter_ind)
|
| 235 |
+
vertices = [vertices[i] for i in filter_ind]
|
| 236 |
+
return uv[filter_ind], vertex_depth[filter_ind], vertex_category[filter_ind], vertices
|
| 237 |
|
| 238 |
def merge_vertices_3d(vert_edge_per_image, th=0.1):
|
| 239 |
'''Merge vertices that are close to each other in 3D space and are of same types'''
|
|
|
|
| 328 |
gest_seg_np = np.array(gest_seg).astype(np.uint8)
|
| 329 |
# Metric3D
|
| 330 |
depth_np = np.array(depth) / 2.5 # 2.5 is the scale estimation coefficient
|
| 331 |
+
#vertices, connections = get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th = 20.)
|
| 332 |
+
vertices, connections = get_vertices_and_edges_from_two_segmentations(ade_seg_np, gest_seg_np, edge_th = 20.)
|
| 333 |
+
|
| 334 |
if (len(vertices) < 2) or (len(connections) < 1):
|
| 335 |
print (f'Not enough vertices or connections in image {i}')
|
| 336 |
vert_edge_per_image[i] = np.empty((0, 2)), [], np.empty((0, 3))
|
| 337 |
continue
|
| 338 |
+
uv, depth_vert = get_uv_depth(vertices, depth_np)
|
| 339 |
+
#uv, depth_vert, ade_category_vert, vertices = get_uv_dept_category(vertices, depth_np, ade_seg_np)
|
| 340 |
# Normalize the uv to the camera intrinsics
|
| 341 |
xy_local = np.ones((len(uv), 3))
|
| 342 |
xy_local[:, 0] = (uv[:, 0] - K[0,2]) / K[0,0]
|