Datasets:

DOI:
License:
ToF-360 / assets /preprocessing /depth2normal.py
kanayamaHideaki's picture
Add semantics, instances, layout_eval, and preprocessing; modify README.md.
324d4da
# common
import numpy as np
import open3d as o3d
import os
import glob
import argparse
import yaml
import cv2
from tqdm import tqdm
def config_setup():
    """Return the run configuration for depth-to-normal conversion."""
    return {
        "home_param": "<scene>/",  # scene root folder (placeholder path)
        "depth_max": 10,           # metres; larger depths are clamped
        "depth_min": 0.5,          # metres; smaller depths are treated as invalid
    }
def load_depth(depth_path, config):
    """Load a 16-bit depth PNG and convert it to metres.

    Depth encoding follows Stanford-2D-3D-S
    (http://buildingparser.stanford.edu/images/2D-3D-S_2017.pdf): one unit of
    change in pixel intensity (e.g. 45 -> 46) corresponds to 1/512 m of depth.

    Args:
        depth_path: path to the 16-bit depth image.
        config: dict providing "depth_min" and "depth_max" bounds in metres.

    Returns:
        (H, W) float array of depths in metres; values above depth_max are
        clamped to depth_max, values below depth_min are zeroed (invalid).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    raw = cv2.imread(depth_path, cv2.IMREAD_ANYDEPTH)
    if raw is None:
        # cv2.imread silently returns None on a missing/unreadable file,
        # which would otherwise surface as a confusing TypeError below.
        raise FileNotFoundError(f"cannot read depth image: {depth_path}")
    depth_img = raw / 512
    # Valid depth is defined on [depth_min, depth_max].
    depth_img[depth_img > config["depth_max"]] = config["depth_max"]
    depth_img[depth_img < config["depth_min"]] = 0
    return depth_img
def equi2pcd(depth, config=None):
    """Convert an equirectangular depth image into an Open3D point cloud.

    Args:
        depth: (H, W) array of depths in metres (zero-depth pixels map to
            the origin).
        config: unused; accepted for backward compatibility with call sites
            that pass the run configuration (main() called this with two
            arguments, which previously raised TypeError).

    Returns:
        o3d.geometry.PointCloud containing H*W points.
    """
    H, W = depth.shape
    # Intrinsic matrix, then normalized by the image dimensions.
    int_mtx = np.array([[max(H, W), 0, W / 2],
                        [0, max(H, W), H / 2],
                        [0, 0, 1]])
    if int_mtx.max() > 1:
        int_mtx[0, :] = int_mtx[0, :] / float(W)
        int_mtx[1, :] = int_mtx[1, :] / float(H)
    # Pixel-space intrinsics (computed once; the original duplicated this line).
    int_mtx_pix = int_mtx * np.array([[W], [H], [1.]])
    cam_param_pix_inv = np.linalg.inv(int_mtx_pix)
    k_00, k_02 = cam_param_pix_inv[0, 0], cam_param_pix_inv[0, 2]
    k_11, k_12 = cam_param_pix_inv[1, 1], cam_param_pix_inv[1, 2]
    # Convert the depth-image meshgrid to xyz 3D coordinates.
    xyz = np.zeros((H * W, 3))
    sx = np.arange(H).repeat(W)                               # row index per pixel
    sy = np.arange(W)[None, :].repeat(H, axis=0).reshape(-1)  # column index per pixel
    sd = depth.reshape(-1)
    yaw = 2 * np.pi * ((sy + 0.5) * k_00 + k_02)    # yaw mapped to the [-pi, pi] range
    pitch = 2 * np.pi * ((sx + 0.5) * k_11 + k_12)  # pitch mapped to the [-pi/2, pi/2] range
    xyz[:, 0] = np.cos(pitch) * np.sin(yaw) * abs(sd)
    xyz[:, 1] = np.sin(pitch) * abs(sd)
    xyz[:, 2] = np.cos(pitch) * np.cos(yaw) * abs(sd)
    # Wrap as an Open3D point cloud.
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    return pcd
def pcd2normalimg(pcd, depth):
    """Estimate per-point surface normals from the point cloud.

    Normals are computed by Open3D on the point cloud, aligned to face the
    camera, and the z component is negated because Stanford-2D-3D-S uses a
    left-handed system while Open3D is right-handed.

    Args:
        pcd: o3d.geometry.PointCloud built from the depth image.
        depth: (H, W) depth array; kept for interface compatibility
            (the original computed its shape but never used it).

    Returns:
        (N, 3) array of unit normals, camera-facing, left-handed.
    """
    pcd.estimate_normals()
    normal = np.asarray(pcd.normals)
    normal = normal_align(normal, pcd)
    # Flip handedness: Stanford-2D-3D-S is left-handed, Open3D right-handed.
    normal[:, 2] *= -1
    return normal
def normal_align(normal, pcd):
    """Flip normals (in place) so every vector points toward the camera.

    The camera sits at the origin, so a normal faces away from the camera
    exactly when its dot product with the point-to-origin vector is negative.
    """
    pts = np.asarray(pcd.points)
    to_cam = -pts  # origin minus point position
    facing_away = (to_cam * normal).sum(axis=1) < 0
    normal[facing_away] *= -1
    return normal
def main():
    """Convert every depth PNG under <scene>/depth/ to a normal-map PNG."""
    config = config_setup()
    print("home_path:", config["home_param"])
    save_folder_path = config["home_param"] + "normal/"
    # makedirs(exist_ok=True) also creates missing parents and does not fail
    # when the folder already exists (os.mkdir did both).
    os.makedirs(save_folder_path, exist_ok=True)
    # Search depth images defined as png
    depth_paths = sorted(glob.glob(config["home_param"] + "depth/*.png"))
    for idx, depth_path in tqdm(enumerate(depth_paths)):
        print("\n")
        print("depth file:", os.path.basename(depth_path))
        depth = load_depth(depth_path, config)
        # Downsample 4x to reduce the normal-estimation cost.
        H, W = depth.shape[0] // 4, depth.shape[1] // 4
        depth_img = cv2.resize(depth, (W, H), interpolation=cv2.INTER_NEAREST)
        # BUG FIX: equi2pcd takes only the depth image; passing config as a
        # second positional argument raised TypeError.
        pcd = equi2pcd(depth_img)
        normal = pcd2normalimg(pcd, depth_img)
        # Attach normals/colors so the point cloud itself visualizes the normals.
        pcd.normals = o3d.utility.Vector3dVector(normal)
        pcd.colors = o3d.utility.Vector3dVector((normal + 1) / 2)
        # o3d.io.write_point_cloud(save_folder_path + f"{idx:03d}_" + "equi_normal.ply", pcd)
        save_path = save_folder_path + f"{idx:03d}_" + "equi_normal.png"
        print("output image:", os.path.basename(save_path))
        # Map normals from [-1, 1] to 8-bit values centred at 127.5.
        normal_img = 127.5 * (normal.reshape(H, W, 3) + 1.)
        # Grey out invalid pixels (outliers / blanks with depth < depth_min).
        normal_img[depth_img < config["depth_min"], :] = [128, 128, 128]
        img_color = cv2.resize(
            cv2.cvtColor(normal_img.astype(np.uint8), cv2.COLOR_RGB2BGR),
            (depth.shape[1], depth.shape[0]),
            interpolation=cv2.INTER_NEAREST,
        )
        cv2.imwrite(save_path, img_color)
# Run the conversion only when executed as a script (not on import).
if __name__ == "__main__":
    main()