# Dynam3D / data/scene_datasets/ARKitScenes/generate_pcd.py
# Uploaded by MrZihanWang via the upload-large-folder tool (commit fb6f9ba).
import open3d as o3d
import matplotlib.pyplot as plt
import numpy as np
import os
import math
import torch
from PIL import Image
import random
from einops import einsum
import cv2
@torch.no_grad()
def get_frustum_mask(points, H, W, intrinsics, view_matrices, near=0.02, far=10.):
    """Return a boolean mask of the points that fall inside a camera frustum.

    Args:
        points: (N, 3) tensor of world-space points.
        H, W: image height and width in pixels; projected pixel coordinates
            must land in [0, W-1] x [0, H-1] to survive the mask.
        intrinsics: (3, 3) pinhole intrinsic matrix.
        view_matrices: (4, 4) world-to-camera (view) matrix.
        near, far: camera-space depth range kept by the mask.

    Returns:
        (N,) boolean tensor: True where the point projects inside the image
        and its camera-space depth lies in [near, far].
    """
    # Homogenize and transform into camera space: (V @ p_h^T)^T == p_h @ V^T.
    # (The original used einops.einsum for these two plain matrix products;
    # native matmul does the same without the extra dependency.)
    ones = torch.ones_like(points[:, :1])
    homo_points = torch.cat([points, ones], dim=-1)
    view_points = (homo_points @ view_matrices.T)[:, :3]

    # Project with the intrinsics and dehomogenize to pixel coordinates.
    uv_points = view_points @ intrinsics.T
    z = uv_points[:, -1:]
    uv_points = uv_points[:, :2] / z
    u, v = uv_points[:, 0], uv_points[:, 1]

    # Camera-space depth is the z component (camera looks down +z here).
    depth = view_points[:, -1]
    cull_near_fars = (depth >= near) & (depth <= far)
    # Points at/behind z == 0 yield NaN or mirrored u,v after the division;
    # the depth test (and NaN comparing False) culls them anyway.
    mask = cull_near_fars & (u >= 0) & (u <= W - 1) & (v >= 0) & (v <= H - 1)
    return mask
# from ARKitScene code base
def convert_angle_axis_to_matrix3(angle_axis):
    """Return a Matrix3 for the angle axis.

    Equivalent to ``cv2.Rodrigues(angle_axis)[0]`` but implemented with the
    Rodrigues rotation formula in plain NumPy, so this block no longer needs
    OpenCV and handles the zero-rotation edge case explicitly.

    Arguments:
        angle_axis {Point3} -- a rotation in angle axis form: the direction is
            the rotation axis, the norm is the rotation angle in radians.
    """
    angle_axis = np.asarray(angle_axis, dtype=np.float64).reshape(3)
    theta = np.linalg.norm(angle_axis)
    if theta < 1e-12:
        # Zero (or numerically zero) rotation -> identity.
        return np.eye(3)
    x, y, z = angle_axis / theta
    # Skew-symmetric cross-product matrix of the unit axis.
    K = np.array([[0.0, -z, y],
                  [z, 0.0, -x],
                  [-y, x, 0.0]])
    # Rodrigues: R = I + sin(theta) * K + (1 - cos(theta)) * K^2
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)
# from ARKit Scene, some with modifications
def TrajStringToMatrix(traj_str):
    """Convert one line of an ARKitScenes .traj file into a 4x4 pose matrix.

    Args:
        traj_str: A space-delimited line representing a camera position at a
            particular timestamp, with seven columns:
            * Column 1: timestamp
            * Columns 2-4: rotation (axis-angle representation in radians)
            * Columns 5-7: translation (usually in meters)

    Returns:
        Rt: the 4x4 inverse of the encoded world-to-camera transform
            (i.e. the camera-to-world pose).
    """
    tokens = traj_str.split()
    assert len(tokens) == 7
    values = [float(t) for t in tokens[1:]]
    # Columns 2-4: rotation as an axis-angle vector.
    rotation = convert_angle_axis_to_matrix3(np.asarray(values[:3]))
    # Columns 5-7: translation vector.
    translation = np.asarray(values[3:])
    extrinsics = np.eye(4)
    extrinsics[:3, :3] = rotation
    extrinsics[:3, 3] = translation
    # Callers want the inverse of the stored transform.
    return np.linalg.inv(extrinsics)
def st2_camera_intrinsics(filename):
    """Build a 3x3 pinhole intrinsic matrix from .pincam data.

    Args:
        filename: anything ``np.loadtxt`` accepts — a path, a file-like
            object, or an iterable of strings — holding six values:
            width, height, fx, fy, cx, cy. NOTE(review): the script below
            actually passes the tokenized line, not a path; loadtxt handles
            both.

    Returns:
        The 3x3 matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]; the image
        width/height values are discarded.
    """
    _, _, fx, fy, cx, cy = np.loadtxt(filename)
    return np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
def config_parser():
    """Build the command-line parser for this script.

    Returns a ``configargparse.ArgumentParser`` exposing frustum, feature-field,
    MLP and rendering options; only a subset is consumed by this script.
    """
    import configargparse
    parser = configargparse.ArgumentParser()
    # (flag, type, default, help) — training options first, then rendering
    # options; help strings are kept verbatim.
    option_table = [
        ("--near", float, 0., 'near distance'),
        ("--far", float, 10., 'far distance'),
        ("--camera_height", int, 24, 'height of the feature map'),
        ("--camera_width", int, 24, 'width of the feature map'),
        ("--feature_fields_search_radius", float, 1., 'search radius for near features'),
        ("--feature_fields_search_num", int, 4, 'The number of searched near features'),
        ("--mlp_net_layers", int, 8, 'layers in mlp network'),
        ("--mlp_net_width", int, 768, 'channels per layer in mlp net'),
        ("--N_samples", int, 512, 'number of coarse samples per ray'),
        ("--N_importance", int, 16, 'number of fine samples per ray'),
    ]
    for flag, arg_type, default, help_text in option_table:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser
# ---------------------------------------------------------------------------
# Script body: for each scene under 3dod/Training, back-project up to 30
# randomly chosen low-res depth frames into world space and visualize the
# merged point cloud.
# NOTE(review): the uploaded file lost its indentation; the loop structure
# below is the reconstruction implied by the data flow (``pcd_all`` is reset
# per scene, so the visualization is shown once per scene).
# ---------------------------------------------------------------------------
parser = config_parser()
args, unknown = parser.parse_known_args()  # tolerate unrelated CLI flags

scene_list = os.listdir('3dod/Training')
for scene_id in scene_list:
    frames_dir = '3dod/Training/' + scene_id + '/' + scene_id + '_frames'
    image_dir = frames_dir + '/lowres_wide'
    image_names = sorted(os.listdir(image_dir))

    # One line per frame: "timestamp rx ry rz tx ty tz".
    with open(frames_dir + '/lowres_wide.traj', 'r') as file:
        extrinsic_list = [line.strip() for line in file]

    # Sample up to 30 frames. NOTE(review): this assumes image files and
    # trajectory lines are index-aligned after sorting — verify against the
    # ARKitScenes layout (frames are named by timestamp).
    image_ids = list(range(len(image_names)))
    random.shuffle(image_ids)
    image_ids = image_ids[:30]
    image_list = [image_dir + '/' + image_names[i] for i in image_ids]
    frame_extrinsics = [extrinsic_list[i] for i in image_ids]

    pcd_all = o3d.geometry.PointCloud()
    for image_path, traj_line in zip(image_list, frame_extrinsics):
        # Per-frame intrinsics live in a .pincam file named after the image.
        intrinsic_file = (frames_dir + '/lowres_wide_intrinsics/'
                          + image_path.split('/')[-1][:-4] + '.pincam')
        with open(intrinsic_file, 'r') as file:
            intrinsic_raw = [line.split() for line in file]
        intrinsic = st2_camera_intrinsics(intrinsic_raw[0])

        # Camera-to-world pose for this frame.
        extrinsic = TrajStringToMatrix(traj_line)
        R = extrinsic[:3, :3]
        T = extrinsic[:3, 3:4]

        # Back-project the depth map into a camera-space cloud (the unused
        # color image read and the commented RGBD path were dropped).
        depth_raw = o3d.io.read_image(image_path.replace("lowres_wide", "lowres_depth"))
        pcd = o3d.geometry.PointCloud.create_from_depth_image(
            depth_raw,
            o3d.camera.PinholeCameraIntrinsic(
                256, 192,
                intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2]),
            depth_scale=1000.0, depth_trunc=1000.0)

        # Transform camera-space points into world space and accumulate.
        points = np.asarray(pcd.points)
        points = (R @ points.T + T).T
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd_all += pcd

    # Show the merged cloud for this scene (blocks until the window closes).
    o3d.visualization.draw_geometries([pcd_all])