|
|
import open3d as o3d |
|
|
import matplotlib.pyplot as plt |
|
|
import numpy as np |
|
|
import os |
|
|
import math |
|
|
import torch |
|
|
from PIL import Image |
|
|
import random |
|
|
from einops import einsum |
|
|
|
|
|
|
|
|
@torch.no_grad()
def get_frustum_mask(points, H, W, intrinsics, view_matrices, near = 0.02, far = 10.):
    """Return a boolean mask of the 3-D points that lie inside a camera frustum.

    Each world-space point is transformed into the camera frame with
    ``view_matrices``, projected through the pinhole ``intrinsics``, and kept
    only if its camera-frame depth lies in ``[near, far]`` and its pixel
    coordinates land inside the ``H`` x ``W`` image.

    Args:
        points: (N, 3) torch tensor of world-space points.
        H, W: image height and width in pixels.
        intrinsics: (3, 3) pinhole intrinsic matrix; torch tensor, numpy
            array, or nested lists.
        view_matrices: (4, 4) world-to-camera matrix; torch tensor, numpy
            array, or nested lists.
        near, far: inclusive depth range to keep, in the units of ``points``.

    Returns:
        (N,) boolean torch tensor, True where the point is inside the frustum.
    """
    # BUG FIX: the original used einops.einsum, which requires every operand
    # to come from the same backend; the call site passes numpy matrices with
    # a torch point tensor and crashed. Coerce the matrices to torch tensors
    # matching the points' dtype/device, then use plain torch matmuls.
    intrinsics = torch.as_tensor(intrinsics, dtype=points.dtype, device=points.device)
    view_matrices = torch.as_tensor(view_matrices, dtype=points.dtype, device=points.device)

    # Homogeneous coordinates, then world -> camera frame.
    ones = torch.ones_like(points[:, :1])
    homo_points = torch.cat([points, ones], dim=-1)        # (N, 4)
    view_points = (homo_points @ view_matrices.T)[:, :3]   # (N, 3)

    # Pinhole projection. A point at z == 0 divides to inf/nan, which fails
    # every comparison below and is therefore rejected.
    uv_points = view_points @ intrinsics.T                 # (N, 3)
    z = uv_points[:, -1:]
    uv = uv_points[:, :2] / z
    u, v = uv[:, 0], uv[:, 1]

    depth = view_points[:, -1]
    cull_near_fars = (depth >= near) & (depth <= far)

    # Inside the image bounds (inclusive of the last pixel index).
    mask = cull_near_fars & (u >= 0) & (u <= W-1) & (v >= 0) & (v <= H-1)
    return mask
|
|
|
|
|
|
|
|
def config_parser():
    """Build the command-line / config-file argument parser for this script."""
    import configargparse

    parser = configargparse.ArgumentParser()

    # One row per option: (flag, type, default, help text).
    option_table = [
        ("--near", float, 0., 'near distance'),
        ("--far", float, 10., 'far distance'),
        ("--camera_height", int, 24, 'height of the feature map'),
        ("--camera_width", int, 24, 'width of the feature map'),
        ("--feature_fields_search_radius", float, 1., 'search radius for near features'),
        ("--feature_fields_search_num", int, 4, 'The number of searched near features'),
        ("--mlp_net_layers", int, 8, 'layers in mlp network'),
        ("--mlp_net_width", int, 768, 'channels per layer in mlp net'),
        ("--N_samples", int, 512, 'number of coarse samples per ray'),
        ("--N_importance", int, 16, 'number of fine samples per ray'),
    ]
    for flag, arg_type, default, help_text in option_table:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)

    return parser
|
|
|
|
|
|
|
|
# Parse options; tolerate unknown flags so a shared config file can carry
# settings for other tools as well.
parser = config_parser()
args, unknown = parser.parse_known_args()

# 4x4 depth-camera intrinsic matrix, stored as 4 whitespace-separated rows of
# 4 numbers (np.loadtxt replaces the original hand-rolled nested parse loop;
# assumes the file is exactly 4x4 — TODO confirm against the dataset).
camera_intrinsic = np.loadtxt('scannet_train_images/frames_square/scene0000_00/intrinsic_depth.txt')
# Rescale focal lengths from the native 320x240 depth resolution to the
# feature-map resolution used for ray sampling.
camera_intrinsic[0][0] *= args.camera_width / 320
camera_intrinsic[1][1] *= args.camera_height / 240

# Build one point cloud holding N_samples constant-depth planes, evenly
# spaced between near and far: every pixel of a constant depth image
# back-projects onto one sample plane of the camera frustum.
N_spacing = (args.far - args.near) / args.N_samples
sampled_points = o3d.geometry.PointCloud()
for N_index in range(args.N_samples):
    N_distance = args.near + N_spacing * (N_index+1)
    N_depth = np.full((args.camera_height,args.camera_width),N_distance,dtype=np.float32)
    N_depth = o3d.geometry.Image(N_depth)
    # BUG FIX: depth_trunc was hard-coded to 1.0, which silently discarded
    # every sample plane farther than 1 m even though planes are generated
    # out to args.far; truncate at the far plane instead.
    N_points = o3d.geometry.PointCloud.create_from_depth_image(
        N_depth,
        o3d.camera.PinholeCameraIntrinsic(
            args.camera_width, args.camera_height,
            camera_intrinsic[0][0]/2., camera_intrinsic[1][1]/2.,
            args.camera_width/2, args.camera_height/2),
        depth_scale=1., depth_trunc=args.far)
    sampled_points += N_points

# Working copy of the sample grid; its points are re-posed per target view
# later in the script.
points_along_rays = o3d.geometry.PointCloud()
points_along_rays += sampled_points
|
|
|
|
|
# Candidate ScanNet scene directories: scene0000_00/ through scene0799_00/.
scene_list = [
    f'scannet_train_images/frames_square/scene{idx:04d}_00/'
    for idx in range(800)
]
|
|
|
|
|
# For every scene: pick one random "target" view, fuse every other view's
# RGB-D frame into a world-space point cloud, and move the precomputed ray
# sample grid into the target view's pose.
for scene_id in scene_list:
    # Frames are stored every 20 indices (0.jpg, 20.jpg, ...); stop at the
    # first missing color file.
    image_list = []
    for image_id in range(1000):
        image_id = image_id * 20
        image_path = scene_id + 'color/' + str(image_id) + ".jpg"
        if not os.path.exists(image_path):
            break
        image_list.append(str(image_id))

    # Cap the number of views used per scene.
    image_list = image_list[:30]

    # NOTE(review): random.choice raises IndexError if a scene directory has
    # no color frames at all (empty image_list) — confirm every scene has at
    # least one frame.
    target_image = random.choice(image_list)
    pcd_all = o3d.geometry.PointCloud()
    for image_id in image_list:
        # 4x4 depth intrinsics parsed from whitespace-separated text.
        intrinsic = np.eye(4)
        with open(scene_id + 'intrinsic_depth.txt', 'r') as file:
            intrinsic_raw = [line.strip() for line in file]
        for i in range(4):
            for j in range(4):
                intrinsic[i][j] = float(intrinsic_raw[i].split()[j])

        # 4x4 pose of this frame (applied below as R @ p + T to map local
        # points into world space, i.e. treated as camera-to-world).
        extrinsic = np.eye(4)
        with open(scene_id + 'pose/' + image_id + '.txt', 'r') as file:
            extrinsic_raw = [line.strip() for line in file]

        for i in range(4):
            for j in range(4):
                extrinsic[i][j] = float(extrinsic_raw[i].split()[j])

        R = extrinsic[:3,:3]
        T = extrinsic[:3,3:4]
        if image_id == target_image:
            # Target view: do not fuse its RGB-D data; instead pose the
            # precomputed sample grid into this view's world placement.
            points = np.asarray(sampled_points.points)
            points = (R @ points.T + T).T
            points_along_rays.points = o3d.utility.Vector3dVector(points)
            continue

        # Source view: read color + depth (depth PNGs in millimeters, hence
        # depth_scale=1000) and back-project to a local point cloud.
        color_raw = o3d.io.read_image(scene_id + 'color/' + image_id + ".jpg")
        depth_raw = o3d.io.read_image(scene_id + 'depth/' + image_id + ".png")
        rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw, depth_scale=1000.0, depth_trunc=1000.0, convert_rgb_to_intensity=False)

        # Halve the intrinsics — presumably because the frames_square images
        # are 320x240, half the original 640x480 capture — TODO confirm.
        intrinsic[0][0],intrinsic[1][1],intrinsic[0][2],intrinsic[1][2] = intrinsic[0][0]/2,intrinsic[1][1]/2,intrinsic[0][2]/2,intrinsic[1][2]/2

        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image,o3d.camera.PinholeCameraIntrinsic(320,240,intrinsic[0][0],intrinsic[1][1],intrinsic[0][2],intrinsic[1][2]))

        # Transform the local cloud into world space and accumulate.
        points = np.asarray(pcd.points)

        points = (R @ points.T + T).T
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd_all += pcd

    # NOTE(review): several concerns here, confirm intent —
    #  * `intrinsic` / `extrinsic` are whatever the LAST iterated view left
    #    behind (and `intrinsic` is already halved), not the target view's;
    #    the frustum test is presumably meant for the target view.
    #  * numpy matrices are passed alongside a torch tensor; with the
    #    einops-based get_frustum_mask this mixes backends — verify.
    #  * `mask` is never used afterwards.
    mask = get_frustum_mask(torch.tensor(np.array(pcd_all.points)), 240, 320, intrinsic[:3,:3], np.linalg.inv(extrinsic), near = 0.02, far = 10.)

# Visualizes only the last scene's fused cloud (pcd_all from the final loop
# iteration).
o3d.visualization.draw_geometries([pcd_all])