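"""Fuse ScanNet frames_square RGB-D frames into per-scene point clouds, sample
points along the rays of a held-out target view, and test frustum culling of
the fused cloud against each frame (Open3D + PyTorch/einops)."""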
import os
import random

import numpy as np
import open3d as o3d
import torch
from einops import einsum


@torch.no_grad()
def get_frustum_mask(points, H, W, intrinsics, view_matrices, near=0.02, far=10.):
    """Return a boolean mask of the points that lie inside a camera frustum.

    points:        (N, 3) world-space points (torch tensor).
    intrinsics:    (3, 3) pinhole intrinsic matrix (torch tensor).
    view_matrices: (4, 4) world-to-camera view matrix (torch tensor).
    """
    # Lift to homogeneous coordinates so the 4x4 view matrix applies directly.
    ones = torch.ones_like(points[:, 0]).unsqueeze(-1)
    homo_points = torch.cat([points, ones], dim=-1)

    # World -> camera coordinates.
    view_points = einsum(view_matrices, homo_points, "b c, N c -> N b")
    view_points = view_points[:, :3]

    # Camera -> pixel coordinates, with the perspective divide by z.
    uv_points = einsum(intrinsics, view_points, "b c, N c -> N b")
    z = uv_points[:, -1:]
    uv_points = uv_points[:, :2] / z
    u, v = uv_points[:, 0], uv_points[:, 1]
    depth = view_points[:, -1]

    # Keep points between the near and far planes...
    cull_near_fars = (depth >= near) & (depth <= far)

    # ...whose projections also land inside the W x H image.
    mask = cull_near_fars & (u >= 0) & (u <= W - 1) & (v >= 0) & (v <= H - 1)
    return mask

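# Usage sketch: the intrinsics and poses below are loaded as numpy arrays, so
# they are converted to tensors first, e.g.
#   mask = get_frustum_mask(pts, 240, 320,
#                           torch.from_numpy(K), torch.from_numpy(world_to_cam))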

def config_parser():

    import configargparse
    parser = configargparse.ArgumentParser()


    # training options
    parser.add_argument("--near", type=float, default=0., 
                        help='near distance')
    parser.add_argument("--far", type=float, default=10., 
                        help='far distance')
    parser.add_argument("--camera_height", type=int, default=24, 
                        help='height of the feature map')
    parser.add_argument("--camera_width", type=int, default=24, 
                        help='width of the feature map')
    parser.add_argument("--feature_fields_search_radius", type=float, default=1., 
                        help='search radius for near features')
    parser.add_argument("--feature_fields_search_num", type=int, default=4, 
                        help='The number of searched near features')
    parser.add_argument("--mlp_net_layers", type=int, default=8, 
                        help='layers in mlp network')
    parser.add_argument("--mlp_net_width", type=int, default=768, 
                        help='channels per layer in mlp net')

    # rendering options
    parser.add_argument("--N_samples", type=int, default=512, 
                        help='number of coarse samples per ray')
    parser.add_argument("--N_importance", type=int, default=16,
                        help='number of fine samples per ray')

    return parser


parser = config_parser()
# parse_known_args() tolerates extra flags (e.g. notebook/launcher arguments).
args, unknown = parser.parse_known_args()

# Load the 4x4 depth intrinsics for the template camera and scale fx, fy toward
# the feature-map resolution (the /2 for the 640x480 -> 320x240 resize is
# applied where the template points are created below).
camera_intrinsic = np.loadtxt('scannet_train_images/frames_square/scene0000_00/intrinsic_depth.txt')
camera_intrinsic[0][0] *= args.camera_width / 320
camera_intrinsic[1][1] *= args.camera_height / 240

# Sample N_samples depth planes evenly spaced in (near, far] and back-project
# each one, building a template cloud of points along every camera ray.
N_spacing = (args.far - args.near) / args.N_samples
sampled_points = o3d.geometry.PointCloud()
for N_index in range(args.N_samples):
    N_distance = args.near + N_spacing * (N_index + 1)
    N_depth = o3d.geometry.Image(np.full((args.camera_height, args.camera_width), N_distance, dtype=np.float32))
    # depth_trunc must exceed the sampled distances (depth_trunc=1. would drop every plane beyond 1 m).
    N_points = o3d.geometry.PointCloud.create_from_depth_image(
        N_depth,
        o3d.camera.PinholeCameraIntrinsic(args.camera_width, args.camera_height,
                                          camera_intrinsic[0][0] / 2., camera_intrinsic[1][1] / 2.,
                                          args.camera_width / 2, args.camera_height / 2),
        depth_scale=1., depth_trunc=args.far + 1.)
    sampled_points += N_points
# Working copy that is re-posed with each scene's target-view extrinsics.
points_along_rays = o3d.geometry.PointCloud()
points_along_rays += sampled_points

# Scenes scene0000_00 .. scene0799_00.
scene_list = []
for i in range(800):
    scene_list.append(f'scannet_train_images/frames_square/scene{i:04d}_00/')

for scene_id in scene_list:
    # Collect every 20th frame until the first missing file.
    image_list = []
    for frame_index in range(1000):
        image_id = frame_index * 20
        image_path = scene_id + 'color/' + str(image_id) + ".jpg"
        if not os.path.exists(image_path):
            break
        image_list.append(str(image_id))

    image_list = image_list[:30]
    #image_list = random.sample(image_list, min(30, len(image_list)))
    if not image_list:  # skip scenes that are missing on disk
        continue
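    # Hold out one view as the rendering target: its pose will place the
    # template ray samples, and its RGB-D frame is not fused below.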
    target_image = random.choice(image_list)
    pcd_all = o3d.geometry.PointCloud() 
    for image_id in image_list:
        # Raw depth intrinsics (re-read per frame because they are halved below).
        intrinsic = np.loadtxt(scene_id + 'intrinsic_depth.txt')

        # Camera-to-world pose of this frame.
        extrinsic = np.loadtxt(scene_id + 'pose/' + image_id + '.txt')

        R = extrinsic[:3, :3]
        T = extrinsic[:3, 3:4]
        if image_id == target_image:
            # For the held-out target view, pose the template ray samples
            # instead of fusing its RGB-D frame.
            points = np.asarray(sampled_points.points)
            points = (R @ points.T + T).T
            points_along_rays.points = o3d.utility.Vector3dVector(points)
            continue

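        # Back-project this RGB-D frame into a colored point cloud in camera
        # coordinates (ScanNet stores depth in millimeters, hence depth_scale=1000).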
        color_raw = o3d.io.read_image(scene_id + 'color/' + image_id + ".jpg")
        depth_raw = o3d.io.read_image(scene_id + 'depth/' + image_id + ".png")
        rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color_raw, depth_raw, depth_scale=1000.0, depth_trunc=1000.0,
            convert_rgb_to_intensity=False)

        # The frames were resized from 640x480 to 320x240, so halve fx, fy, cx, cy.
        intrinsic[0][0], intrinsic[1][1] = intrinsic[0][0] / 2, intrinsic[1][1] / 2
        intrinsic[0][2], intrinsic[1][2] = intrinsic[0][2] / 2, intrinsic[1][2] / 2

        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd_image,
            o3d.camera.PinholeCameraIntrinsic(320, 240, intrinsic[0][0], intrinsic[1][1],
                                              intrinsic[0][2], intrinsic[1][2]))
        #pcd = o3d.geometry.PointCloud.create_from_depth_image(depth_raw, o3d.camera.PinholeCameraIntrinsic(320,240,intrinsic[0][0],intrinsic[1][1],intrinsic[0][2],intrinsic[1][2]), depth_scale=1000.0, depth_trunc=1000.0)

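        # Move the camera-frame points into world coordinates and fuse them
        # into the running scene cloud.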
        points = np.asarray(pcd.points)
        points = (R @ points.T + T).T
        pcd.points = o3d.utility.Vector3dVector(points)
        pcd_all += pcd
        #o3d.visualization.draw_geometries([pcd_all])

        # get_frustum_mask expects torch tensors; inverting the camera-to-world
        # pose gives the world-to-camera view matrix. The mask flags which fused
        # points are visible from the current frame (not used further here).
        mask = get_frustum_mask(
            torch.from_numpy(np.asarray(pcd_all.points)), 240, 320,
            torch.from_numpy(intrinsic[:3, :3]),
            torch.from_numpy(np.linalg.inv(extrinsic)),
            near=0.02, far=10.)

        #exit()
    #pcd_all += points_along_rays

    # Visualize the fused scene cloud as a sanity check.
    o3d.visualization.draw_geometries([pcd_all])