import torch
import torch.nn.functional as F

from pytorch3d.renderer import look_at_view_transform


# borrowed from https://github.com/daniilidis-group/neural_renderer/blob/master/neural_renderer/vertices_to_faces.py
def face_vertices(vertices, faces):
    """ 
    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of faces, 3, 3]
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)

    bs, nv = vertices.shape[:2]
    device = vertices.device
    # offset each batch's face indices so they index the flattened vertex list
    faces = faces + (torch.arange(bs, dtype=torch.int32, device=device) * nv)[:, None, None]
    vertices = vertices.reshape((bs * nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return vertices[faces.long()]
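
# Usage sketch (added for illustration, not part of the original code): gather
# the per-face vertex coordinates of a single triangle.
def _example_face_vertices():
    vertices = torch.tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])  # [1, 3, 3]
    faces = torch.tensor([[[0, 1, 2]]])                                    # [1, 1, 3]
    per_face = face_vertices(vertices, faces)
    assert per_face.shape == (1, 1, 3, 3)  # [bs, nf, 3 vertices, xyz]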
    
def vertex_normals(vertices, faces):
    """
    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of vertices, 3]
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)
    bs, nv = vertices.shape[:2]
    device = vertices.device
    normals = torch.zeros(bs * nv, 3, device=device)

    # offset face indices into the flattened [bs * nv, 3] vertex list
    faces = faces + (torch.arange(bs, dtype=torch.int32, device=device) * nv)[:, None, None]
    vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]

    faces = faces.reshape(-1, 3)
    vertices_faces = vertices_faces.reshape(-1, 3, 3)

    # accumulate the (area-weighted) face normal at each of a face's three vertices
    normals.index_add_(0, faces[:, 1].long(),
                       torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1], dim=1))
    normals.index_add_(0, faces[:, 2].long(),
                       torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2], dim=1))
    normals.index_add_(0, faces[:, 0].long(),
                       torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0], dim=1))

    normals = F.normalize(normals, eps=1e-6, dim=1)
    normals = normals.reshape((bs, nv, 3))
    return normals
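
# Usage sketch (illustrative): for a single triangle in the xy-plane with
# counter-clockwise winding, every vertex normal comes out as +z.
def _example_vertex_normals():
    vertices = torch.tensor([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])  # [1, 3, 3]
    faces = torch.tensor([[[0, 1, 2]]])                                    # [1, 1, 3]
    normals = vertex_normals(vertices, faces)                              # [1, 3, 3]
    assert torch.allclose(normals, torch.tensor([0., 0., 1.]).expand(1, 3, 3))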

def batch_orth_proj(X, camera):
    """ orthographic (weak-perspective) projection
        X: 3d vertices, [bz, n_point, 3]
        camera: scale and translation, [bz, 3], [scale, tx, ty]
    """
    camera = camera.clone().view(-1, 1, 3)
    X_trans = X[:, :, :2] + camera[:, :, 1:]        # translate x and y
    X_trans = torch.cat([X_trans, X[:, :, 2:]], 2)  # z passes through untranslated
    Xn = camera[:, :, 0:1] * X_trans                # scale all three coordinates
    return Xn
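
# Usage sketch (illustrative): the weak-perspective camera shifts x, y by
# (tx, ty) and then scales all three coordinates by s (note z is scaled too).
def _example_batch_orth_proj():
    X = torch.tensor([[[1., 1., 1.]]])        # [1, 1, 3]
    camera = torch.tensor([[2., 0.5, -0.5]])  # [s, tx, ty]
    Xn = batch_orth_proj(X, camera)
    assert torch.allclose(Xn, torch.tensor([[[3., 1., 2.]]]))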


def weak_cam2persp_cam(wcam, focal_length=12, z_dist=10):
    """Convert a weak-perspective camera [s, tx, ty] to a perspective camera.

    Args:
        wcam (torch.Tensor): in shape Bx3, for each [s, tx, ty]
        focal_length (int, optional): perspective camera focal length. Defaults to 12.
        z_dist (int, optional): perspective camera at (0, 0, z). Defaults to 10.

    Returns:
        R, T: rotation matrix and translation vector (w2c)
    """
    bz = wcam.shape[0]
    R, T = look_at_view_transform(dist=z_dist, device=wcam.device)
    R = R.repeat(bz, 1, 1)
    T = T.repeat(bz, 1)
    # a larger weak-perspective scale s means the camera sits closer: z = f / s
    T[:, 2] = focal_length / wcam[:, 0]
    T[:, 1] = wcam[:, 2]
    T[:, 0] = -wcam[:, 1]
    return R, T
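
# Usage sketch, assuming the convention that the full field of view satisfies
# tan(fov / 2) = 1 / focal_length (consistent with tanfov = 1/12 in the
# functions below). FoVPerspectiveCameras is a real pytorch3d class; pairing
# it with this R, T is an assumption, not taken from the original code.
def _example_weak_cam2persp_cam():
    import math
    from pytorch3d.renderer import FoVPerspectiveCameras
    wcam = torch.tensor([[1.0, 0.0, 0.0]])  # [s, tx, ty]
    R, T = weak_cam2persp_cam(wcam, focal_length=12, z_dist=10)
    fov = 2 * math.degrees(math.atan(1 / 12))
    return FoVPerspectiveCameras(R=R, T=T, fov=fov, device=wcam.device)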

def cam2persp_cam_fov(wcam, tanfov=1/12):
    """Scale by changing the camera z-distance while keeping the FoV unchanged.

    Returns:
        R, T: rotation matrix and translation vector (w2c)
    """
    # Blender coordinates to pytorch3d coordinates: flip x and z
    flx = -1.0
    fly = 1.0
    flz = -1.0
    bz = wcam.shape[0]
    z_dist = 1 / tanfov
    R = torch.tensor([[flx, 0, 0],
                      [0, fly, 0],
                      [0, 0, flz]], device=wcam.device, dtype=torch.float32)
    T = torch.tensor([0, 0, z_dist], device=wcam.device, dtype=torch.float32)
    R = R.repeat(bz, 1, 1)
    T = T.repeat(bz, 1)
    T[:, 2] = T[:, 2] / wcam[:, 0]  # larger scale -> camera moves closer
    T[:, 1] = wcam[:, 2] * fly
    T[:, 0] = wcam[:, 1] * flx
    return R, T

def cam2persp_cam_fov_body(wcam, tanfov=1/12):
    """Scale by changing the camera z-distance while keeping the FoV unchanged.

    Returns:
        R, T: rotation matrix and translation vector (w2c)
    """
    # image coordinates to pytorch3d coordinates: flip x and y
    flx = -1.0
    fly = -1.0
    flz = 1.0
    bz = wcam.shape[0]
    z_dist = 1 / tanfov
    R = torch.tensor([[flx, 0, 0],
                      [0, fly, 0],
                      [0, 0, flz]], device=wcam.device, dtype=torch.float32)
    T = torch.tensor([0, 0, z_dist], device=wcam.device, dtype=torch.float32)
    R = R.repeat(bz, 1, 1)
    T = T.repeat(bz, 1)
    T[:, 2] = T[:, 2] / wcam[:, 0]
    T[:, 1] = wcam[:, 2] * fly  # not needed
    T[:, 0] = wcam[:, 1] * flx  # not needed
    return R, T
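
# Usage sketch (illustrative): both FoV variants pair naturally with a
# pytorch3d FoVPerspectiveCameras whose fov matches tanfov; the exact pairing
# is an assumption, not taken from the original code.
def _example_cam2persp_cam_fov(tanfov=1/12):
    import math
    from pytorch3d.renderer import FoVPerspectiveCameras
    wcam = torch.tensor([[1.0, 0.0, 0.0]])  # [s, tx, ty]
    R, T = cam2persp_cam_fov(wcam, tanfov=tanfov)
    fov = 2 * math.degrees(math.atan(tanfov))
    return FoVPerspectiveCameras(R=R, T=T, fov=fov, device=wcam.device)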