|
|
import torch |
|
|
import torch.nn as nn |
|
|
from lib.layers import ResnetBlockFC |
|
|
|
|
|
|
|
|
def maxpool(x, dim=-1, keepdim=False):
    ''' Performs a maxpooling operation.

    Args:
        x (tensor): input
        dim (int): dimension of pooling
        keepdim (bool): whether to keep dimensions
    '''
    # torch.max along a dim returns (values, argmax indices); only the
    # pooled values are of interest here.
    values, _indices = torch.max(x, dim=dim, keepdim=keepdim)
    return values
|
|
|
|
|
|
|
|
class SimplePointnet(nn.Module):
    ''' PointNet-based encoder network.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
    '''

    def __init__(self, c_dim=128, dim=3, hidden_dim=512):
        super().__init__()
        self.c_dim = c_dim

        # The input layer lifts each point to 2*hidden_dim; every later
        # layer maps the (feature ++ pooled-feature) pair back down.
        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        self.fc_0 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.fc_1 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.fc_2 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.fc_3 = nn.Linear(2 * hidden_dim, hidden_dim)
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.pool = maxpool

    def _with_pooled(self, net):
        ''' Concatenates the per-cloud max-pooled feature onto each point. '''
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        return torch.cat([net, pooled], dim=2)

    def forward(self, p):
        # p is expected to be (batch, n_points, dim); the unpack doubles
        # as a rank check.
        batch_size, n_points, point_dim = p.size()

        net = self.fc_pos(p)
        net = self._with_pooled(self.fc_0(self.actvn(net)))
        net = self._with_pooled(self.fc_1(self.actvn(net)))
        net = self._with_pooled(self.fc_2(self.actvn(net)))
        net = self.fc_3(self.actvn(net))

        # Reduce to a single feature vector per point cloud.
        net = self.pool(net, dim=1)

        return self.fc_c(self.actvn(net))
|
|
|
|
|
|
|
|
class ResnetPointnet(nn.Module):
    ''' PointNet-based encoder network with ResNet blocks.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
    '''

    def __init__(self, c_dim=128, dim=3, hidden_dim=512):
        super().__init__()
        self.c_dim = c_dim

        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        # Every block consumes a point feature concatenated with the
        # pooled global feature, hence 2*hidden_dim in / hidden_dim out.
        self.block_0 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.block_4 = ResnetBlockFC(2 * hidden_dim, hidden_dim)
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.pool = maxpool

    def _with_pooled(self, net):
        ''' Concatenates the per-cloud max-pooled feature onto each point. '''
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        return torch.cat([net, pooled], dim=2)

    def forward(self, p):
        # p is expected to be (batch, n_points, dim); the unpack doubles
        # as a rank check.
        batch_size, n_points, point_dim = p.size()

        net = self.fc_pos(p)
        net = self._with_pooled(self.block_0(net))
        net = self._with_pooled(self.block_1(net))
        net = self._with_pooled(self.block_2(net))
        net = self._with_pooled(self.block_3(net))
        net = self.block_4(net)

        # Reduce to a single feature vector per point cloud.
        net = self.pool(net, dim=1)

        return self.fc_c(self.actvn(net))
|
|
|
|
|
|
|
|
class ResnetPointnet2Stream(nn.Module):
    ''' ResnetPointNet-based encoder network with two streams.

    The input point clouds are encoded with the same ResNet PointNet
    (shared weights) and the output codes are concatenated.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
    '''

    def __init__(self, c_dim=128, dim=3, hidden_dim=512, **kwargs):
        super().__init__()

        # Each stream contributes half of the final latent code.
        self.c_dim = int(c_dim / 2)

        self.resnet_pointnet = ResnetPointnet(
            self.c_dim, dim, hidden_dim, **kwargs)

    def forward(self, x):
        # x stacks the two point clouds along dimension 1.
        first_pcl = x[:, 0]
        second_pcl = x[:, 1]

        # Shared-weight encoder applied to both clouds.
        codes = [self.resnet_pointnet(first_pcl),
                 self.resnet_pointnet(second_pcl)]

        return torch.cat(codes, dim=-1)
|
|
|
|
|
|
|
|
class TemporalResnetPointnet(nn.Module):
    ''' Temporal PointNet-based encoder network.

    The input point clouds are concatenated along the hidden dimension,
    e.g. for a sequence of length L, the dimension becomes 3xL = 51.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        hidden_dim (int): hidden dimension of the network
        use_only_first_pcl (bool): whether to use only the first point cloud
    '''

    def __init__(self, c_dim=128, dim=51, hidden_dim=512,
                 use_only_first_pcl=False, **kwargs):
        super().__init__()
        self.c_dim = c_dim
        self.use_only_first_pcl = use_only_first_pcl

        self.fc_pos = nn.Linear(dim, 2*hidden_dim)
        # Every block consumes a point feature concatenated with the
        # pooled global feature, hence 2*hidden_dim in / hidden_dim out.
        self.block_0 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_1 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_2 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_3 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.block_4 = ResnetBlockFC(2*hidden_dim, hidden_dim)
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.pool = maxpool

    def _with_pooled(self, net):
        ''' Concatenates the per-cloud max-pooled feature onto each point. '''
        pooled = self.pool(net, dim=1, keepdim=True).expand(net.size())
        return torch.cat([net, pooled], dim=2)

    def forward(self, x):
        ''' Encodes the input point cloud(s) into a latent code.

        Args:
            x (tensor): either a sequence of point clouds of shape
                (batch, n_steps, n_pts, pt_dim), or a single point cloud of
                shape (batch, n_pts, dim) whose time steps are already
                flattened into the last dimension.

        Returns:
            tensor: latent code of shape (batch, c_dim)
        '''
        # BUGFIX: the 4-way unpack previously ran unconditionally, which
        # raised a ValueError on 3D input and made the 4D branch checks
        # below dead code. Only unpack the time axis when it exists.
        if x.dim() == 4:
            batch_size, n_steps, n_pts, _ = x.shape
            if self.use_only_first_pcl:
                # Encode only the point cloud of the first time step.
                x = x[:, 0]
            else:
                # Flatten the sequence along the feature axis:
                # (B, T, N, D) -> (B, N, T*D)
                x = x.transpose(1, 2).contiguous().view(batch_size, n_pts, -1)

        net = self.fc_pos(x)
        net = self._with_pooled(self.block_0(net))
        net = self._with_pooled(self.block_1(net))
        net = self._with_pooled(self.block_2(net))
        net = self._with_pooled(self.block_3(net))
        net = self.block_4(net)

        # Reduce to a single feature vector per point cloud.
        net = self.pool(net, dim=1)
        c = self.fc_c(self.actvn(net))

        return c
|
|
|