|
|
import os |
|
|
import glob |
|
|
|
|
|
import einops |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
|
|
|
from modules.build import VISION_REGISTRY |
|
|
from modules.layers.pointnet import PointNetPP |
|
|
from modules.layers.transformers import TransformerSpatialEncoderLayer |
|
|
from modules.utils import get_mlp_head, layer_repeat, calc_pairwise_locs, get_mixup_function |
|
|
from modules.weights import _init_weights_bert |
|
|
|
|
|
|
|
|
@VISION_REGISTRY.register()
class PointOpenVocabEncoder(nn.Module):
    """Open-vocabulary point-cloud object encoder.

    Extracts a per-object feature with a PointNet++ backbone, scores it against
    a fixed text-embedding vocabulary (BERT or CLIP, ScanNet-607 classes), and
    optionally refines the object features with a spatial transformer encoder.

    Returns from ``forward``: refined embeddings, pre-refinement embeddings,
    and the (detached) open-vocab class distribution.
    """

    def __init__(self, cfg, backbone='pointnet++', hidden_size=768, path=None, freeze=False, dim_feedforward=2048,
                 num_attention_heads=12, spatial_dim=5, num_layers=4, dim_loc=6, pairwise_rel_type='center',
                 use_matmul_label=False, mixup_strategy=None, mixup_stage1=None, mixup_stage2=None,
                 lang_type='bert', lang_path=None, attn_type='spatial'):
        """Build the encoder.

        Args:
            cfg: experiment config object (unused here; kept for registry API).
            backbone: point backbone name; only 'pointnet++' is supported.
            hidden_size: object feature dimension (768 to match BERT).
            path: optional checkpoint location — a directory of ``*.bin`` files
                or a single ``*.pth`` file; loaded with ``strict=False``.
            freeze: freeze the point backbone (params + batch-norm stats).
            dim_feedforward / num_attention_heads / num_layers: spatial
                transformer encoder hyper-parameters.
            spatial_dim: dimension of the pairwise spatial relation features.
            dim_loc: dimension of the per-object location vector (center+size).
            pairwise_rel_type: relation type passed to ``calc_pairwise_locs``.
            use_matmul_label: stored flag (not used in this module's forward).
            mixup_strategy / mixup_stage1 / mixup_stage2: accepted for config
                compatibility; currently unused in this module.
            lang_type: 'bert' or CLIP — selects the text-vocabulary file.
            lang_path: directory containing the precomputed text embeddings.
            attn_type: 'spatial' enables the spatial encoder; anything else
                leaves the backbone features unrefined.
        """
        super().__init__()
        # Validate with an exception rather than `assert` so the check
        # survives `python -O`.  (A former dead `elif backbone == 'pointnext'`
        # branch referenced the undefined name PointNextEncoder and was
        # unreachable behind this check; it has been removed.)
        if backbone != 'pointnet++':
            raise ValueError(f"unsupported backbone: {backbone!r}")

        self.point_feature_extractor = PointNetPP(
            sa_n_points=[32, 16, None],
            sa_n_samples=[32, 32, None],
            sa_radii=[0.2, 0.4, None],
            sa_mlps=[[3, 64, 64, 128], [128, 128, 128, 256], [256, 256, 512, 768]],
        )

        # Fixed per-class text embeddings; registered as a buffer so they move
        # with .to(device)/state_dict but are never trained.
        vocab_file_name = f"scannet_607_{'bert-base-uncased' if lang_type == 'bert' else 'clip-ViT-B16'}_id.pth"
        self.register_buffer("text_features", torch.load(os.path.join(lang_path, vocab_file_name)))
        # Open-vocab "classifier": similarity of object features to the text
        # embeddings (no trainable parameters, hence a plain callable).
        self.point_cls_head = lambda x: x @ self.text_features.t()
        self.dropout = nn.Dropout(0.1)

        self.attn_type = attn_type

        # NOTE: freezing happens *before* the layers below are created, so only
        # the point backbone is frozen — the spatial encoder stays trainable.
        self.freeze = freeze
        if freeze:
            for p in self.parameters():
                p.requires_grad = False

        self.sem_cls_embed_layer = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.Dropout(0.1),
        )

        self.use_matmul_label = use_matmul_label

        # Learnable embedding used to mask out semantic features.
        self.sem_mask_embeddings = nn.Embedding(1, 768)

        if self.attn_type == 'spatial':
            pc_encoder_layer = TransformerSpatialEncoderLayer(
                hidden_size, num_attention_heads,
                dim_feedforward=dim_feedforward,
                dropout=0.1, activation='gelu',
                spatial_dim=spatial_dim, spatial_multihead=True,
                spatial_attn_fusion='cond',
            )
            self.spatial_encoder = layer_repeat(pc_encoder_layer, num_layers)
            loc_layer = nn.Sequential(
                nn.Linear(dim_loc, hidden_size),
                nn.LayerNorm(hidden_size),
            )
            # A single shared location-embedding layer (list of length 1).
            self.loc_layers = layer_repeat(loc_layer, 1)
            self.pairwise_rel_type = pairwise_rel_type
            self.spatial_dim = spatial_dim

        self.apply(_init_weights_bert)
        if path is not None:
            # Prefer a directory of *.bin shards; fall back to a single *.pth.
            # Both branches load to CPU first and use strict=False so partial
            # checkpoints (e.g. backbone-only) are accepted.
            ckpts = glob.glob(os.path.join(path, '*.bin'))
            if len(ckpts) != 0:
                for ckpt in ckpts:
                    state_dict = torch.load(ckpt, map_location='cpu')
                    self.load_state_dict(state_dict, strict=False)
                print("loaded checkpoint files")
            elif path.endswith('.pth'):
                state_dict = torch.load(path, map_location='cpu')
                self.load_state_dict(state_dict, strict=False)
                print("loaded checkpoint file")

    def freeze_bn(self, m):
        """Put every BatchNorm2d inside `m` into eval mode.

        Keeps frozen running statistics even while the surrounding model is in
        train mode.
        """
        for layer in m.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eval()

    def forward(self, obj_pcds, obj_locs, obj_masks, obj_sem_masks,
                obj_labels=None, cur_step=None, max_steps=None, **kwargs):
        """Encode per-object point clouds.

        Args:
            obj_pcds: (batch, objects, points, point_dim) object point clouds.
            obj_locs: per-object locations; first 3 channels are centers, the
                rest sizes (fed to ``calc_pairwise_locs`` and the loc layer).
            obj_masks: boolean validity mask; True = real object (inverted into
                a key-padding mask for attention).
            obj_sem_masks, obj_labels, cur_step, max_steps, **kwargs: accepted
                for interface compatibility; unused in this forward pass.

        Returns:
            (obj_embeds, obj_embeds_pre, obj_sem_cls): refined embeddings,
            backbone embeddings before spatial refinement, and the detached
            softmax class distribution over the text vocabulary.
        """
        if self.freeze:
            # Backbone is frozen: keep its batch-norm stats frozen too.
            self.freeze_bn(self.point_feature_extractor)

        batch_size, num_objs, _, _ = obj_pcds.size()
        # Flatten batch and object dims so the backbone sees one cloud per row.
        obj_embeds = self.point_feature_extractor(einops.rearrange(obj_pcds, 'b o p d -> (b o) p d'))
        obj_embeds = einops.rearrange(obj_embeds, '(b o) d -> b o d', b=batch_size)
        obj_embeds = self.dropout(obj_embeds)
        if self.freeze:
            obj_embeds = obj_embeds.detach()

        # Open-vocab class distribution; detached so no gradient flows through
        # the classification logits.
        obj_sem_cls = F.softmax(self.point_cls_head(obj_embeds), dim=2).detach()

        # Keep the pre-refinement features for the caller.
        obj_embeds_pre = obj_embeds

        if self.attn_type == 'spatial':
            pairwise_locs = calc_pairwise_locs(obj_locs[:, :, :3], obj_locs[:, :, 3:],
                                               pairwise_rel_type=self.pairwise_rel_type,
                                               spatial_dist_norm=True,
                                               spatial_dim=self.spatial_dim)
            # Loop-invariant: the same location embedding is re-added before
            # every encoder layer, so compute it once.
            query_pos = self.loc_layers[0](obj_locs)
            for pc_layer in self.spatial_encoder:
                obj_embeds = obj_embeds + query_pos
                obj_embeds, _ = pc_layer(obj_embeds, pairwise_locs,
                                         tgt_key_padding_mask=obj_masks.logical_not())

        return obj_embeds, obj_embeds_pre, obj_sem_cls