| import copy |
| import math |
| from typing import Optional |
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| from timm.models.layers import DropPath |
| from torch import Tensor |
|
|
| def _get_activation_fn(activation, d_model=256, batch_dim=0): |
|     """Return an activation function given a string.""" |
|     if activation == "relu": |
|         return F.relu |
|     if activation == "gelu": |
|         return F.gelu |
|     if activation == "glu": |
|         return F.glu |
|     if activation == "prelu": |
|         return nn.PReLU() |
|     if activation == "selu": |
|         return F.selu |
|     raise RuntimeError(f"activation should be relu/gelu/glu/prelu/selu, not {activation}.") |
|
|
| def _get_clones(module, N, layer_share=False): |
| |
| if layer_share: |
| return nn.ModuleList([module for i in range(N)]) |
| else: |
| return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) |
|
|
| def get_sine_pos_embed( |
| pos_tensor: torch.Tensor, |
| num_pos_feats: int = 128, |
| temperature: int = 10000, |
| exchange_xy: bool = True, |
| ): |
| """generate sine position embedding from a position tensor |
| Args: |
| pos_tensor (torch.Tensor): shape: [..., n]. |
| num_pos_feats (int): projected shape for each float in the tensor. |
| temperature (int): temperature in the sine/cosine function. |
| exchange_xy (bool, optional): exchange pos x and pos y. \ |
| For example, input tensor is [x,y], the results will be [pos(y), pos(x)]. Defaults to True. |
| Returns: |
| pos_embed (torch.Tensor): shape: [..., n*num_pos_feats]. |
| """ |
| scale = 2 * math.pi |
| dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device) |
| dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) |
|
|
| def sine_func(x: torch.Tensor): |
| sin_x = x * scale / dim_t |
| sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2) |
| return sin_x |
|
|
| pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)] |
| if exchange_xy: |
| pos_res[0], pos_res[1] = pos_res[1], pos_res[0] |
| pos_res = torch.cat(pos_res, dim=-1) |
| return pos_res |
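|
| # Illustrative usage (a hedged sketch; the shapes below are assumptions, not taken from the |
| # original call sites): |
| #     pos = torch.rand(2, 100, 4)                      # [bs, n_query, 4] coordinates in [0, 1] |
| #     emb = get_sine_pos_embed(pos, num_pos_feats=64)  # -> [2, 100, 4 * 64] |
| # Each scalar coordinate is expanded into num_pos_feats interleaved sin/cos features; with |
| # exchange_xy=True the embeddings of the first two coordinates are swapped. |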
|
|
| class FeatureResizer(nn.Module): |
| """ |
|     This class takes as input a set of embeddings of dimension C1 and outputs a set of |
|     embeddings of dimension C2, after a linear transformation, dropout and layer normalization (LN). |
| """ |
|
|
| def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): |
| super().__init__() |
| self.do_ln = do_ln |
| |
| self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) |
| self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) |
| self.dropout = nn.Dropout(dropout) |
|
|
| def forward(self, encoder_features): |
| x = self.fc(encoder_features) |
| if self.do_ln: |
| x = self.layer_norm(x) |
| output = self.dropout(x) |
| return output |
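|
| # Minimal usage sketch (illustrative dimensions, not from the original config): |
| #     resizer = FeatureResizer(input_feat_size=768, output_feat_size=256, dropout=0.1) |
| #     out = resizer(torch.randn(2, 20, 768))  # -> [2, 20, 256], LayerNorm'ed then dropout |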
|
|
|
|
| def l1norm(X, dim, eps=1e-8): |
| """L1-normalize columns of X""" |
| norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps |
| X = torch.div(X, norm) |
| return X |
|
|
|
|
| def l2norm(X, dim, eps=1e-8): |
| """L2-normalize columns of X""" |
| norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps |
| X = torch.div(X, norm) |
| return X |
|
|
|
|
| def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): |
|     """ |
|     query: (n_context, queryL, d) |
|     context: (n_context, sourceL, d) |
|     """ |
|     batch_size_q, queryL = query.size(0), query.size(1) |
|     batch_size, sourceL = context.size(0), context.size(1) |
|
|     # --> (batch, d, queryL) |
|     queryT = torch.transpose(query, 1, 2) |
|
|     # (batch, sourceL, d) x (batch, d, queryL) --> (batch, sourceL, queryL) |
|     attn = torch.bmm(context, queryT) |
|     if raw_feature_norm == "softmax": |
|         # --> (batch*sourceL, queryL) |
|         attn = attn.view(batch_size * sourceL, queryL) |
|         attn = F.softmax(attn, dim=1) |
|         # --> (batch, sourceL, queryL) |
|         attn = attn.view(batch_size, sourceL, queryL) |
|     elif raw_feature_norm == "l2norm": |
|         attn = l2norm(attn, 2) |
|     elif raw_feature_norm == "clipped_l2norm": |
|         attn = nn.LeakyReLU(0.1)(attn) |
|         attn = l2norm(attn, 2) |
|     else: |
|         raise ValueError("unknown first norm type:", raw_feature_norm) |
|
|     # --> (batch, queryL, sourceL) |
|     attn = torch.transpose(attn, 1, 2).contiguous() |
|     # --> (batch*queryL, sourceL) |
|     attn = attn.view(batch_size * queryL, sourceL) |
|     attn = F.softmax(attn * smooth, dim=1) |
|     # --> (batch, queryL, sourceL) |
|     attn = attn.view(batch_size, queryL, sourceL) |
|     # --> (batch, sourceL, queryL) |
|     attnT = torch.transpose(attn, 1, 2).contiguous() |
|
|     # --> (batch, d, sourceL) |
|     contextT = torch.transpose(context, 1, 2) |
|     # (batch, d, sourceL) x (batch, sourceL, queryL) --> (batch, d, queryL) |
|     weightedContext = torch.bmm(contextT, attnT) |
|     # --> (batch, queryL, d) |
|     weightedContext = torch.transpose(weightedContext, 1, 2) |
|
|     return weightedContext, attnT |
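|
| # Shape sketch for func_attention (hedged example; the dimensions are illustrative): |
| #     query   = torch.randn(4, 10, 256)   # (n_context, queryL, d) |
| #     context = torch.randn(4, 30, 256)   # (n_context, sourceL, d) |
| #     weighted, attnT = func_attention(query, context, smooth=9) |
| #     # weighted: (4, 10, 256) context aggregated per query; attnT: (4, 30, 10) |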
|
|
|
|
| class BiMultiHeadAttention(nn.Module): |
| def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): |
| super(BiMultiHeadAttention, self).__init__() |
|
|
| self.embed_dim = embed_dim |
| self.num_heads = num_heads |
| self.head_dim = embed_dim // num_heads |
| self.v_dim = v_dim |
| self.l_dim = l_dim |
|
|
| assert ( |
| self.head_dim * self.num_heads == self.embed_dim |
| ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." |
| self.scale = self.head_dim ** (-0.5) |
| self.dropout = dropout |
|
|
| self.v_proj = nn.Linear(self.v_dim, self.embed_dim) |
| self.l_proj = nn.Linear(self.l_dim, self.embed_dim) |
| self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) |
| self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) |
|
|
| self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) |
| self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) |
|
|
| self.stable_softmax_2d = True |
| self.clamp_min_for_underflow = True |
| self.clamp_max_for_overflow = True |
|
|
| self._reset_parameters() |
|
|
| def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
| return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() |
|
|
| def _reset_parameters(self): |
| nn.init.xavier_uniform_(self.v_proj.weight) |
| self.v_proj.bias.data.fill_(0) |
| nn.init.xavier_uniform_(self.l_proj.weight) |
| self.l_proj.bias.data.fill_(0) |
| nn.init.xavier_uniform_(self.values_v_proj.weight) |
| self.values_v_proj.bias.data.fill_(0) |
| nn.init.xavier_uniform_(self.values_l_proj.weight) |
| self.values_l_proj.bias.data.fill_(0) |
| nn.init.xavier_uniform_(self.out_v_proj.weight) |
| self.out_v_proj.bias.data.fill_(0) |
| nn.init.xavier_uniform_(self.out_l_proj.weight) |
| self.out_l_proj.bias.data.fill_(0) |
|
|
| def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): |
| """_summary_ |
| |
| Args: |
| v (_type_): bs, n_img, dim |
| l (_type_): bs, n_text, dim |
| attention_mask_v (_type_, optional): _description_. bs, n_img |
| attention_mask_l (_type_, optional): _description_. bs, n_text |
| |
| Returns: |
| _type_: _description_ |
| """ |
| |
| |
| bsz, tgt_len, _ = v.size() |
|
|
| query_states = self.v_proj(v) * self.scale |
| key_states = self._shape(self.l_proj(l), -1, bsz) |
| value_v_states = self._shape(self.values_v_proj(v), -1, bsz) |
| value_l_states = self._shape(self.values_l_proj(l), -1, bsz) |
|
|
| proj_shape = (bsz * self.num_heads, -1, self.head_dim) |
| query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) |
| key_states = key_states.view(*proj_shape) |
| value_v_states = value_v_states.view(*proj_shape) |
| value_l_states = value_l_states.view(*proj_shape) |
|
|
| src_len = key_states.size(1) |
| attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) |
|
|
| if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): |
| raise ValueError( |
| f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" |
| ) |
|
|
| if self.stable_softmax_2d: |
| attn_weights = attn_weights - attn_weights.max() |
|
|
| if self.clamp_min_for_underflow: |
| attn_weights = torch.clamp( |
| attn_weights, min=-50000 |
| ) |
| if self.clamp_max_for_overflow: |
| attn_weights = torch.clamp( |
| attn_weights, max=50000 |
| ) |
|
|
| attn_weights_T = attn_weights.transpose(1, 2) |
| attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] |
| if self.clamp_min_for_underflow: |
| attn_weights_l = torch.clamp( |
| attn_weights_l, min=-50000 |
| ) |
| if self.clamp_max_for_overflow: |
| attn_weights_l = torch.clamp( |
| attn_weights_l, max=50000 |
| ) |
|
|
| |
| if attention_mask_v is not None: |
| attention_mask_v = ( |
| attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) |
| ) |
| attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) |
|
|
| attn_weights_l = attn_weights_l.softmax(dim=-1) |
|
|
| |
| if attention_mask_l is not None: |
| attention_mask_l = ( |
| attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) |
| ) |
| attn_weights.masked_fill_(attention_mask_l, float("-inf")) |
| attn_weights_v = attn_weights.softmax(dim=-1) |
|
|
| attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) |
| attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) |
|
|
| attn_output_v = torch.bmm(attn_probs_v, value_l_states) |
| attn_output_l = torch.bmm(attn_probs_l, value_v_states) |
|
|
|         if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): |
|             raise ValueError( |
|                 f"`attn_output_v` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" |
|             ) |
|
|         if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): |
|             raise ValueError( |
|                 f"`attn_output_l` should be of size {(bsz * self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" |
|             ) |
|
|
| attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) |
| attn_output_v = attn_output_v.transpose(1, 2) |
| attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) |
|
|
| attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) |
| attn_output_l = attn_output_l.transpose(1, 2) |
| attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim) |
|
|
| attn_output_v = self.out_v_proj(attn_output_v) |
| attn_output_l = self.out_l_proj(attn_output_l) |
|
|
| return attn_output_v, attn_output_l |
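|
| # Usage sketch (hedged; the dimensions are illustrative, not the model's actual config): |
| #     attn = BiMultiHeadAttention(v_dim=256, l_dim=256, embed_dim=1024, num_heads=4) |
| #     v, l = torch.randn(2, 900, 256), torch.randn(2, 20, 256) |
| #     new_v, new_l = attn(v, l)   # new_v: [2, 900, 256], new_l: [2, 20, 256] |
| # The boolean masks, if given, mark padded positions with True and are broadcast over heads. |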
|
|
| |
| class BiAttentionBlock(nn.Module): |
| def __init__( |
| self, |
| v_dim, |
| l_dim, |
| embed_dim, |
| num_heads, |
| dropout=0.1, |
| drop_path=0.0, |
| init_values=1e-4, |
| cfg=None, |
| ): |
| """ |
| Inputs: |
| embed_dim - Dimensionality of input and attention feature vectors |
| hidden_dim - Dimensionality of hidden layer in feed-forward network |
| (usually 2-4x larger than embed_dim) |
| num_heads - Number of heads to use in the Multi-Head Attention block |
| dropout - Amount of dropout to apply in the feed-forward network |
| """ |
| super(BiAttentionBlock, self).__init__() |
|
|
| |
| self.layer_norm_v = nn.LayerNorm(v_dim) |
| self.layer_norm_l = nn.LayerNorm(l_dim) |
| self.attn = BiMultiHeadAttention( |
| v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout |
| ) |
|
|
| |
| self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() |
|         # learnable per-channel scales for the residual branches (kept in float32) |
|         self.gamma_v = nn.Parameter(init_values * torch.ones(v_dim)) |
|         self.gamma_l = nn.Parameter(init_values * torch.ones(l_dim)) |
|
|
| def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): |
| v = self.layer_norm_v(v) |
| l = self.layer_norm_l(l) |
| delta_v, delta_l = self.attn( |
| v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l |
| ) |
| |
| v = v + self.drop_path(self.gamma_v * delta_v) |
| l = l + self.drop_path(self.gamma_l * delta_l) |
| return v.to(torch.bfloat16), l.to(torch.bfloat16) |
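|
| # Sketch of the block (hedged; dimensions are illustrative): both streams are LayerNorm'ed, |
| # fused with bi-directional attention, and the gamma-scaled, DropPath'd updates are added back |
| # onto the normalized features. Note the outputs are cast to bfloat16, so downstream layers are |
| # expected to run in bf16 as well: |
| #     block = BiAttentionBlock(v_dim=256, l_dim=256, embed_dim=1024, num_heads=4) |
| #     v, l = block(torch.randn(2, 900, 256), torch.randn(2, 20, 256)) |
| #     # v: [2, 900, 256] bfloat16, l: [2, 20, 256] bfloat16 |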
|
|
| |
|
|
| class TransformerEncoderLayer(nn.Module): |
| def __init__( |
| self, |
| d_model, |
| nhead, |
| dim_feedforward=2048, |
| dropout=0.1, |
| activation="relu", |
| normalize_before=False, |
| ): |
| super().__init__() |
| self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) |
| |
| self.linear1 = nn.Linear(d_model, dim_feedforward) |
| self.dropout = nn.Dropout(dropout) |
| self.linear2 = nn.Linear(dim_feedforward, d_model) |
|
|
| self.norm1 = nn.LayerNorm(d_model) |
| self.norm2 = nn.LayerNorm(d_model) |
| self.dropout1 = nn.Dropout(dropout) |
| self.dropout2 = nn.Dropout(dropout) |
|
|
| self.activation = _get_activation_fn(activation) |
| self.normalize_before = normalize_before |
| self.nhead = nhead |
|
|
| def with_pos_embed(self, tensor, pos: Optional[Tensor]): |
| return tensor if pos is None else tensor + pos |
|
|
|     def forward( |
|         self, |
|         src, |
|         pos: Optional[Tensor] = None, |
|         src_mask: Optional[Tensor] = None, |
|     ): |
|         # src is expected in (seq_len, bs, d_model) layout, since nn.MultiheadAttention |
|         # is constructed with the default batch_first=False. |
|         q = k = self.with_pos_embed(src, pos) |
|         src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0] |
|
|         src = src + self.dropout1(src2) |
|         src = self.norm1(src) |
|         src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) |
|         src = src + self.dropout2(src2) |
|         src = self.norm2(src) |
|         return src |
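|
| # Usage sketch (hedged): the layer follows the DETR-style post-norm encoder layer and operates |
| # on (seq_len, bs, d_model) tensors, e.g.: |
| #     layer = TransformerEncoderLayer(d_model=256, nhead=8) |
| #     out = layer(src=torch.randn(100, 2, 256))   # -> [100, 2, 256] |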
|
|
| class MLP(nn.Module): |
| def __init__(self, input_dim, hidden_dim, output_dim, num_layers): |
| super().__init__() |
| self.num_layers = num_layers |
| h = [hidden_dim] * (num_layers - 1) |
| self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) |
|
|
| def forward(self, x): |
| for i, layer in enumerate(self.layers): |
| x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) |
| return x |
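|
| # Example (hedged): MLP(256, 512, 4, num_layers=3) builds Linear(256->512) -> ReLU -> |
| # Linear(512->512) -> ReLU -> Linear(512->4). With num_layers=1 the hidden_dim is unused |
| # and the module reduces to a single Linear(input_dim -> output_dim). |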
|
|
| class AfdMultimodalTransformer(nn.Module): |
| def __init__( |
| self, |
| d_model=2176, |
| num_encoder_layers=3 |
| ): |
| super().__init__() |
|         self.d_model = d_model * 2 |
|         self.num_encoder_layers = num_encoder_layers |
|         self.language_projector = MLP(input_dim=d_model, hidden_dim=self.d_model, output_dim=d_model, num_layers=1) |
| self.multimodal_transformer = MultimodalTransformer( |
| d_model=d_model, num_encoder_layers=num_encoder_layers |
| ) |
| self.afd_predictor = MLP(input_dim=d_model, hidden_dim=self.d_model, output_dim=1, num_layers=2) |
|
|
| def forward( |
| self, |
| visual_features, |
| textual_features |
| ): |
| |
| textual_features = self.language_projector(textual_features) |
| visual_features, textual_features = self.multimodal_transformer(visual_features, |
| textual_features) |
| interactability = self.afd_predictor(visual_features).sigmoid() |
| return interactability |
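|
| # Data-flow sketch (hedged; shapes are assumptions): given visual tokens [bs, n, d_model] and |
| # textual tokens [bs, n_text, d_model], the text is projected by `language_projector`, both |
| # streams are fused by `multimodal_transformer`, and `afd_predictor` maps each visual token to |
| # a sigmoid score, i.e. an interactability map of shape [bs, n, 1]. |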
|
|
| class MultimodalTransformer(nn.Module): |
| def __init__( |
| self, |
| d_model=256, |
| nhead=8, |
| num_queries=300, |
| num_encoder_layers=3, |
| dim_feedforward=2048, |
| normalize_before=False, |
| |
| num_feature_levels=1, |
| text_dropout=0.1, |
| image_dropout=0.1, |
| fusion_dropout=0.1, |
| fusion_droppath=0.0, |
| ): |
| super().__init__() |
| self.num_feature_levels = num_feature_levels |
| self.num_encoder_layers = num_encoder_layers |
| self.num_queries = num_queries |
|
|
| |
| encoder_layer = TransformerEncoderLayer( |
| d_model=d_model, |
| nhead=nhead // 2, |
| dim_feedforward=dim_feedforward // 2, |
| dropout=image_dropout, |
| ) |
|
|
| text_enhance_layer = TransformerEncoderLayer( |
| d_model=d_model, |
| nhead=nhead // 2, |
| dim_feedforward=dim_feedforward // 2, |
| dropout=text_dropout, |
| ) |
|
|
| feature_fusion_layer = BiAttentionBlock( |
| v_dim=d_model, |
| l_dim=d_model, |
| embed_dim=dim_feedforward // 2, |
| num_heads=nhead // 2, |
| dropout=fusion_dropout, |
| drop_path=fusion_droppath, |
| ) |
|
|
| encoder_norm = nn.LayerNorm(d_model) if normalize_before else None |
| assert encoder_norm is None |
| self.encoder = TransformerEncoder( |
| encoder_layer, |
| num_encoder_layers, |
| d_model=d_model, |
| num_queries=num_queries, |
| text_enhance_layer=text_enhance_layer, |
| feature_fusion_layer=feature_fusion_layer, |
| ) |
|
|
| self.d_model = d_model |
| self.nhead = nhead |
| self.num_queries = num_queries |
| if num_feature_levels > 1: |
| if self.num_encoder_layers > 0: |
| self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) |
| else: |
| self.level_embed = None |
|
|
|
|
| self._reset_parameters() |
|
|
| def _reset_parameters(self): |
| for p in self.parameters(): |
| if p.dim() > 1: |
| nn.init.xavier_uniform_(p) |
| if self.num_feature_levels > 1 and self.level_embed is not None: |
| nn.init.normal_(self.level_embed) |
|
|
| def forward(self, slot_features, text_features): |
| """ |
| Input: |
| - srcs: List of multi features [bs, \sum{hxw}, c] |
| |
| """ |
| |
| |
| |
| memory, memory_text = self.encoder( |
| visual_features=slot_features, |
| textual_features=text_features, |
| ) |
| return memory, memory_text |
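|
| # Data-flow sketch (hedged): per encoder layer, the visual/slot tokens [bs, n_visual, d_model] |
| # and text tokens [bs, n_text, d_model] are (1) fused by BiAttentionBlock, (2) the text stream |
| # is refined by its own self-attention layer using sinusoidal index positions, and (3) the |
| # visual stream is refined by its self-attention layer; both updated streams are returned. |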
|
|
|
|
| class TransformerEncoder(nn.Module): |
| def __init__( |
| self, |
| encoder_layer, |
| num_layers, |
| d_model=256, |
| num_queries=300, |
| enc_layer_share=False, |
| text_enhance_layer=None, |
| feature_fusion_layer=None, |
| use_checkpoint=False, |
| use_transformer_ckpt=False, |
| ): |
| """_summary_ |
| |
| Args: |
| encoder_layer (_type_): _description_ |
| num_layers (_type_): _description_ |
| norm (_type_, optional): _description_. Defaults to None. |
| d_model (int, optional): _description_. Defaults to 256. |
| num_queries (int, optional): _description_. Defaults to 300. |
| enc_layer_share (bool, optional): _description_. Defaults to False. |
| |
| """ |
| super().__init__() |
| |
| self.layers = [] |
| self.text_layers = [] |
| self.fusion_layers = [] |
| if num_layers > 0: |
| self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share) |
|
|
| if text_enhance_layer is not None: |
| self.text_layers = _get_clones( |
| text_enhance_layer, num_layers, layer_share=enc_layer_share |
| ) |
| if feature_fusion_layer is not None: |
| self.fusion_layers = _get_clones( |
| feature_fusion_layer, num_layers, layer_share=enc_layer_share |
| ) |
| else: |
| self.layers = [] |
| del encoder_layer |
|
|
| if text_enhance_layer is not None: |
| self.text_layers = [] |
| del text_enhance_layer |
| if feature_fusion_layer is not None: |
| self.fusion_layers = [] |
| del feature_fusion_layer |
|
|
| self.query_scale = None |
| self.num_queries = num_queries |
| self.num_layers = num_layers |
| self.d_model = d_model |
|
|
| self.use_checkpoint = use_checkpoint |
| self.use_transformer_ckpt = use_transformer_ckpt |
| |
| |
|
|
| def forward( |
| self, |
| |
| visual_features: Tensor, |
| |
| textual_features: Tensor = None, |
| ): |
| """ |
| Input: |
| - visual_features: [bs, sum(hi*wi), cc] |
| - textual_features: bs, n_text, cc |
| Outpus: |
| - output: [bs, sum(hi*wi), 256] |
| """ |
| output = visual_features |
|
|
| if self.text_layers: |
| |
| bs, n_text, text_dim = textual_features.shape |
| pos_text = ( |
| torch.arange(n_text, device=textual_features.device) |
| .float() |
| .unsqueeze(0) |
| .unsqueeze(-1) |
| .repeat(bs, 1, 1) |
| ) |
| pos_text = get_sine_pos_embed(pos_text, num_pos_feats=visual_features.shape[-1], exchange_xy=False) |
|
|
| |
|         for layer_id, visual_layer in enumerate(self.layers): |
|             # Bi-directional vision-language feature fusion. |
|             if self.fusion_layers: |
|                 output, textual_features = self.fusion_layers[layer_id]( |
|                     v=output, |
|                     l=textual_features, |
|                 ) |
|
|             # Text self-attention; nn.MultiheadAttention expects (seq_len, bs, d). |
|             if self.text_layers: |
|                 textual_features = self.text_layers[layer_id]( |
|                     src=textual_features.transpose(0, 1), |
|                     pos=(pos_text.transpose(0, 1) if pos_text is not None else None), |
|                 ).transpose(0, 1) |
|
|             # Visual self-attention, transposed into (seq_len, bs, d) for the same reason. |
|             output = visual_layer( |
|                 src=output.transpose(0, 1), |
|             ).transpose(0, 1) |
|
|         return output, textual_features |
|
|
|
|
|
|
| class EgoExoTransformer(nn.Module): |
| def __init__( |
| self, |
| d_model=256, |
| nhead=8, |
| num_queries=300, |
| num_encoder_layers=3, |
| dim_feedforward=2048, |
| normalize_before=False, |
| |
| num_feature_levels=1, |
| text_dropout=0.1, |
| image_dropout=0.1, |
| fusion_dropout=0.1, |
| fusion_droppath=0.0, |
| ): |
| super().__init__() |
| self.num_feature_levels = num_feature_levels |
| self.num_encoder_layers = num_encoder_layers |
| self.num_queries = num_queries |
|
|
| |
| encoder_layer = TransformerEncoderLayer( |
| d_model=d_model, |
| nhead=nhead // 2, |
| dim_feedforward=dim_feedforward // 2, |
| dropout=image_dropout, |
| ) |
|
|
| ego_enhance_layer = TransformerEncoderLayer( |
| d_model=d_model, |
| nhead=nhead // 2, |
| dim_feedforward=dim_feedforward // 2, |
| dropout=image_dropout, |
| ) |
|
|
| feature_fusion_layer = BiAttentionBlock( |
| v_dim=d_model, |
| l_dim=d_model, |
| embed_dim=dim_feedforward // 2, |
| num_heads=nhead // 2, |
| dropout=fusion_dropout, |
| drop_path=fusion_droppath, |
| ) |
|
|
| encoder_norm = nn.LayerNorm(d_model) if normalize_before else None |
| assert encoder_norm is None |
| self.encoder = EgoExoTransformerEncoder( |
| encoder_layer, |
| num_encoder_layers, |
| d_model=d_model, |
| num_queries=num_queries, |
| ego_enhance_layer=ego_enhance_layer, |
| feature_fusion_layer=feature_fusion_layer, |
| ) |
|
|
| self.d_model = d_model |
| self.nhead = nhead |
| self.num_queries = num_queries |
| if num_feature_levels > 1: |
| if self.num_encoder_layers > 0: |
| self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) |
| else: |
| self.level_embed = None |
|
|
|
|
| self._reset_parameters() |
|
|
| def _reset_parameters(self): |
| for p in self.parameters(): |
| if p.dim() > 1: |
| nn.init.xavier_uniform_(p) |
| if self.num_feature_levels > 1 and self.level_embed is not None: |
| nn.init.normal_(self.level_embed) |
|
|
| def forward(self, exo_features, ego_features): |
| """ |
| Input: |
| - srcs: List of multi features [bs, \sum{hxw}, c] |
| |
| """ |
| |
| |
| |
| memory_exo, memory_ego = self.encoder( |
| exo_features=exo_features, |
| ego_features=ego_features, |
| ) |
| return memory_exo, memory_ego |
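|
| # Data-flow sketch (hedged): same pattern as MultimodalTransformer, but the two streams are |
| # exocentric and egocentric tokens; each encoder layer fuses them with BiAttentionBlock and |
| # then refines each stream with its own self-attention layer. |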
|
|
|
|
| class EgoExoTransformerEncoder(nn.Module): |
| def __init__( |
| self, |
| encoder_layer, |
| num_layers, |
| d_model=256, |
| num_queries=300, |
| enc_layer_share=False, |
| ego_enhance_layer=None, |
| feature_fusion_layer=None, |
| use_checkpoint=False, |
| use_transformer_ckpt=False, |
| ): |
| """_summary_ |
| |
| Args: |
| encoder_layer (_type_): _description_ |
| num_layers (_type_): _description_ |
| norm (_type_, optional): _description_. Defaults to None. |
| d_model (int, optional): _description_. Defaults to 256. |
| num_queries (int, optional): _description_. Defaults to 300. |
| enc_layer_share (bool, optional): _description_. Defaults to False. |
| |
| """ |
| super().__init__() |
| |
| self.layers = [] |
| self.ego_layers = [] |
| self.fusion_layers = [] |
| if num_layers > 0: |
| self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share) |
|
|
| if ego_enhance_layer is not None: |
| self.ego_layers = _get_clones( |
| ego_enhance_layer, num_layers, layer_share=enc_layer_share |
| ) |
| if feature_fusion_layer is not None: |
| self.fusion_layers = _get_clones( |
| feature_fusion_layer, num_layers, layer_share=enc_layer_share |
| ) |
| else: |
| self.layers = [] |
| del encoder_layer |
|
|
| if ego_enhance_layer is not None: |
| self.ego_layers = [] |
| del ego_enhance_layer |
| if feature_fusion_layer is not None: |
| self.fusion_layers = [] |
| del feature_fusion_layer |
|
|
| self.query_scale = None |
| self.num_queries = num_queries |
| self.num_layers = num_layers |
| self.d_model = d_model |
|
|
| self.use_checkpoint = use_checkpoint |
| self.use_transformer_ckpt = use_transformer_ckpt |
| |
| |
|
|
| def forward( |
| self, |
| |
| exo_features: Tensor, |
| |
| ego_features: Tensor = None, |
| ): |
| """ |
| Input: |
| - exo_features: [bs, sum(hi*wi), cc] |
| - ego_features: [bs, sum(hi*wi), cc] |
| Outpus: |
| - output: [bs, sum(hi*wi), 256] |
| """ |
| |
|
|
| |
|         for layer_id, exo_layer in enumerate(self.layers): |
|             # Bi-directional exo <-> ego feature fusion. |
|             if self.fusion_layers: |
|                 exo_features, ego_features = self.fusion_layers[layer_id]( |
|                     v=exo_features, |
|                     l=ego_features, |
|                 ) |
|
|             # Ego self-attention; nn.MultiheadAttention expects (seq_len, bs, d). |
|             if self.ego_layers: |
|                 ego_features = self.ego_layers[layer_id]( |
|                     src=ego_features.transpose(0, 1), |
|                 ).transpose(0, 1) |
|
|             # Exo self-attention. |
|             exo_features = exo_layer( |
|                 src=exo_features.transpose(0, 1), |
|             ).transpose(0, 1) |
|
|         return exo_features, ego_features |
|
|
|
|
| class AfdTransformer(nn.Module): |
| def __init__( |
| self, |
| d_model=256, |
| nhead=8, |
| num_queries=300, |
| num_encoder_layers=3, |
| dim_feedforward=2048, |
| normalize_before=False, |
| |
| num_feature_levels=1, |
| text_dropout=0.1, |
| image_dropout=0.1, |
| fusion_dropout=0.1, |
| fusion_droppath=0.0, |
| ): |
| super().__init__() |
| self.num_feature_levels = num_feature_levels |
| self.num_encoder_layers = num_encoder_layers |
| self.num_queries = num_queries |
|
|
| |
| encoder_layer = TransformerEncoderLayer( |
| d_model=d_model, |
| nhead=nhead // 2, |
| dim_feedforward=dim_feedforward // 2, |
| dropout=image_dropout, |
| ) |
|
|
| feature_fusion_layer = BiAttentionBlock( |
| v_dim=d_model, |
| l_dim=d_model, |
| embed_dim=dim_feedforward // 2, |
| num_heads=nhead // 2, |
| dropout=fusion_dropout, |
| drop_path=fusion_droppath, |
| ) |
|
|
| encoder_norm = nn.LayerNorm(d_model) if normalize_before else None |
| assert encoder_norm is None |
| self.encoder = AfdTransformerEncoder( |
| encoder_layer, |
| num_encoder_layers, |
| d_model=d_model, |
| num_queries=num_queries, |
| ctx_enhance_layer=None, |
| feature_fusion_layer=feature_fusion_layer, |
| ) |
|
|
| self.d_model = d_model |
| self.nhead = nhead |
| self.num_queries = num_queries |
| if num_feature_levels > 1: |
| if self.num_encoder_layers > 0: |
| self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model)) |
| else: |
| self.level_embed = None |
|
|
|
|
| self._reset_parameters() |
|
|
| def _reset_parameters(self): |
| for p in self.parameters(): |
| if p.dim() > 1: |
| nn.init.xavier_uniform_(p) |
| if self.num_feature_levels > 1 and self.level_embed is not None: |
| nn.init.normal_(self.level_embed) |
|
|
| def forward(self, context_tokens, object_tokens): |
| """ |
| Input: |
| - srcs: List of multi features [bs, \sum{hxw}, c] |
| |
| """ |
| |
| |
| |
| context_tokens = torch.concat([context_tokens, object_tokens], dim=1) |
| memory_ctx, memory_afd = self.encoder( |
| context_features=context_tokens, |
| object_features=object_tokens, |
| ) |
| return memory_afd |
|
|
|
|
| class AfdTransformerEncoder(nn.Module): |
| def __init__( |
| self, |
| encoder_layer, |
| num_layers, |
| d_model=256, |
| num_queries=300, |
| enc_layer_share=False, |
| ctx_enhance_layer=None, |
| feature_fusion_layer=None, |
| use_checkpoint=False, |
| use_transformer_ckpt=False, |
| ): |
| """_summary_ |
| |
| Args: |
| encoder_layer (_type_): _description_ |
| num_layers (_type_): _description_ |
| norm (_type_, optional): _description_. Defaults to None. |
| d_model (int, optional): _description_. Defaults to 256. |
| num_queries (int, optional): _description_. Defaults to 300. |
| enc_layer_share (bool, optional): _description_. Defaults to False. |
| |
| """ |
| super().__init__() |
| |
| self.layers = [] |
| self.ctx_layers = [] |
| self.fusion_layers = [] |
| if num_layers > 0: |
| self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share) |
|
|
| if ctx_enhance_layer is not None: |
| self.ctx_layers = _get_clones( |
| ctx_enhance_layer, num_layers, layer_share=enc_layer_share |
| ) |
| if feature_fusion_layer is not None: |
| self.fusion_layers = _get_clones( |
| feature_fusion_layer, num_layers, layer_share=enc_layer_share |
| ) |
| else: |
| self.layers = [] |
| del encoder_layer |
|
|
| if ctx_enhance_layer is not None: |
| self.ctx_layers = [] |
| del ctx_enhance_layer |
| if feature_fusion_layer is not None: |
| self.fusion_layers = [] |
| del feature_fusion_layer |
|
|
| self.query_scale = None |
| self.num_queries = num_queries |
| self.num_layers = num_layers |
| self.d_model = d_model |
|
|
| self.use_checkpoint = use_checkpoint |
| self.use_transformer_ckpt = use_transformer_ckpt |
| |
| |
|
|
| def forward( |
| self, |
| |
| context_features: Tensor, |
| |
| object_features: Tensor = None, |
| ): |
| """ |
| Input: |
| - exo_features: [bs, sum(hi*wi), cc] |
| - ego_features: [bs, sum(hi*wi), cc] |
| Outpus: |
| - output: [bs, sum(hi*wi), 256] |
| """ |
| |
|
|
| |
|         for layer_id, obj_layer in enumerate(self.layers): |
|             # Bi-directional object <-> context feature fusion. |
|             if self.fusion_layers: |
|                 object_features, context_features = self.fusion_layers[layer_id]( |
|                     v=object_features, |
|                     l=context_features, |
|                 ) |
|
|             # Context self-attention; nn.MultiheadAttention expects (seq_len, bs, d). |
|             if self.ctx_layers: |
|                 context_features = self.ctx_layers[layer_id]( |
|                     src=context_features.transpose(0, 1), |
|                 ).transpose(0, 1) |
|
|             # Object self-attention. |
|             object_features = obj_layer( |
|                 src=object_features.transpose(0, 1), |
|             ).transpose(0, 1) |
|
|         return context_features, object_features |