import copy
import math
import warnings

import cv2 as cv
import mmcv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib import pyplot as plt

from mmcv.cnn import constant_init, xavier_init
from mmcv.cnn.bricks.registry import ATTENTION, TRANSFORMER_LAYER_SEQUENCE
from mmcv.cnn.bricks.transformer import TransformerLayerSequence
from mmcv.ops.multi_scale_deform_attn import multi_scale_deformable_attn_pytorch
from mmcv.runner.base_module import BaseModule, ModuleList, Sequential
from mmcv.utils import (ConfigDict, build_from_cfg, deprecated_api_warning,
                        ext_loader, to_2tuple)

from mmdet3d_plugin.uniad.custom_modules.peft import (
    LoRALinear, ZeroAdapter, LoRACLAdapter, LoRAMoECLAdapter,
    finetuning_detach, frozen_grad, peft_wrapper_forward, lora_wrapper)

from .multi_scale_deformable_attn_function import (
    MultiScaleDeformableAttnFunction_fp32,
    MultiScaleDeformableAttnFunction_fp16,
)

ext_module = ext_loader.load_ext(
    "_ext", ["ms_deform_attn_backward", "ms_deform_attn_forward"]
)

def inverse_sigmoid(x, eps=1e-5):
    """Inverse of the sigmoid function.

    Args:
        x (Tensor): The tensor to invert.
        eps (float): Clamp value used to avoid numerical overflow.
            Defaults to 1e-5.

    Returns:
        Tensor: The inverse sigmoid of ``x``, with the same shape as the
            input.
    """
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)
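
# A minimal sanity check (illustrative only, not part of the module): for
# values strictly inside (eps, 1 - eps), inverse_sigmoid is the exact inverse
# of torch.sigmoid up to floating-point error:
#
#     x = torch.tensor([0.1, 0.5, 0.9])
#     assert torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x, atol=1e-6)
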
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class DetectionTransformerDecoder(TransformerLayerSequence):
    """Implements the decoder in the DETR3D transformer.

    Args:
        return_intermediate (bool): Whether to return intermediate outputs.
        coder_norm_cfg (dict): Config of the last normalization layer.
            Default: `LN`.
    """

    def __init__(self, *args, return_intermediate=False, **kwargs):
        super(DetectionTransformerDecoder, self).__init__(*args, **kwargs)
        self.return_intermediate = return_intermediate
        self.fp16_enabled = False

    def forward(
        self,
        query,
        *args,
        reference_points=None,
        reg_branches=None,
        key_padding_mask=None,
        **kwargs,
    ):
        """Forward function for `Detr3DTransformerDecoder`.

        Args:
            query (Tensor): Input query with shape
                `(num_query, bs, embed_dims)`.
            reference_points (Tensor): The normalized 3D reference points
                used to compute sampling offsets, with shape
                (bs, num_query, 3).
            reg_branches (obj:`nn.ModuleList`): Used for refining the
                regression results. Only passed when `with_box_refine` is
                True, otherwise `None` is passed.

        Returns:
            Tensor: Results with shape [1, num_query, bs, embed_dims] when
                return_intermediate is `False`, otherwise it has shape
                [num_layers, num_query, bs, embed_dims].
        """
        output = query
        intermediate = []
        intermediate_reference_points = []
        for lid, layer in enumerate(self.layers):
            # Only the (x, y) part of the reference points is used for
            # sampling; a level dimension is added for deformable attention.
            reference_points_input = reference_points[..., :2].unsqueeze(2)
            output = layer(
                output,
                *args,
                reference_points=reference_points_input,
                key_padding_mask=key_padding_mask,
                **kwargs,
            )
            output = output.permute(1, 0, 2)

            if reg_branches is not None:
                tmp = reg_branches[lid](output)

                assert reference_points.shape[-1] == 3

                # Iterative refinement: the regression branch predicts
                # offsets in logit space, added to the inverse-sigmoid of
                # the current reference points. Indices 0, 1 (x, y) and 4
                # (z) of the regression output hold the center coordinates.
                new_reference_points = torch.zeros_like(reference_points)
                new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(
                    reference_points[..., :2]
                )
                new_reference_points[..., 2:3] = tmp[..., 4:5] + inverse_sigmoid(
                    reference_points[..., 2:3]
                )

                new_reference_points = new_reference_points.sigmoid()

                # Stop gradients so each layer refines, rather than
                # backpropagates through, the previous reference points.
                reference_points = new_reference_points.detach()

            output = output.permute(1, 0, 2)
            if self.return_intermediate:
                intermediate.append(output)
                intermediate_reference_points.append(reference_points)

        if self.return_intermediate:
            return torch.stack(intermediate), torch.stack(intermediate_reference_points)

        return output, reference_points
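
# A minimal configuration sketch (illustrative; the layer types and
# parameters below are assumptions, not taken from this repo's configs):
# the decoder is built through mmcv's registry from a dict such as
#
#     decoder_cfg = dict(
#         type="DetectionTransformerDecoder",
#         return_intermediate=True,
#         num_layers=6,
#         transformerlayers=dict(
#             type="BaseTransformerLayer",
#             attn_cfgs=[
#                 dict(type="MultiheadAttention", embed_dims=256, num_heads=8),
#                 dict(type="CustomMSDeformableAttention", embed_dims=256),
#             ],
#             feedforward_channels=512,
#             operation_order=("self_attn", "norm", "cross_attn", "norm",
#                              "ffn", "norm"),
#         ),
#     )
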
@ATTENTION.register_module()
class CustomMSDeformableAttention(BaseModule):
    """An attention module used in Deformable-DETR.

    `Deformable DETR: Deformable Transformers for End-to-End Object Detection.
    <https://arxiv.org/pdf/2010.04159.pdf>`_.

    Args:
        embed_dims (int): The embedding dimension of Attention.
            Default: 256.
        num_heads (int): Parallel attention heads. Default: 8.
        num_levels (int): The number of feature maps used in
            Attention. Default: 4.
        num_points (int): The number of sampling points for
            each query in each head. Default: 4.
        im2col_step (int): The step used in image_to_column.
            Default: 64.
        dropout (float): A Dropout layer on `inp_identity`.
            Default: 0.1.
        batch_first (bool): If True, key, query and value have shape
            (batch, n, embed_dims); otherwise (n, batch, embed_dims).
            Default: False.
        norm_cfg (dict): Config dict for the normalization layer.
            Default: None.
        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
            Default: None.
        use_lora (bool): Whether to attach parallel LoRA adapters to the
            linear projections. Default: False.
        lora_rank (int): Rank of the LoRA decomposition. Default: 16.
        lora_drop (float): Dropout applied inside the LoRA adapters.
            Default: 0.
    """

    def __init__(
        self,
        embed_dims=256,
        num_heads=8,
        num_levels=4,
        num_points=4,
        im2col_step=64,
        dropout=0.1,
        batch_first=False,
        norm_cfg=None,
        init_cfg=None,
        use_lora=False,
        lora_rank=16,
        lora_drop=0.,
    ):
        super().__init__(init_cfg)
        if embed_dims % num_heads != 0:
            raise ValueError(
                f"embed_dims must be divisible by num_heads, "
                f"but got {embed_dims} and {num_heads}"
            )
        dim_per_head = embed_dims // num_heads
        self.norm_cfg = norm_cfg
        self.dropout = nn.Dropout(dropout)
        self.batch_first = batch_first
        self.fp16_enabled = False

        def _is_power_of_2(n):
            if (not isinstance(n, int)) or (n < 0):
                raise ValueError(
                    "invalid input for _is_power_of_2: {} (type: {})".format(n, type(n))
                )
            return (n & (n - 1) == 0) and n != 0

        if not _is_power_of_2(dim_per_head):
            warnings.warn(
                "You'd better set embed_dims in "
                "MultiScaleDeformAttention to make "
                "the dimension of each attention head a power of 2, "
                "which is more efficient in our CUDA implementation."
            )

        self.im2col_step = im2col_step
        self.embed_dims = embed_dims
        self.num_levels = num_levels
        self.num_heads = num_heads
        self.num_points = num_points
        self.use_lora = use_lora
        self.lora_rank = lora_rank

        # Each query predicts, per head and per level, `num_points` 2D
        # sampling offsets and one attention weight per sampling point.
        self.sampling_offsets = nn.Linear(
            embed_dims, num_heads * num_levels * num_points * 2
        )
        self.attention_weights = nn.Linear(
            embed_dims, num_heads * num_levels * num_points
        )
        self.value_proj = nn.Linear(embed_dims, embed_dims)
        self.output_proj = nn.Linear(embed_dims, embed_dims)

        if self.use_lora:
            # Low-rank adapters run in parallel with the base projections;
            # their outputs are summed in forward().
            self.sampling_offsets_lora = LoRALinear(
                embed_dims, num_heads * num_levels * num_points * 2,
                r=lora_rank, dropout=lora_drop,
            )
            self.attention_weights_lora = LoRALinear(
                embed_dims, num_heads * num_levels * num_points,
                r=lora_rank, dropout=lora_drop,
            )
            self.value_proj_lora = LoRALinear(embed_dims, embed_dims, r=lora_rank, dropout=lora_drop)
            self.output_proj_lora = LoRALinear(embed_dims, embed_dims, r=lora_rank, dropout=lora_drop)

        self.init_weights()

    def init_weights(self):
        """Default initialization for Parameters of Module."""
        constant_init(self.sampling_offsets, 0.0)
        # Initialize the sampling-offset bias so that the initial sampling
        # points are spread on rings around the reference point: one
        # direction per head, one ring radius per point index.
        thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
            2.0 * math.pi / self.num_heads
        )
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.num_heads, 1, 1, 2)
            .repeat(1, self.num_levels, self.num_points, 1)
        )
        for i in range(self.num_points):
            grid_init[:, :, i, :] *= i + 1

        self.sampling_offsets.bias.data = grid_init.view(-1)
        constant_init(self.attention_weights, val=0.0, bias=0.0)
        xavier_init(self.value_proj, distribution="uniform", bias=0.0)
        xavier_init(self.output_proj, distribution="uniform", bias=0.0)

        if self.use_lora:
            # Detach the base parameters for PEFT fine-tuning (see
            # finetuning_detach), leaving only the adapters trainable.
            finetuning_detach(self)

        self._is_init = True
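
    # For reference, a minimal sketch of the low-rank adapter pattern assumed
    # here (the actual LoRALinear lives in
    # mmdet3d_plugin.uniad.custom_modules.peft and may differ in details such
    # as scaling and initialization):
    #
    #     class _LoRASketch(nn.Module):
    #         def __init__(self, in_dim, out_dim, r=16, dropout=0.0):
    #             super().__init__()
    #             self.down = nn.Linear(in_dim, r, bias=False)   # A: d -> r
    #             self.up = nn.Linear(r, out_dim, bias=False)    # B: r -> d'
    #             self.drop = nn.Dropout(dropout)
    #             nn.init.zeros_(self.up.weight)  # starts as a no-op
    #
    #         def forward(self, x):
    #             return self.up(self.drop(self.down(x)))
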
    @deprecated_api_warning(
        {"residual": "identity"}, cls_name="MultiScaleDeformableAttention"
    )
    def forward(
        self,
        query,
        key=None,
        value=None,
        identity=None,
        query_pos=None,
        key_padding_mask=None,
        reference_points=None,
        spatial_shapes=None,
        level_start_index=None,
        flag="decoder",
        **kwargs,
    ):
        """Forward Function of MultiScaleDeformAttention.

        Args:
            query (Tensor): Query of Transformer with shape
                (num_query, bs, embed_dims).
            key (Tensor): The key tensor with shape
                `(num_key, bs, embed_dims)`.
            value (Tensor): The value tensor with shape
                `(num_key, bs, embed_dims)`.
            identity (Tensor): The tensor used for addition, with the
                same shape as `query`. Default None. If None,
                `query` will be used.
            query_pos (Tensor): The positional encoding for `query`.
                Default: None.
            reference_points (Tensor): The normalized reference points
                with shape (bs, num_query, num_levels, 2), all elements
                in the range [0, 1], top-left (0, 0), bottom-right (1, 1),
                including the padding area; or
                (bs, num_query, num_levels, 4), where the two additional
                dimensions (w, h) form reference boxes.
            key_padding_mask (Tensor): ByteTensor for `value`, with
                shape [bs, num_key].
            spatial_shapes (Tensor): Spatial shapes of features at
                different levels. With shape (num_levels, 2), the last
                dimension represents (h, w).
            level_start_index (Tensor): The start index of each level.
                A tensor with shape ``(num_levels, )`` which can be
                represented as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].

        Returns:
            Tensor: forwarded results with shape [num_query, bs, embed_dims].
        """
        if value is None:
            value = query

        if identity is None:
            identity = query
        if query_pos is not None:
            query = query + query_pos
        if not self.batch_first:
            # Change to (bs, num_query, embed_dims).
            query = query.permute(1, 0, 2)
            value = value.permute(1, 0, 2)

        bs, num_query, _ = query.shape
        bs, num_value, _ = value.shape
        assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value

        if self.use_lora:
            # LoRA adapters run in parallel with the base projections.
            value = self.value_proj(value) + self.value_proj_lora(value)
        else:
            value = self.value_proj(value)

        if key_padding_mask is not None:
            value = value.masked_fill(key_padding_mask[..., None], 0.0)
        value = value.view(bs, num_value, self.num_heads, -1)

        sampling_offsets = self.sampling_offsets(query).view(
            bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
        )
        attention_weights = self.attention_weights(query).view(
            bs, num_query, self.num_heads, self.num_levels * self.num_points
        )

        if self.use_lora:
            sampling_offsets += self.sampling_offsets_lora(query).view(
                bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
            )
            attention_weights += self.attention_weights_lora(query).view(
                bs, num_query, self.num_heads, self.num_levels * self.num_points
            )

        # Normalize the weights over all (level, point) pairs per head.
        attention_weights = attention_weights.softmax(-1)

        attention_weights = attention_weights.view(
            bs, num_query, self.num_heads, self.num_levels, self.num_points
        )
        if reference_points.shape[-1] == 2:
            # Offsets are predicted in pixels; divide by (w, h) of each
            # level so sampling locations stay in normalized coordinates.
            offset_normalizer = torch.stack(
                [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1
            )
            sampling_locations = (
                reference_points[:, :, None, :, None, :]
                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            )
        elif reference_points.shape[-1] == 4:
            # Reference boxes: scale offsets by half the box size (w, h).
            sampling_locations = (
                reference_points[:, :, None, :, None, :2]
                + sampling_offsets
                / self.num_points
                * reference_points[:, :, None, :, None, 2:]
                * 0.5
            )
        else:
            raise ValueError(
                f"Last dim of reference_points must be"
                f" 2 or 4, but got {reference_points.shape[-1]} instead."
            )
        if torch.cuda.is_available() and value.is_cuda:
            # The fp32 CUDA kernel is used even for fp16 inputs, since
            # half-precision deformable attention is numerically unstable.
            MultiScaleDeformableAttnFunction = MultiScaleDeformableAttnFunction_fp32
            output = MultiScaleDeformableAttnFunction.apply(
                value,
                spatial_shapes,
                level_start_index,
                sampling_locations,
                attention_weights,
                self.im2col_step,
            )
        else:
            output = multi_scale_deformable_attn_pytorch(
                value, spatial_shapes, sampling_locations, attention_weights
            )

        if self.use_lora:
            output = self.output_proj(output) + self.output_proj_lora(output)
        else:
            output = self.output_proj(output)

        if not self.batch_first:
            # Change back to (num_query, bs, embed_dims).
            output = output.permute(1, 0, 2)

        return self.dropout(output) + identity
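
# A minimal CPU smoke test (illustrative sketch; shapes chosen arbitrarily).
# Without a CUDA device the module falls back to
# multi_scale_deformable_attn_pytorch:
#
#     attn = CustomMSDeformableAttention(embed_dims=256, num_levels=1)
#     bs, num_query = 2, 100
#     spatial_shapes = torch.tensor([[32, 32]], dtype=torch.long)
#     level_start_index = torch.tensor([0], dtype=torch.long)
#     query = torch.rand(num_query, bs, 256)
#     value = torch.rand(32 * 32, bs, 256)
#     reference_points = torch.rand(bs, num_query, 1, 2)
#     out = attn(query, value=value, reference_points=reference_points,
#                spatial_shapes=spatial_shapes,
#                level_start_index=level_start_index)
#     assert out.shape == (num_query, bs, 256)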