import warnings
from typing import List, Optional, Tuple

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
from torch import Tensor
from torch.nn import MultiheadAttention
from torch.nn.functional import linear, pad


def scale_to_target_range(source_tensor, target_tensor):
    """
    Rescale source_tensor to (a damped version of) the mean and standard
    deviation of target_tensor.

    :param source_tensor: tensor to be rescaled
    :param target_tensor: tensor providing the target mean and standard deviation
    :return: the rescaled tensor
    """
    # The target statistics are deliberately damped by a factor of 5, so the output
    # ends up with one fifth of target_tensor's mean and standard deviation.
    target_mean = target_tensor.mean() / 5
    target_std = target_tensor.std() / 5

    source_mean = source_tensor.mean()
    source_std = source_tensor.std()

    # Standardize the source, then map it onto the damped target statistics.
    source_normalized = (source_tensor - source_mean) / source_std
    scaled_tensor = source_normalized * target_std + target_mean

    return scaled_tensor
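
# A minimal usage sketch for scale_to_target_range (illustrative only; the shapes
# and values below are made-up assumptions, not taken from this module):
#
#     src = torch.randn(8, 5, 32) * 10.0 + 3.0   # arbitrary statistics
#     tgt = torch.randn(8, 5, 32)
#     out = scale_to_target_range(src, tgt)
#     # out.mean() ~= tgt.mean() / 5 and out.std() ~= tgt.std() / 5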


def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    gaussian: Optional[Tensor] = None,
    idx=-1,
    images=None,
    img_path=None,
) -> Tuple[Tensor, Optional[Tensor]]:
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
            value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
            the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
        use_separate_proj_weight: the function accepts the projection weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
        gaussian: the generated Gaussian-like weight map.
    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
          will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
          is provided, it will be added to the attention weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
    """
    # Typical shapes in this model:
    #   query.shape: [5, bs, hidden_dim]
    #   key.shape, value.shape: [1024, bs, hidden_dim]
    #   embed_dim_to_check: hidden_dim
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    if not use_separate_proj_weight:
        if torch.equal(query, key) and torch.equal(key, value):
            # Self-attention: project q, k and v with a single matrix multiply.
            q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)

        elif torch.equal(key, value):
            # Encoder-decoder attention: project the query on its own, then
            # project key and value together.
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = linear(key, _w, _b).chunk(2, dim=-1)

        else:
            # Fully separate projections, sliced out of the packed in_proj_weight.
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)

            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = linear(key, _w, _b)

            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = linear(value, _w, _b)
    else:
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
            v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
        else:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = linear(value, v_proj_weight_non_opt, in_proj_bias)
    q = q * scaling
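
    # At this point q is (tgt_len, bsz, embed_dim) and already scaled by
    # head_dim ** -0.5; k and v, when present, are (src_len, bsz, embed_dim).
    # The per-head split happens further below.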

    if attn_mask is not None:
        assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
            attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
            'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
        if attn_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)

        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 2D attn_mask is not correct.')
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError('The size of the 3D attn_mask is not correct.')
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))

    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
        key_padding_mask = key_padding_mask.to(torch.bool)

    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    # Split the embedding dimension into num_heads heads.
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    # Only the straightforward dense attention path is implemented here.
    naive = True
    if naive:
        # Raw attention logits: (bsz * num_heads, tgt_len, src_len).
        attn_output_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

        if attn_mask is not None:
            if attn_mask.dtype == torch.bool:
                attn_output_weights.masked_fill_(attn_mask, float('-inf'))
            else:
                attn_output_weights += attn_mask

        if key_padding_mask is not None:
            attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
            attn_output_weights = attn_output_weights.masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2), float('-inf'),
            )
            attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

        # Inject the spatial prior: gaussian[0] is the Gaussian-like weight map of
        # shape (tgt_len, src_len, num_heads * bsz). Permute it to
        # (num_heads * bsz, tgt_len, src_len) so it aligns with the attention logits,
        # rescale it to a damped version of the logits' statistics, and add it as a
        # bias before the softmax.
        delta = gaussian[0].permute(2, 0, 1)
        delta = scale_to_target_range(delta, attn_output_weights)
        attn_output_weights = attn_output_weights + delta

        attn_output_weights = torch.nn.functional.softmax(attn_output_weights, dim=-1)
        attn_output_weights = torch.nn.functional.dropout(attn_output_weights, p=dropout_p,
                                                          training=training)

        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]

        attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn_output = linear(attn_output, out_proj_weight, out_proj_bias)

    return attn_output, attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
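
# Informal summary of the computation above (written for readability; the exact
# damping of the prior comes from scale_to_target_range):
#
#     logits = (q @ k^T) * head_dim ** -0.5 + masks + rescale(gaussian prior)
#     attn   = dropout(softmax(logits, dim=-1))
#     out    = (attn @ v) @ W_out + b_out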


class GaussianMultiheadAttention(MultiheadAttention):
    def __init__(self, embed_dim, num_heads, **kwargs):
        super(GaussianMultiheadAttention, self).__init__(embed_dim, num_heads, **kwargs)
        self.gaussian = True

    def forward(self, query, key, value, key_padding_mask=None,
                need_weights=False, attn_mask=None, gaussian=None, idx=-1, images=None, img_path=None):
        r"""
        Args:
            query, key, value: map a query and a set of key-value pairs to an output.
                See "Attention Is All You Need" for more details.
            key_padding_mask: if provided, specified padding elements in the key will
                be ignored by the attention. When given a binary mask and a value is True,
                the corresponding value on the attention layer will be ignored. When given
                a byte mask and a value is non-zero, the corresponding value on the attention
                layer will be ignored.
            need_weights: output attn_output_weights.
            attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
                the batches while a 3D mask allows a different mask to be specified for the entries of each batch.
            gaussian: 2D Gaussian attention map that focuses attention on certain object queries' initial estimations
                with handcrafted query spatial priors.

        Shape:
            - Inputs:
            - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
              the embedding dimension.
            - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
              the embedding dimension.
            - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
              If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
              will be unchanged. If a BoolTensor is provided, the positions with the
              value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
            - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
              3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
              S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
              positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
              while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
              are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
              is provided, it will be added to the attention weight.
            - gaussian: :math:`(L, S, nhead * batch_size)`, where nhead is the number of heads in the multi-head
              attention module, L is the target sequence length, S is the source sequence length.

            - Outputs:
            - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
              E is the embedding dimension.
            - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
              L is the target sequence length, S is the source sequence length.
        """
        if not self._qkv_same_embed_dim:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight, gaussian=gaussian)
        else:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, gaussian=gaussian, idx=idx, images=images, img_path=img_path)
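

# Minimal smoke-test sketch (illustrative only): the sizes below are arbitrary
# assumptions, not values used elsewhere in this repository. It shows how a
# Gaussian-like prior of shape (L, S, num_heads * batch_size) is passed to
# GaussianMultiheadAttention alongside query/key/value.
if __name__ == "__main__":
    num_queries, src_len, batch_size = 5, 16, 2
    hidden_dim, num_heads = 256, 8

    attn = GaussianMultiheadAttention(hidden_dim, num_heads)

    query = torch.randn(num_queries, batch_size, hidden_dim)
    key = torch.randn(src_len, batch_size, hidden_dim)
    value = torch.randn(src_len, batch_size, hidden_dim)

    # One spatial prior per (head, batch) pair; wrapped in a list because the
    # forward pass reads gaussian[0].
    prior = torch.randn(num_queries, src_len, num_heads * batch_size)

    out, weights = attn(query, key, value, gaussian=[prior])
    print(out.shape)      # torch.Size([5, 2, 256])
    print(weights.shape)  # torch.Size([2, 8, 5, 16])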