|
|
import warnings
from typing import Dict, Optional, Tuple

import torch
from torch import Tensor
from torch.nn.functional import (
    has_torch_function, handle_torch_function, multi_head_attention_forward, pad,
    linear, _in_projection, _in_projection_packed)

try:
    # torch >= 2.0 exposes the fused kernel publicly; it returns only the attention
    # output, which is handled at the call site below.
    from torch.nn.functional import scaled_dot_product_attention
except ImportError:
    # Older torch only provides the private implementation, which returns
    # (attn_output, attn_weights).
    from torch.nn.functional import _scaled_dot_product_attention as scaled_dot_product_attention
|
""" |
|
|
We copy torch raw implementation here with an additional param use_direct_input, for LoRA implementation. |
|
|
""" |
|
|
def custom_multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    use_direct_input: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
r""" |
|
|
Args: |
|
|
query, key, value: map a query and a set of key-value pairs to an output. |
|
|
See "Attention Is All You Need" for more details. |
|
|
embed_dim_to_check: total dimension of the model. |
|
|
num_heads: parallel attention heads. |
|
|
in_proj_weight, in_proj_bias: input projection weight and bias. |
|
|
bias_k, bias_v: bias of the key and value sequences to be added at dim=0. |
|
|
add_zero_attn: add a new batch of zeros to the key and |
|
|
value sequences at dim=1. |
|
|
dropout_p: probability of an element to be zeroed. |
|
|
out_proj_weight, out_proj_bias: the output projection weight and bias. |
|
|
training: apply dropout if is ``True``. |
|
|
key_padding_mask: if provided, specified padding elements in the key will |
|
|
be ignored by the attention. This is an binary mask. When the value is True, |
|
|
the corresponding value on the attention layer will be filled with -inf. |
|
|
need_weights: output attn_output_weights. |
|
|
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all |
|
|
the batches while a 3D mask allows to specify a different mask for the entries of each batch. |
|
|
use_separate_proj_weight: the function accept the proj. weights for query, key, |
|
|
and value in different forms. If false, in_proj_weight will be used, which is |
|
|
a combination of q_proj_weight, k_proj_weight, v_proj_weight. |
|
|
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. |
|
|
static_k, static_v: static key and value used for attention operators. |
|
|
|
|
|
|
|
|
Shape: |
|
|
Inputs: |
|
|
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is |
|
|
the embedding dimension. |
|
|
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is |
|
|
the embedding dimension. |
|
|
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is |
|
|
the embedding dimension. |
|
|
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. |
|
|
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions |
|
|
will be unchanged. If a BoolTensor is provided, the positions with the |
|
|
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. |
|
|
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. |
|
|
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, |
|
|
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked |
|
|
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend |
|
|
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` |
|
|
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor |
|
|
is provided, it will be added to the attention weight. |
|
|
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, |
|
|
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. |
|
|
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, |
|
|
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. |
|
|
|
|
|
Outputs: |
|
|
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, |
|
|
E is the embedding dimension. |
|
|
- attn_output_weights: :math:`(N, L, S)` where N is the batch size, |
|
|
L is the target sequence length, S is the source sequence length. |
|
|
""" |
|
|
    tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
    if has_torch_function(tens_ops):
        return handle_torch_function(
            custom_multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
            use_direct_input=use_direct_input,
        )
|
    tgt_len, bsz, embed_dim = query.shape
    src_len, _, _ = key.shape
    assert embed_dim == embed_dim_to_check, \
        f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
    if isinstance(embed_dim, torch.Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
    else:
        head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
    if use_separate_proj_weight:
        # allow MHA to have different embedding dimensions when separate projection weights are used
        assert key.shape[:2] == value.shape[:2], \
            f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
    else:
        assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
|
    # compute in-projection
    if not use_separate_proj_weight:
        q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
    else:
        if use_direct_input:
            # LoRA path: query, key and value are assumed to be projected already
            # upstream, so they are used directly.
            q, k, v = query, key, value
        else:
            assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
            assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
            assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
            if in_proj_bias is None:
                b_q = b_k = b_v = None
            else:
                b_q, b_k, b_v = in_proj_bias.chunk(3)
            q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
|
    # prep attention mask
    if attn_mask is not None:
        if attn_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)
        else:
            assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
                f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
        # ensure attn_mask's dim is 3
        if attn_mask.dim() == 2:
            correct_2d_size = (tgt_len, src_len)
            if attn_mask.shape != correct_2d_size:
                raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = (bsz * num_heads, tgt_len, src_len)
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
        else:
            raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
|
    # prep key padding mask
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
        key_padding_mask = key_padding_mask.to(torch.bool)
|
    # add bias along batch dimension (currently second)
    if bias_k is not None and bias_v is not None:
        assert static_k is None, "bias cannot be added to static key."
        assert static_v is None, "bias cannot be added to static value."
        k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
        v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
    else:
        assert bias_k is None
        assert bias_v is None
|
    # reshape q, k, v for multi-head attention and make them batch first
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if static_k is None:
        k = k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        assert static_k.size(0) == bsz * num_heads, \
            f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
        assert static_k.size(2) == head_dim, \
            f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
        k = static_k
    if static_v is None:
        v = v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
    else:
        assert static_v.size(0) == bsz * num_heads, \
            f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
        assert static_v.size(2) == head_dim, \
            f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
        v = static_v
|
    # add zero attention along batch dimension (now first)
    if add_zero_attn:
        zero_attn_shape = (bsz * num_heads, 1, head_dim)
        k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    # update source sequence length after adjustments
    src_len = k.size(1)
|
    # merge key padding mask and attention mask
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (bsz, src_len), \
            f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
        key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
            expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
        if attn_mask is None:
            attn_mask = key_padding_mask
        elif attn_mask.dtype == torch.bool:
            attn_mask = attn_mask.logical_or(key_padding_mask)
        else:
            attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))

    # convert boolean mask to an additive float mask
    if attn_mask is not None and attn_mask.dtype == torch.bool:
        new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
        new_attn_mask.masked_fill_(attn_mask, float("-inf"))
        attn_mask = new_attn_mask
|
    # adjust dropout probability
    if not training:
        dropout_p = 0.0

    # calculate attention and out projection; the fused kernel in torch >= 2.0 returns
    # only the output tensor, while the legacy private kernel returns (output, weights),
    # so normalise to the output here and recompute the weights only when requested
    attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p)
    if isinstance(attn_output, tuple):
        attn_output = attn_output[0]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # softmax(QK^T / sqrt(d) + mask), averaged over heads (dropout not reapplied)
        attn_output_weights = torch.bmm(q / (head_dim ** 0.5), k.transpose(-2, -1))
        if attn_mask is not None:
            attn_output_weights = attn_output_weights + attn_mask
        attn_output_weights = torch.softmax(attn_output_weights, dim=-1)
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
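

# A minimal smoke test, kept under ``__main__`` so it never runs on import. It is a
# usage sketch only (shapes and weights below are made up): it exercises the standard
# packed in-projection path, not the LoRA/direct-input path.
if __name__ == "__main__":
    L, S, N, E, H = 4, 5, 2, 16, 4
    query = torch.randn(L, N, E)
    key = torch.randn(S, N, E)
    value = torch.randn(S, N, E)
    attn_output, attn_weights = custom_multi_head_attention_forward(
        query, key, value, E, H,
        in_proj_weight=torch.randn(3 * E, E), in_proj_bias=torch.zeros(3 * E),
        bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0.0,
        out_proj_weight=torch.randn(E, E), out_proj_bias=torch.zeros(E),
        training=False, need_weights=True,
    )
    assert attn_output.shape == (L, N, E)
    assert attn_weights.shape == (N, L, S)
    print("attn_output:", tuple(attn_output.shape), "attn_weights:", tuple(attn_weights.shape))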
|
|
|