import math
import warnings
from typing import Optional, Tuple

import torch
from torch import Tensor
from torch.nn.functional import (
    linear, softmax, dropout, pad,
    has_torch_function,
    handle_torch_function,
    _in_projection_packed,
)
|
|
|
|
def _scaled_dot_product_attention(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    attn_mask: Optional[Tensor] = None,
    dropout_p: float = 0.0,
    bsz: int = 1,
    subset_heads: Optional[Tensor] = None,
    subset_weights: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor]:
    B, Nt, E = q.shape
    q = q / math.sqrt(E)

    # (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
    attn = torch.bmm(q, k.transpose(-2, -1))
    if attn_mask is not None:
        attn += attn_mask
    attn = softmax(attn, dim=-1)
    if dropout_p > 0.0:
        attn = dropout(attn, p=dropout_p)
    if subset_heads is None:
        # (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
        output = torch.bmm(attn, v)
    else:
        # Compute all heads, then gather the per-example subset of heads and
        # rescale each selected head by its mixing weight.
        mixed_output = torch.bmm(attn, v).contiguous().view(bsz, -1, Nt, E)
        output = torch.stack(
            [mixed_output[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))],
            dim=1,
        )
        output = output * subset_weights.unsqueeze(2).unsqueeze(3)
        output = output.contiguous().view(-1, Nt, E)
    if subset_heads is not None:
        # Also return only the attention maps of the selected heads.
        _, Nt, Ns = attn.size()
        mixed_attn = attn.view(bsz, -1, Nt, Ns)
        attn = torch.stack(
            [mixed_attn[torch.arange(bsz), subset_heads[:, col], :, :] for col in range(subset_heads.size(1))], dim=1
        )
    return output, attn
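
# Shape sketch for the subset-heads path above (assumed from the code, not enforced here):
#   q:              (bsz * total_num_heads, Nt, head_dim)
#   k, v:           (bsz * total_num_heads, Ns, head_dim)
#   subset_heads:   (bsz, num_heads) integer indices into the total heads
#   subset_weights: (bsz, num_heads) per-head mixing weights
# yielding output of shape (bsz * num_heads, Nt, head_dim) and attention weights of
# shape (bsz, num_heads, Nt, Ns); without subset_heads both keep the
# (bsz * total_num_heads, ...) layout.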
|
|
|
|
def _in_projection(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    w_q: Tensor,
    w_k: Tensor,
    w_v: Tensor,
    b_q: Optional[Tensor] = None,
    b_k: Optional[Tensor] = None,
    b_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
|
|
|
|
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    total_num_heads: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
    subset_heads: Optional[Tensor] = None,
    subset_weights: Optional[Tensor] = None,
):
    tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
    if has_torch_function(tens_ops):
        return handle_torch_function(
            multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            total_num_heads,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
            subset_heads=subset_heads,
            subset_weights=subset_weights,
        )
|
|
    # set up shape vars
    tgt_len, bsz, embed_dim = query.shape
    src_len, _, _ = key.shape
    assert embed_dim == embed_dim_to_check, \
        f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
    if isinstance(embed_dim, torch.Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
    else:
        head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
    if use_separate_proj_weight:
        # allow MHA to have different embedding dimensions when separate projection weights are used
        assert key.shape[:2] == value.shape[:2], \
            f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
    else:
        assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
|
|
    #
    # compute in-projection
    #
    if not use_separate_proj_weight:
        q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
    else:
        assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
        assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
        assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
        if in_proj_bias is None:
            b_q = b_k = b_v = None
        else:
            b_q, b_k, b_v = in_proj_bias.chunk(3)
        q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
|
|
    # prep attention mask
    if attn_mask is not None:
        if attn_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)
        else:
            assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
                f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
        # ensure attn_mask's dim is 3
        if attn_mask.dim() == 2:
            correct_2d_size = (tgt_len, src_len)
            if attn_mask.shape != correct_2d_size:
                raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = (bsz * total_num_heads, tgt_len, src_len)
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
        else:
            raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
|
|
    # prep key padding mask
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
        key_padding_mask = key_padding_mask.to(torch.bool)
|
|
    # add bias along batch dimension (currently second)
    if bias_k is not None and bias_v is not None:
        assert static_k is None, "bias cannot be added to static key."
        assert static_v is None, "bias cannot be added to static value."
        k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
        v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
    else:
        assert bias_k is None
        assert bias_v is None
|
|
    #
    # reshape q, k, v for multi-head attention and make them batch-first
    #
    q = q.contiguous().view(tgt_len, bsz * total_num_heads, head_dim).transpose(0, 1)
    if static_k is None:
        k = k.contiguous().view(k.shape[0], bsz * total_num_heads, head_dim).transpose(0, 1)
    else:
        # static_k is expected to already be laid out as (bsz * total_num_heads, src_len, head_dim)
        assert static_k.size(0) == bsz * total_num_heads, \
            f"expecting static_k.size(0) of {bsz * total_num_heads}, but got {static_k.size(0)}"
        assert static_k.size(2) == head_dim, \
            f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
        k = static_k
    if static_v is None:
        v = v.contiguous().view(v.shape[0], bsz * total_num_heads, head_dim).transpose(0, 1)
    else:
        # static_v is expected to already be laid out as (bsz * total_num_heads, src_len, head_dim)
        assert static_v.size(0) == bsz * total_num_heads, \
            f"expecting static_v.size(0) of {bsz * total_num_heads}, but got {static_v.size(0)}"
        assert static_v.size(2) == head_dim, \
            f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
        v = static_v
|
|
    # add zero attention along batch dimension (now first)
    if add_zero_attn:
        zero_attn_shape = (bsz * total_num_heads, 1, head_dim)
        k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))
|
|
    # update source sequence length after adjustments
    src_len = k.size(1)
|
|
    # merge key padding and attention masks
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (bsz, src_len), \
            f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
        key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
            expand(-1, total_num_heads, -1, -1).reshape(bsz * total_num_heads, 1, src_len)
        if attn_mask is None:
            attn_mask = key_padding_mask
        elif attn_mask.dtype == torch.bool:
            attn_mask = attn_mask.logical_or(key_padding_mask)
        else:
            attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
|
|
    # convert boolean mask to an additive float mask
    if attn_mask is not None and attn_mask.dtype == torch.bool:
        new_attn_mask = torch.zeros_like(attn_mask, dtype=torch.float)
        new_attn_mask.masked_fill_(attn_mask, float("-inf"))
        attn_mask = new_attn_mask
|
|
    # adjust dropout probability
    if not training:
        dropout_p = 0.0
|
|
    #
    # calculate attention and out-projection
    #
    attn_output, attn_output_weights = _scaled_dot_product_attention(
        q, k, v, attn_mask, dropout_p, bsz, subset_heads, subset_weights
    )
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
|
|
    if need_weights:
        # average attention weights over heads; the view assumes the weights are
        # already per selected head (subset path) or that total_num_heads == num_heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
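

# Minimal usage sketch (illustrative sizes, not taken from the original source):
# 8 heads are computed in total and 4 are selected per batch element via
# subset_heads, with subset_weights rescaling the selected heads. All weight
# tensors below are random placeholders, only meant to exercise the shapes.
if __name__ == "__main__":
    tgt_len, src_len, bsz, embed_dim = 5, 6, 2, 16
    total_num_heads, num_heads = 8, 4
    head_dim = embed_dim // num_heads            # 4
    proj_dim = total_num_heads * head_dim        # width of each packed projection

    query = torch.randn(tgt_len, bsz, embed_dim)
    key = torch.randn(src_len, bsz, embed_dim)
    value = torch.randn(src_len, bsz, embed_dim)

    in_proj_weight = torch.randn(3 * proj_dim, embed_dim)
    in_proj_bias = torch.zeros(3 * proj_dim)
    out_proj_weight = torch.randn(embed_dim, embed_dim)
    out_proj_bias = torch.zeros(embed_dim)

    # for every batch element, pick num_heads head indices and mixing weights
    subset_heads = torch.stack([torch.randperm(total_num_heads)[:num_heads] for _ in range(bsz)])
    subset_weights = torch.softmax(torch.randn(bsz, num_heads), dim=-1)

    out, weights = multi_head_attention_forward(
        query, key, value, embed_dim, total_num_heads, num_heads,
        in_proj_weight, in_proj_bias, None, None, False, 0.0,
        out_proj_weight, out_proj_bias,
        subset_heads=subset_heads, subset_weights=subset_weights,
    )
    print(out.shape, weights.shape)  # (tgt_len, bsz, embed_dim), (bsz, tgt_len, src_len)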
|
|