Sentence Similarity
Transformers
Safetensors
English
mistral
feature-extraction
text-embedding
embeddings
information-retrieval
beir
text-classification
language-model
text-clustering
text-semantic-similarity
text-evaluation
text-reranking
natural_questions
ms_marco
fever
hotpot_qa
mteb
custom_code
text-generation-inference
text-embeddings-inference
Create attn_mask_utils.py
attn_mask_utils.py (ADDED, +202 -0)
from typing import List, Optional, Tuple, Union

import torch

from transformers.modeling_attn_mask_utils import AttentionMaskConverter


def _prepare_4d_attention_mask_for_sdpa(
    attention_mask: Optional[torch.Tensor],
    input_shape: Union[torch.Size, Tuple, List],
    inputs_embeds: torch.Tensor,
    past_key_values_length: int,
    sliding_window: Optional[int] = None,
):
    attn_mask_converter = AttentionMaskConverter(is_causal=False, sliding_window=sliding_window)

    key_value_length = input_shape[-1] + past_key_values_length
    batch_size, query_length = input_shape

    # torch.jit.trace and torchdynamo with fullgraph=True are unable to capture the controlflow
    # `is_causal=attention_mask is None and q_len > 1` used as an SDPA argument. We keep compatibility
    # with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
    # TODO: Fix this as well when using torchdynamo with fullgraph=True.
    is_tracing = torch.jit.is_tracing()

    if attention_mask is not None:
        if torch.all(attention_mask == 1):
            if is_tracing:
                pass
            elif query_length == 1:
                # For query_length == 1, causal attention and bi-directional attention are the same.
                attention_mask = None
            elif key_value_length == query_length:
                attention_mask = None
            else:
                # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot
                # generally ignore the attention mask, as SDPA causal mask generation may be wrong. We will
                # set `is_causal=False` in SDPA and rely on the Transformers attention_mask instead, hence
                # not setting it to None here.
                # Reference: https://github.com/pytorch/pytorch/issues/108108
                pass
    elif query_length > 1 and key_value_length != query_length:
        # See the comment above (https://github.com/pytorch/pytorch/issues/108108).
        # Ugly: we set it to True here to dispatch in the following controlflow to `to_causal_4d`.
        attention_mask = True
    elif is_tracing:
        raise ValueError(
            'Attention using SDPA can not be traced with torch.jit.trace when no attention_mask is provided. To solve this issue, please either load your model with the argument `attn_implementation="eager"` or pass an attention_mask input when tracing the model.'
        )

    if attention_mask is None:
        expanded_4d_mask = None
    elif attention_mask is True:
        expanded_4d_mask = attn_mask_converter.to_causal_4d(
            input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
    else:
        expanded_4d_mask = attn_mask_converter.to_4d(
            attention_mask,
            input_shape[-1],
            dtype=inputs_embeds.dtype,
            key_value_length=key_value_length,
        )

        # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention
        # backend produces nans if sequences are completely unattended in the attention mask.
        # Details: https://github.com/pytorch/pytorch/issues/110213
        if query_length > 1:
            expanded_4d_mask = AttentionMaskConverter._unmask_unattended(
                expanded_4d_mask, attention_mask, unmasked_value=0.0
            )

    return expanded_4d_mask
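
A minimal usage sketch for the helper above (illustrative only, not part of the committed file): the tensor sizes are made up, and it assumes a transformers release from the 4.36 line, where `AttentionMaskConverter.to_4d` and `_unmask_unattended` still have the signatures used here.

import torch

batch_size, seq_len, hidden_size = 2, 6, 32  # made-up sizes
inputs_embeds = torch.randn(batch_size, seq_len, hidden_size)
attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
attention_mask[1, 4:] = 0  # second sequence is padded after 4 tokens

mask_4d = _prepare_4d_attention_mask_for_sdpa(
    attention_mask, (batch_size, seq_len), inputs_embeds, past_key_values_length=0
)
# mask_4d has shape (batch_size, 1, seq_len, seq_len): 0.0 on attended positions and
# dtype-min on padded keys, with no causal triangle because is_causal=False. It can be
# passed directly as the attn_mask argument of F.scaled_dot_product_attention. A batch
# with no padding would instead return None, freeing SDPA to use its fastest kernels.
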
def _prepare_4d_attention_mask(
    attention_mask: Optional[torch.Tensor],
    input_shape: Union[torch.Size, Tuple, List],
    inputs_embeds: torch.Tensor,
    past_key_values_length: int,
    sliding_window: Optional[int] = None,
):
    attn_mask_converter = AttentionMaskConverter(is_causal=False, sliding_window=sliding_window)

    key_value_length = input_shape[-1] + past_key_values_length

    # 4d mask is passed through the layers
    if attention_mask is not None:
        attention_mask = attn_mask_converter.to_4d(
            attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype
        )
    else:
        attention_mask = attn_mask_converter.to_causal_4d(
            input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )

    return attention_mask
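
The eager-attention counterpart above always materializes the float mask, which the modeling code then adds to the raw attention scores before the softmax. Continuing the earlier sketch (the per-head scores are hypothetical):

scores = torch.randn(batch_size, 1, seq_len, seq_len)  # hypothetical pre-softmax scores, one head
eager_mask = _prepare_4d_attention_mask(
    attention_mask, (batch_size, seq_len), inputs_embeds, past_key_values_length=0
)
attn_probs = torch.softmax(scores + eager_mask, dim=-1)  # padded keys get ~zero weight

Note that the `attention_mask is None` branch falls through to `to_causal_4d`, which the transformers releases this file targets refuse for a converter built with `is_causal=False`; in practice the model is expected to always receive a padding mask.
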
def _prepare_4d_causal_attention_mask(
    attention_mask: Optional[torch.Tensor],
    input_shape: Union[torch.Size, Tuple, List],
    inputs_embeds: torch.Tensor,
    past_key_values_length: int,
    sliding_window: Optional[int] = None,
):
    attn_mask_converter = AttentionMaskConverter(is_causal=False, sliding_window=sliding_window)

    key_value_length = input_shape[-1] + past_key_values_length

    # 4d mask is passed through the layers
    if attention_mask is not None:
        attention_mask = attn_mask_converter.to_4d(
            attention_mask, input_shape[-1], key_value_length=key_value_length, dtype=inputs_embeds.dtype
        )
    else:
        attention_mask = attn_mask_converter.to_causal_4d(
            input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )

    return attention_mask
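
Despite its name, `_prepare_4d_causal_attention_mask` above is identical to `_prepare_4d_attention_mask`: both build the converter with `is_causal=False`, so the "causal" entry point that the modeling code imports also produces a bidirectional mask, which appears to be the purpose of this custom file for an embedding model. Continuing the sketch:

causal_variant = _prepare_4d_causal_attention_mask(
    attention_mask, (batch_size, seq_len), inputs_embeds, past_key_values_length=0
)
assert torch.equal(causal_variant, eager_mask)  # same bidirectional mask from both helpers
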
def _prepare_4d_causal_attention_mask_for_sdpa(
    attention_mask: Optional[torch.Tensor],
    input_shape: Union[torch.Size, Tuple, List],
    inputs_embeds: torch.Tensor,
    past_key_values_length: int,
    sliding_window: Optional[int] = None,
):
    """
    Prepares the correct `attn_mask` argument to be used by `torch.nn.functional.scaled_dot_product_attention`.

    In case no token is masked in the `attention_mask` argument, we simply set it to `None` for the cases
    `query_length == 1` and `key_value_length == query_length`, and rely instead on SDPA's `is_causal` argument to
    use causal/non-causal masks, allowing to dispatch to the flash attention kernel (that can otherwise not be used
    if a custom `attn_mask` is passed).
    """
    attn_mask_converter = AttentionMaskConverter(is_causal=False, sliding_window=sliding_window)

    key_value_length = input_shape[-1] + past_key_values_length
    batch_size, query_length = input_shape

    # torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow
    # `is_causal=attention_mask is None and q_len > 1` used as an SDPA argument. We keep compatibility with these
    # tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.
    # TODO: Fix this as well when using torchdynamo with fullgraph=True.
    is_tracing = torch.jit.is_tracing() or isinstance(inputs_embeds, torch.fx.Proxy)

    if attention_mask is not None:
        # 4d mask is passed through
        if len(attention_mask.shape) == 4:
            expected_shape = (input_shape[0], 1, input_shape[1], key_value_length)
            if tuple(attention_mask.shape) != expected_shape:
                raise ValueError(
                    f"Incorrect 4D attention_mask shape: {tuple(attention_mask.shape)}; expected: {expected_shape}."
                )
            else:
                # if the 4D mask has correct shape - invert it and fill with negative infinity
                inverted_mask = 1.0 - attention_mask.to(inputs_embeds.dtype)
                attention_mask = inverted_mask.masked_fill(
                    inverted_mask.to(torch.bool), torch.finfo(inputs_embeds.dtype).min
                )
                return attention_mask

        elif not is_tracing and torch.all(attention_mask == 1):
            if query_length == 1:
                # For query_length == 1, causal attention and bi-directional attention are the same.
                attention_mask = None
            elif key_value_length == query_length:
                attention_mask = None
            else:
                # Unfortunately, for query_length > 1 and key_value_length != query_length, we cannot generally
                # ignore the attention mask, as SDPA causal mask generation may be wrong. We will set
                # `is_causal=False` in SDPA and rely on the Transformers attention_mask instead, hence not setting
                # it to None here.
                # Reference: https://github.com/pytorch/pytorch/issues/108108
                pass
    elif query_length > 1 and key_value_length != query_length:
        # See the comment above (https://github.com/pytorch/pytorch/issues/108108).
        # Ugly: we set it to True here to dispatch in the following controlflow to `to_causal_4d`.
        attention_mask = True
    elif is_tracing:
        raise ValueError(
            'Attention using SDPA can not be traced with torch.jit.trace when no attention_mask is provided. To solve this issue, please either load your model with the argument `attn_implementation="eager"` or pass an attention_mask input when tracing the model.'
        )

    if attention_mask is None:
        expanded_4d_mask = None
    elif attention_mask is True:
        expanded_4d_mask = attn_mask_converter.to_causal_4d(
            input_shape[0], input_shape[-1], key_value_length, dtype=inputs_embeds.dtype, device=inputs_embeds.device
        )
    else:
        expanded_4d_mask = attn_mask_converter.to_4d(
            attention_mask,
            input_shape[-1],
            dtype=inputs_embeds.dtype,
            key_value_length=key_value_length,
        )

        # From PyTorch 2.1 onwards, F.scaled_dot_product_attention with the memory-efficient attention backend
        # produces nans if sequences are completely unattended in the attention mask.
        # Details: https://github.com/pytorch/pytorch/issues/110213
        #
        # This fix is not applied in case we are tracing with torch.jit.trace or symbolic_trace, as
        # _unmask_unattended has a data-dependent controlflow that can not be captured properly.
        # TODO: _unmask_unattended does not work either with torch.compile when using fullgraph=True.
        # We should find a way to detect this case.
        if query_length > 1 and not is_tracing:
            expanded_4d_mask = AttentionMaskConverter._unmask_unattended(
                expanded_4d_mask, attention_mask, unmasked_value=0.0
            )

    return expanded_4d_mask
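
Finally, a sketch of the SDPA fast path through the helper above (again illustrative, not part of the commit): with nothing masked and `key_value_length == query_length`, it returns None, so the modeling code can call `scaled_dot_product_attention` without an explicit mask.

import torch.nn.functional as F

full_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
sdpa_mask = _prepare_4d_causal_attention_mask_for_sdpa(
    full_mask, (batch_size, seq_len), inputs_embeds, past_key_values_length=0
)
assert sdpa_mask is None  # nothing is masked, so SDPA runs mask-free (and non-causal)

# Hypothetical single-head projections, just to show the call shape:
q = k = v = inputs_embeds.unsqueeze(1)  # (batch, 1 head, seq_len, hidden)
out = F.scaled_dot_product_attention(q, k, v, attn_mask=sdpa_mask, is_causal=False)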