Lanni-ni committed
Commit 9ce24fa · verified · 1 Parent(s): a17c67a

add remote code + model files

Files changed (28)
  1. .ipynb_checkpoints/configuration_transformer-checkpoint.py +62 -0
  2. .ipynb_checkpoints/modeling_transformer-checkpoint.py +621 -0
  3. __init__.py +1 -0
  4. __pycache__/__init__.cpython-310.pyc +0 -0
  5. __pycache__/configuration_transformer.cpython-310.pyc +0 -0
  6. __pycache__/modeling_transformer.cpython-310.pyc +0 -0
  7. configuration_transformer.py +62 -0
  8. modeling_transformer.py +621 -0
  9. ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py +1138 -0
  10. ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py +72 -0
  11. ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py +179 -0
  12. ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py +88 -0
  13. ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py +117 -0
  14. ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py +171 -0
  15. ops/__init__.py +0 -0
  16. ops/__pycache__/__init__.cpython-310.pyc +0 -0
  17. ops/__pycache__/forgetting_attention.cpython-310.pyc +0 -0
  18. ops/__pycache__/forgetting_attention_std.cpython-310.pyc +0 -0
  19. ops/__pycache__/geometric_attention_std.cpython-310.pyc +0 -0
  20. ops/__pycache__/sliding_window_attention_std.cpython-310.pyc +0 -0
  21. ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc +0 -0
  22. ops/__pycache__/vanilla_attention_std.cpython-310.pyc +0 -0
  23. ops/forgetting_attention.py +1138 -0
  24. ops/forgetting_attention_std.py +72 -0
  25. ops/geometric_attention_std.py +179 -0
  26. ops/sliding_window_attention_std.py +88 -0
  27. ops/stickbreaking_attention_std.py +46 -0
  28. ops/vanilla_attention_std.py +171 -0
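
Since this commit ships the model as Hugging Face remote code, loading it requires `trust_remote_code=True`. A minimal sketch (the repo id below is a placeholder; the actual Hub path is not shown in this commit):

from transformers import AutoConfig, AutoModelForCausalLM

repo_id = "<namespace>/<model>"  # placeholder; substitute the actual repo id
# trust_remote_code=True lets transformers import configuration_transformer.py
# and modeling_transformer.py directly from the repository.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)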
.ipynb_checkpoints/configuration_transformer-checkpoint.py ADDED
@@ -0,0 +1,62 @@
(Jupyter checkpoint copy; its contents are identical to configuration_transformer.py below.)
.ipynb_checkpoints/modeling_transformer-checkpoint.py ADDED
@@ -0,0 +1,621 @@
(Jupyter checkpoint copy; its contents are identical to modeling_transformer.py below.)
__init__.py ADDED
@@ -0,0 +1 @@
+ # for HF remote code
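
For reference, `Auto*` resolution of these modules relies on an `auto_map` entry in the repo's `config.json`. That file is not part of this commit, so the mapping below is a hedged sketch of what such an entry typically looks like for these class names, written here as a Python dict:

# Hypothetical auto_map (normally stored in config.json); the class paths
# assume the files added in this commit.
auto_map = {
    "AutoConfig": "configuration_transformer.TransformerConfig",
    "AutoModelForCausalLM": "modeling_transformer.TransformerForCausalLM",
}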
__pycache__/__init__.cpython-310.pyc ADDED
Binary file (549 Bytes).
 
__pycache__/configuration_transformer.cpython-310.pyc ADDED
Binary file (1.98 kB).
 
__pycache__/modeling_transformer.cpython-310.pyc ADDED
Binary file (16.3 kB).
 
configuration_transformer.py ADDED
@@ -0,0 +1,62 @@
+ # -*- coding: utf-8 -*-
+ from typing import Optional
+ from transformers.configuration_utils import PretrainedConfig
+
+ class TransformerConfig(PretrainedConfig):
+     model_type = 'transformer'
+     keys_to_ignore_at_inference = ['past_key_values']
+
+     def __init__(
+         self,
+         vocab_size: int = 32000,
+         hidden_size: int = 2048,
+         hidden_ratio: Optional[int] = 4,
+         intermediate_size: Optional[int] = None,
+         num_hidden_layers: int = 24,
+         num_heads: int = 32,
+         num_kv_heads: Optional[int] = None,
+         hidden_act: str = "swish",
+         window_size: Optional[int] = None,
+         max_position_embeddings: int = 2048,
+         initializer_range: float = 0.02,
+         elementwise_affine: Optional[bool] = True,
+         norm_eps: float = 1e-6,
+         use_cache: bool = True,
+         pad_token_id: Optional[int] = None,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         tie_word_embeddings: bool = False,
+         attention_bias: bool = False,
+         fuse_norm: bool = True,
+         fuse_cross_entropy: bool = True,
+         rope_base: float = 500000.0,
+         use_rope: bool = True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.hidden_ratio = hidden_ratio
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_heads = num_heads
+         self.num_kv_heads = num_kv_heads
+         self.window_size = window_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.elementwise_affine = elementwise_affine
+         self.norm_eps = norm_eps
+         self.use_cache = use_cache
+         self.attention_bias = attention_bias
+         self.fuse_cross_entropy = fuse_cross_entropy
+         self.fuse_norm = fuse_norm
+         self.rope_base = rope_base
+         self.use_rope = use_rope
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
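
One subtlety worth noting: when `intermediate_size` is left as `None`, the config stores `None` and the actual width is derived later in `TransformerMLP` as the multiple of 256 closest above `2/3 * hidden_size * hidden_ratio`. A small worked check with the defaults (a sketch; the values follow directly from the signature above):

config = TransformerConfig()  # hidden_size=2048, hidden_ratio=4
assert config.intermediate_size is None
# Reproduce the derivation TransformerMLP applies:
intermediate = int(config.hidden_size * config.hidden_ratio * 2 / 3)  # 5461
intermediate = 256 * ((intermediate + 256 - 1) // 256)                # rounds up to 5632
assert intermediate == 5632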
modeling_transformer.py ADDED
@@ -0,0 +1,621 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ import math
+ import warnings
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint
+ from torch.nn import functional as F
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
+                                            CausalLMOutputWithPast)
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ # from fla.layers.attn import Attention
+ from fla.modules import FusedCrossEntropyLoss, RMSNorm
+ from fla.modules.activations import swiglu_linear
+
+ from fla.modules import RotaryEmbedding
+ from forgetting_transformer.ops.vanilla_attention_std import vanilla_attention_std, vanilla_attention_varlen_std
+
+ try:
+     from flash_attn.bert_padding import (index_first_axis, pad_input,
+                                          unpad_input)
+ except ImportError:
+     warnings.warn("Flash Attention is not installed. Please install it via `pip install flash-attn --no-build-isolation`")
+     # Fallback implementations used when flash-attn is unavailable
+     def pad_input(hidden_states, indices, batch, seqlen):
+         """
+         Simple padding function replacing flash_attn's pad_input.
+         """
+         output = torch.zeros(batch * seqlen, *hidden_states.shape[1:],
+                              device=hidden_states.device, dtype=hidden_states.dtype)
+         output[indices] = hidden_states
+         return output.view(batch, seqlen, *hidden_states.shape[1:])
+
+     def index_first_axis(hidden_states, indices):
+         """
+         Simple indexing function replacing flash_attn's index_first_axis.
+         """
+         return hidden_states[indices]
+
+     def unpad_input(hidden_states, attention_mask):
+         """
+         Simple unpadding function replacing flash_attn's unpad_input.
+         """
+         indices = attention_mask.flatten().nonzero(as_tuple=False).flatten()
+         seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)
+         cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
+         max_seqlen = seqlens.max().item()
+
+         batch_size, seq_len = attention_mask.shape[:2]
+         hidden_states_shape = hidden_states.shape[2:]
+
+         unpacked = hidden_states.reshape(batch_size * seq_len, *hidden_states_shape)
+         unpacked = unpacked[indices]
+
+         return unpacked, indices, cu_seqlens, max_seqlen
+
+ from einops import rearrange
+
+ # Dynamically import the config class so both local and HuggingFace Hub loading work
+ try:
+     from .configuration_transformer import TransformerConfig
+ except (ImportError, ValueError):
+     try:
+         from configuration_transformer import TransformerConfig
+     except ImportError:
+         from forgetting_transformer.model.transformer.configuration_transformer import TransformerConfig
+
+ from functools import partial
+
+ logger = logging.get_logger(__name__)
+
+ class Attention(nn.Module):
+
+     def __init__(
+         self,
+         hidden_size: int = 2048,
+         num_heads: int = 32,
+         num_kv_heads: Optional[int] = None,
+         window_size: Optional[int] = None,
+         max_position_embeddings: Optional[int] = None,
+         rope_base: float = 500000.0,
+         use_rope: bool = True,
+         layer_idx: Optional[int] = None,
+     ):
+         super().__init__()
+
+         self.num_heads = num_heads
+         if num_kv_heads is None:
+             self.num_kv_heads = self.num_heads
+         else:
+             self.num_kv_heads = num_kv_heads
+         self.num_kv_groups = num_heads // self.num_kv_heads
+         self.hidden_size = hidden_size
+         self.head_dim = self.hidden_size // self.num_heads
+         self.kv_dim = self.num_kv_heads * self.head_dim
+         self.window_size = window_size
+         self.max_position_embeddings = max_position_embeddings
+         self.layer_idx = layer_idx
+
+         self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
+         self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
+         self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+
+         if use_rope:
+             self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
+         else:
+             self.rotary = None
+
+
+         self.apply(self._initialize_weights)
+
+     def _initialize_weights(self, module: nn.Module):
+         pass
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         batch_size, q_len, _ = hidden_states.size()
+         q = rearrange(self.q_proj(hidden_states), '... (h d) -> ... h d', h=self.num_heads)
+         k = rearrange(self.k_proj(hidden_states), '... (h d) -> ... h d', h=self.num_kv_heads)
+         # v goes straight to (b, h, t, d), the layout the cache update expects
+         v = rearrange(self.v_proj(hidden_states), 'b t (h d) -> b h t d', h=self.num_kv_heads)
+
+         seqlen_offset, max_seqlen = 0, q.shape[1]
+         if past_key_values is not None:
+             seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
+             max_seqlen = q.shape[1] + seqlen_offset
+
+         if attention_mask is not None:
+             # to account for the offsets of padding tokens
+             seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
+             max_seqlen = q.shape[1] + max(seqlen_offset)
+
+         if self.max_position_embeddings is not None:
+             max_seqlen = max(max_seqlen, self.max_position_embeddings)
+         if self.rotary is not None:
+             q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
+
+         k = rearrange(k, 'b t h d -> b h t d')
+         if past_key_values is not None:
+             k, v = past_key_values.update(k, v, self.layer_idx)
+         k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
+         if self.num_kv_groups > 1:
+             k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
+             v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
+
+         # Use softmax attention in place of flash attention
+         # Contains at least one padding token in the sequence
+         if attention_mask is not None:
+             q, k, v, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(q, k, v, attention_mask, q_len)
+             cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+             max_seqlen_q, max_seqlen_k = max_seq_lens
+
+             # vanilla softmax attention over variable-length (unpadded) sequences
+             o = vanilla_attention_varlen_std(
+                 q, k, v,
+                 cu_seqlens_q=cu_seqlens_q,
+                 cu_seqlens_k=cu_seqlens_k,
+                 max_seqlen_q=max_seqlen_q,
+                 max_seqlen_k=max_seqlen_k,
+                 causal=True,
+                 window_size=(-1, -1) if self.window_size is None else (self.window_size-1, 0)
+             )
+             o = pad_input(o, indices_q, batch_size, q_len)
+         else:
+             # vanilla softmax attention
+             o = vanilla_attention_std(
+                 q, k, v,
+                 causal=True,
+                 window_size=(-1, -1) if self.window_size is None else (self.window_size-1, 0)
+             )
+
+         o = o.reshape(batch_size, q_len, self.hidden_size)
+         o = self.o_proj(o)
+
+         # attention weights are never materialized by the fused kernels
+         attentions = None
+
+         return o, attentions, past_key_values
+
+     def _upad_input(self, q, k, v, attention_mask, q_len):
+         seqlens = attention_mask.sum(-1, dtype=torch.int32)
+         indices_k = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+         max_seqlen_k = seqlens.max().item()
+         cu_seqlens_k = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
+         batch_size, seq_len, num_key_value_heads, head_dim = k.shape
+
+         k = index_first_axis(k.reshape(batch_size * seq_len, num_key_value_heads, head_dim), indices_k)
+         v = index_first_axis(v.reshape(batch_size * seq_len, num_key_value_heads, head_dim), indices_k)
+         if q_len == seq_len:
+             q = index_first_axis(q.reshape(batch_size * seq_len, self.num_heads, head_dim), indices_k)
+             cu_seqlens_q = cu_seqlens_k
+             max_seqlen_q = max_seqlen_k
+             indices_q = indices_k
+         elif q_len == 1:
+             max_seqlen_q = 1
+             # There is a memcpy here, that is very bad.
+             cu_seqlens_q = torch.arange(batch_size + 1, dtype=torch.int32, device=q.device)
+             indices_q = cu_seqlens_q[:-1]
+             q = q.squeeze(1)
+         else:
+             # The -q_len: slice assumes left padding.
+             attention_mask = attention_mask[:, -q_len:]
+             q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, attention_mask)
+
+         return q, k, v, indices_q, (cu_seqlens_q, cu_seqlens_k), (max_seqlen_q, max_seqlen_k)
+
+
+ class TransformerMLP(nn.Module):
+
+     def __init__(
+         self,
+         hidden_size: int,
+         hidden_ratio: Optional[int] = None,
+         intermediate_size: Optional[int] = None,
+         hidden_act: str = 'swish'
+     ) -> None:
+         super().__init__()
+
+         self.hidden_size = hidden_size
+         # the final number of params is `hidden_ratio * hidden_size^2`
+         # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
+         if hidden_ratio is None:
+             hidden_ratio = 4
+         if intermediate_size is None:
+             intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
+             intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
+         self.hidden_ratio = hidden_ratio
+         self.intermediate_size = intermediate_size
+
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[hidden_act]
+
+     def forward(self, x):
+         y = self.gate_proj(x)
+         gate, y = y.chunk(2, -1)
+         # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
+         return swiglu_linear(
+             gate, y,
+             self.down_proj.weight.to(y.dtype),
+             self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
+         )
+
+
+ class TransformerBlock(nn.Module):
+     def __init__(self, config, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+         self.attn = Attention(
+             hidden_size=config.hidden_size,
+             num_heads=config.num_heads,
+             num_kv_heads=config.num_kv_heads,
+             window_size=config.window_size,
+             max_position_embeddings=config.max_position_embeddings,
+             rope_base=config.rope_base,
+             use_rope=config.use_rope,
+             layer_idx=layer_idx
+         )
+         self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+         self.mlp = TransformerMLP(
+             hidden_size=config.hidden_size,
+             hidden_ratio=config.hidden_ratio,
+             intermediate_size=config.intermediate_size,
+             hidden_act=config.hidden_act
+         )
+
+     def forward_attn(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         **kwargs,
+     ):
+         # residual handled outside
+         # residual = hidden_states
+         hidden_states = self.attn_norm(hidden_states)
+         hidden_states, attentions, past_key_values = self.attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions
+         )
+         return hidden_states, attentions, past_key_values
+
+     def forward_mlp(
+         self,
+         hidden_states: torch.Tensor,
+         residual: torch.Tensor,
+     ):
+         hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         return hidden_states
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         gradient_checkpointing: bool = False
+         # **kwargs,
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
+         residual = hidden_states
+
+         if gradient_checkpointing:
+             forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
+             forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
+         else:
+             forward_attn = self.forward_attn
+             forward_mlp = self.forward_mlp
+
+         hidden_states, attentions, past_key_values = forward_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions
+         )
+
+         hidden_states = forward_mlp(
+             hidden_states,
+             residual,
+         )
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (attentions,)
+
+         if use_cache:
+             outputs += (past_key_values,)
+
+         return outputs
+
+
+ class TransformerPreTrainedModel(PreTrainedModel):
+
+     config_class = TransformerConfig
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['TransformerBlock']
+
+     def __init__(self, config, *inputs, **kwargs):
+         # Dynamically patch config_class to support remote-code loading
+         if hasattr(config, '__class__'):
+             config_module = config.__class__.__module__
+             if 'transformers_modules' in config_module or config_module == 'configuration_transformer':
+                 # Update config_class to the config class that was actually loaded
+                 self.__class__.config_class = config.__class__
+         super().__init__(config, *inputs, **kwargs)
+
+     def _init_weights(
+         self,
+         module: nn.Module,
+     ):
+         if isinstance(module, (nn.Linear, nn.Conv1d)):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+
+ class TransformerModel(TransformerPreTrainedModel):
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([TransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+         self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
+
+         self.gradient_checkpointing = False
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         if output_attentions:
+             warnings.warn(
+                 "`TransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
+             )
+             output_attentions = False
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is None and inputs_embeds is None:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         if use_cache:
+             use_legacy_cache = not isinstance(past_key_values, Cache)
+             if use_legacy_cache:
+                 past_key_values = DynamicCache.from_legacy_cache(past_key_values)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embeddings(input_ids)
+
+         # embed positions
+         hidden_states = inputs_embeds
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         all_hidden_states = () if output_hidden_states else None
+         all_attns = () if output_attentions else None
+         next_decoder_cache = None
+
+         for layer in self.layers:
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             layer_outputs = layer(
+                 hidden_states,
+                 attention_mask=attention_mask,
+                 past_key_values=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 gradient_checkpointing=self.gradient_checkpointing and self.training
+             )
+
+             hidden_states = layer_outputs[0]
+
+             if use_cache:
+                 next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+             if output_attentions:
+                 all_attns += (layer_outputs[1],)
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         next_cache = None
+         if use_cache:
+             next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+         if not return_dict:
+             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=next_cache,
+             hidden_states=all_hidden_states,
+             attentions=all_attns
+         )
+
+
+ class TransformerForCausalLM(TransformerPreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = TransformerModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.embeddings = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.LongTensor = None,
+         past_key_values: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         **kwargs
+     ):
+         # keep only the last token of `input_ids` if `past_key_values` is passed along.
+         if past_key_values is not None:
+             input_ids = input_ids[:, -1:]
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {'inputs_embeds': inputs_embeds}
+         else:
+             # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+             # recompiles graphs as the stride of the inputs is a guard.
+             # Ref: https://github.com/huggingface/transformers/pull/29114
+             # TODO: use `next_tokens` directly instead.
+             model_inputs = {'input_ids': input_ids.contiguous()}
+
+         model_inputs.update({
+             'past_key_values': past_key_values,
+             'use_cache': kwargs.get('use_cache'),
+             'attention_mask': attention_mask,
+         })
+         return model_inputs
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict
+         )
+
+         hidden_states = outputs[0]
+
+         loss = None
+         if labels is not None:
+             if self.config.fuse_cross_entropy:
+                 loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
+             else:
+                 loss_fct = nn.CrossEntropyLoss(reduction='none')
+             logits = self.lm_head(hidden_states)
+             # Enable model parallelism
+             labels = labels.to(logits.device)
+             # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
+             loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+             loss = loss.view(*labels.size())
+             del logits
+             logits = None
+         else:
+             logits = self.lm_head(hidden_states)
+
+         if not return_dict:
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
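
The pure-PyTorch `unpad_input`/`pad_input` fallbacks defined at the top of this file should round-trip exactly on unmasked positions and zero-fill masked ones. A small self-contained check (a sketch under the assumption that flash-attn is absent, so the fallbacks above are the ones in scope):

import torch

mask = torch.tensor([[1, 1, 0], [1, 1, 1]])  # batch of 2, one padded position
x = torch.arange(2 * 3 * 4, dtype=torch.float32).reshape(2, 3, 4)
packed, indices, cu_seqlens, max_seqlen = unpad_input(x, mask)
assert packed.shape == (5, 4)                # only the 5 unmasked positions survive
assert cu_seqlens.tolist() == [0, 2, 5]      # per-sequence boundaries
restored = pad_input(packed, indices, 2, 3)
# masked positions come back as zeros; unmasked ones round-trip exactly
assert torch.equal(restored[mask.bool()], x[mask.bool()])
assert restored[0, 2].abs().sum() == 0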
ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the inner most dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked value to affect result. If we
65
+ # don't do this then it could via affecting numerical precision of
66
+ # cumsum
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "groupped query attention has not been tested. You can uncomment this if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value sequence must be at least as long as the query sequence"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of q.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all
260
+ FloatTensor inputs and outputs are ordered as (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads)
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
280
+
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ #Dot I trick: to place q in registers, it saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in the m-dimension that this block may access are in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # By the rule of causal masking, the max index in the n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of the index in the n-dimension should never exceed the sequence length of k/v (`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # In this case, there would be an illegal memory access when loading the k & v tiles
400
+ # if mask_n is not applied for loading (only when `DIVISIBLE_N` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N,)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sumexpn before applying dropout
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute delta = (o * do).sum(-1), one scalar per query row
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_N, BLOCK_M)
727
+
728
+ # load q and log_lambda_out on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since softmax in the backward pass is pointwise and the normalizer was saved in fwd,
743
+ # masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N, BLOCK_DMODEL,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N, BLOCK_DMODEL,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v, and log_lambda_in on chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since softmax in the backward pass is pointwise and the normalizer was saved in fwd,
941
+ # masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds to ensure no small values
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
992
+
993
+
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
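
For reference, a minimal usage sketch of the `forgetting_attention` API defined above; shapes follow the docstring, the sizes below are arbitrary, and a CUDA device is required:

# Hypothetical usage sketch for the public API above.
import torch
import torch.nn.functional as F

B, T, H, D = 2, 128, 4, 64
q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
# log of the forget gates, typically the output of logsigmoid on a gate projection
log_fgate = F.logsigmoid(torch.randn(B, T, H, device="cuda", dtype=torch.float32))

out = forgetting_attention(q, k, v, log_fgate)  # (B, T, H, D)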
ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py ADDED
@@ -0,0 +1,72 @@
1
+ """
2
+ Forgetting Attention - standard softmax reference version.
3
+ Append this function at the end of forgetting_attention.py.
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from einops import rearrange
10
+ from typing import Optional
11
+
12
+
13
+ def forgetting_attention_std(
14
+ q: torch.Tensor,
15
+ k: torch.Tensor,
16
+ v: torch.Tensor,
17
+ log_fgate: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ ) -> torch.Tensor:
23
+ """标准 Softmax 版本的 Forgetting Attention"""
24
+
25
+ if not head_first:
26
+ q = rearrange(q, "b t h d -> b h t d")
27
+ k = rearrange(k, "b t h d -> b h t d")
28
+ v = rearrange(v, "b t h d -> b h t d")
29
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
30
+
31
+ B, H, T_q, D = q.shape
32
+ T_k = k.shape[2]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # Compute QK scores
38
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
39
+
40
+ # Handle seq_start (left padding)
41
+ log_fgate_masked = log_fgate.float()
42
+ if seq_start is not None:
43
+ log_fgate_masked = log_fgate_masked.clone()
44
+ mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]
45
+ log_fgate_masked[mask_idx] = 0.0
46
+
47
+ # Compute the cumulative decay
48
+ log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
49
+ decay_bias = log_lambda[:, :, T_k - T_q:, None] - log_lambda[:, :, None, :] # use the last T_q positions so T_q < T_k also works
50
+ scores = scores + decay_bias
51
+
52
+ # Causal mask
53
+ P_SEQ = T_k - T_q
54
+ causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
55
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
56
+
57
+ # seq_start mask
58
+ if seq_start is not None:
59
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None] # (B, 1, 1, T_k)
60
+ scores = scores.masked_fill(seq_mask, float('-inf'))
61
+
62
+ # Softmax
63
+ attn = F.softmax(scores, dim=-1)
64
+ attn = torch.nan_to_num(attn, 0.0)
65
+
66
+ # Compute the output
67
+ out = torch.matmul(attn.to(v.dtype), v)
68
+
69
+ if not head_first:
70
+ out = rearrange(out, "b h t d -> b t h d")
71
+
72
+ return out
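
Since this function mirrors the Triton kernel's semantics, a quick parity check along the following lines can validate it. A sketch, assuming the `ops` package is importable and a CUDA device is available:

# Hypothetical parity check: Triton kernel vs. this reference version.
import torch
import torch.nn.functional as F
from ops.forgetting_attention import forgetting_attention
from ops.forgetting_attention_std import forgetting_attention_std

B, T, H, D = 2, 64, 2, 64
q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
k, v = torch.randn_like(q), torch.randn_like(q)
log_fgate = F.logsigmoid(torch.randn(B, T, H, device="cuda", dtype=torch.float32))

ref = forgetting_attention_std(q, k, v, log_fgate)
tri = forgetting_attention(q, k, v, log_fgate)
assert torch.allclose(ref.float(), tri.float(), atol=1e-2), (ref - tri).abs().max()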
ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py ADDED
@@ -0,0 +1,179 @@
1
+ """
2
+ Geometric Attention - standard softmax reference version.
3
+ 基于论文 "The Neural Data Router" (Csordás et al., 2022)
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def geometric_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ normalize: bool = True,
23
+ ) -> torch.Tensor:
24
+ """
25
+ Standard-softmax reference version of Geometric Attention.
26
+
27
+ Args:
28
+ q: Query tensor [B, T, H, D] or [B, H, T, D] if head_first
29
+ k: Key tensor [B, T, H, D] or [B, H, T, D] if head_first
30
+ v: Value tensor [B, T, H, D] or [B, H, T, D] if head_first
31
+ head_first: whether the head axis comes before the time axis
32
+ seq_start: per-sequence start positions [B]
33
+ sm_scale: scaling factor, defaults to 1/sqrt(D)
34
+ normalize: whether to normalize the attention weights
35
+
36
+ Returns:
37
+ output: [B, T, H, D] or [B, H, T, D] if head_first
38
+ """
39
+
40
+ # Rearrange to head_first format
41
+ if not head_first:
42
+ q = rearrange(q, "b t h d -> b h t d")
43
+ k = rearrange(k, "b t h d -> b h t d")
44
+ v = rearrange(v, "b t h d -> b h t d")
45
+
46
+ B, H, T_q, D = q.shape
47
+ T_k = k.shape[2]
48
+
49
+ if sm_scale is None:
50
+ sm_scale = 1.0 / math.sqrt(D)
51
+
52
+ # Step 1: compute content-based logits
53
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
54
+ # logits: [B, H, T_q, T_k]
55
+
56
+ # Step 2: mask the diagonal (a position may not attend to itself)
57
+ if T_q == T_k:
58
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
59
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
60
+
61
+ # Step 3: apply the seq_start mask
62
+ if seq_start is not None:
63
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None] # (B, 1, 1, T_k)
64
+ logits = logits.masked_fill(seq_mask, float('-inf'))
65
+
66
+ # Step 4: causal mask (if needed)
67
+ # Note: the geometric attention paper is not causal; uncomment below if your task requires it
68
+ # P_SEQ = T_k - T_q
69
+ # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
70
+ # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
71
+
72
+ # Step 5: geometric weighting (the core algorithm)
73
+ attn_weights = geometric_weighting(logits, normalize=normalize)
74
+
75
+ # Step 6: apply the attention weights to the values
76
+ out = torch.matmul(attn_weights.to(v.dtype), v)
77
+
78
+ if not head_first:
79
+ out = rearrange(out, "b h t d -> b t h d")
80
+
81
+ return out
82
+
83
+
84
+ def geometric_weighting(
85
+ logits: torch.Tensor,
86
+ normalize: bool = True,
87
+ ) -> torch.Tensor:
88
+ """
89
+ Compute geometric attention weights.
90
+
91
+ Implements Equation 7 of the paper:
92
+ A[i,j] = P[i,j] * ∏(1 - P[i,k]) for k closer to i than j
93
+
94
+ Args:
95
+ logits: [B, H, T_q, T_k] attention logits
96
+ normalize: whether to normalize
97
+
98
+ Returns:
99
+ weights: [B, H, T_q, T_k] attention weights
100
+ """
101
+ B, H, T_q, T_k = logits.shape
102
+
103
+ # Step 1: Sigmoid to get matching probabilities
104
+ P = torch.sigmoid(logits) # [B, H, T_q, T_k]
105
+
106
+ # Step 2: compute in log space (numerically stable)
107
+ log_P = torch.log(P + 1e-10)
108
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
109
+
110
+ # Step 3: simplified version - use cumsum to implement the geometric distribution
111
+ # This is an efficient approximation that avoids explicit loops
112
+
113
+ # For each position i, accumulate log(1 - P) over all positions to its left
114
+ log_decay_left = log_one_minus_P.cumsum(dim=-1)
115
+
116
+ # Compute weights (simplified version)
117
+ # The full version selects intervals dynamically by distance; this is an efficient approximation
118
+ weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
119
+
120
+ # Special-case the first position (it has no elements to its left)
121
+ # Avoid in-place operations
122
+ weights_first = P[:, :, :, :1] # take the first column
123
+ weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
124
+
125
+ # Step 4: normalization (optional)
126
+ if normalize:
127
+ weights = F.normalize(weights, p=1, dim=-1)
128
+
129
+ # Handle NaN (when all positions are -inf)
130
+ weights = torch.nan_to_num(weights, 0.0)
131
+
132
+ return weights
133
+
134
+
135
+ def geometric_weighting_full(
136
+ logits: torch.Tensor,
137
+ normalize: bool = True,
138
+ ) -> torch.Tensor:
139
+ """
140
+ Full version of geometric weighting (slower but more accurate).
141
+
142
+ Use it only when maximum accuracy is required; prefer the simplified version above for training.
143
+ """
144
+ B, H, T_q, T_k = logits.shape
145
+ device = logits.device
146
+
147
+ P = torch.sigmoid(logits)
148
+ log_P = torch.log(P + 1e-10)
149
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
150
+
151
+ # Initialize weights
152
+ weights = torch.zeros_like(P)
153
+
154
+ # Compute the geometric weight for each (i, j)
155
+ for i in range(T_q):
156
+ for j in range(T_k):
157
+ # Find all positions k that are closer to i than j is
158
+ if i < j:
159
+ # Looking right: closer positions are [i+1, ..., j-1]
160
+ closer_positions = range(i + 1, j)
161
+ elif i > j:
162
+ # Looking left: closer positions are [j+1, ..., i-1]
163
+ closer_positions = range(j + 1, i)
164
+ else:
165
+ # i == j (the diagonal) was already masked out by the caller
166
+ continue
167
+
168
+ # Compute ∏(1 - P[i,k]) in log space
169
+ log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
170
+
171
+ # weights[i,j] = P[i,j] * ∏(1 - P[i,k])
172
+ weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
173
+
174
+ if normalize:
175
+ weights = F.normalize(weights, p=1, dim=-1)
176
+
177
+ weights = torch.nan_to_num(weights, 0.0)
178
+
179
+ return weights
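
A quick way to see how far the cumsum shortcut drifts from the exact Equation-7 computation is to run both weighting functions above on a small random input. A sanity-check sketch using only the two functions defined in this file:

# Sanity-check sketch: simplified vs. full geometric weighting.
import torch

torch.manual_seed(0)
logits = torch.randn(1, 1, 8, 8)
logits = logits.masked_fill(torch.eye(8, dtype=torch.bool), float('-inf'))  # mask the diagonal

w_fast = geometric_weighting(logits)
w_full = geometric_weighting_full(logits)
# A nonzero gap is expected: the cumsum version is an approximation.
print((w_fast - w_full).abs().max())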
ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py ADDED
@@ -0,0 +1,88 @@
1
+ """
2
+ Sliding Window / Hard Attention
3
+ Based on "Context Limitations Make Neural Language Models More Human-Like"
4
+ (Kuribayashi et al., 2022)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def sliding_window_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ window_size: int = 2, # default: 2-gram (the current token plus 1 previous token)
23
+ ) -> torch.Tensor:
24
+ """
25
+ Sliding Window Attention
26
+
27
+ Hard cutoff: each query may only attend to the most recent window_size tokens.
28
+ """
29
+
30
+ if not head_first:
31
+ q = rearrange(q, "b t h d -> b h t d")
32
+ k = rearrange(k, "b t h d -> b h t d")
33
+ v = rearrange(v, "b t h d -> b h t d")
34
+
35
+ B, H, T_q, D = q.shape
36
+ T_k = k.shape[2]
37
+
38
+ if sm_scale is None:
39
+ sm_scale = 1.0 / math.sqrt(D)
40
+
41
+ # Compute logits
42
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
43
+
44
+ # Create sliding window mask
45
+ mask = create_sliding_window_mask(T_q, T_k, window_size, device=q.device)
46
+ logits = logits.masked_fill(~mask, float('-inf'))
47
+
48
+ # Seq start mask
49
+ if seq_start is not None:
50
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]  # broadcasts over [B, H, T_q, T_k]
51
+ logits = logits.masked_fill(seq_mask, float('-inf'))
52
+
53
+ # Standard softmax
54
+ weights = F.softmax(logits, dim=-1)
55
+
56
+ # Apply to values
57
+ out = torch.matmul(weights, v)
58
+
59
+ if not head_first:
60
+ out = rearrange(out, "b h t d -> b t h d")
61
+
62
+ return out
63
+
64
+
65
+ def create_sliding_window_mask(
66
+ T_q: int,
67
+ T_k: int,
68
+ window_size: int,
69
+ device: torch.device
70
+ ) -> torch.Tensor:
71
+ """
72
+ Create the sliding window mask
73
+
74
+ window_size=1: attend to the current token only
75
+ window_size=2: current token plus the previous one (2-gram)
76
+ """
77
+ # Base causal mask
78
+ mask = torch.tril(torch.ones(T_q, T_k, dtype=torch.bool, device=device))
79
+
80
+ # Apply the window constraint
81
+ if window_size > 0 and window_size < T_k:
82
+ for i in range(T_q):
83
+ # Keep only the range [i - window_size + 1, i]
84
+ start = max(0, i - window_size + 1)
85
+ if start > 0:
86
+ mask[i, :start] = False
87
+
88
+ return mask[None, None, :, :] # [1, 1, T_q, T_k]
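The per-row Python loop above is fine for small T_q but serializes on the CPU; the same band mask can be built in one shot with broadcasting. A loop-free sketch with the same semantics (a hypothetical helper, assuming window_size >= 1):

    import torch

    def create_sliding_window_mask_vectorized(
        T_q: int, T_k: int, window_size: int, device: torch.device
    ) -> torch.Tensor:
        qi = torch.arange(T_q, device=device)[:, None]  # query positions
        kj = torch.arange(T_k, device=device)[None, :]  # key positions
        # Causal band: key j is visible iff j <= i and j > i - window_size
        mask = (kj <= qi) & (kj > qi - window_size)
        return mask[None, None, :, :]  # [1, 1, T_q, T_k]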
ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Stick-breaking Attention - ICLR 2025
3
+ Based on the paper "Scaling Stick-Breaking Attention" (Tan et al., 2025)
4
+ Simplified PyTorch implementation (no Triton)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from einops import rearrange
12
+ from typing import Optional
13
+
14
+
15
+ def stickbreaking_attention_std(
16
+ q: torch.Tensor,
17
+ k: torch.Tensor,
18
+ v: torch.Tensor,
19
+ *,
20
+ head_first: bool = False,
21
+ seq_start: Optional[torch.Tensor] = None,
22
+ sm_scale: Optional[float] = None,
23
+ normalize: bool = True,
24
+ attend_current: bool = False,
25
+ ) -> torch.Tensor:
26
+ """
27
+ Stick-breaking attention
28
+
29
+ Based on the ICLR 2025 paper; simplified PyTorch implementation
30
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} softplus(z_{k,j}))
31
+
32
+ Args:
33
+ q: query [B, T, H, D] or [B, H, T, D] if head_first
34
+ k: key [B, T, H, D] or [B, H, T, D] if head_first
35
+ v: value [B, T, H, D] or [B, H, T, D] if head_first
36
+ attend_current: whether to attend to current position
37
+ normalize: whether to normalize attention weights
38
+ """
39
+
40
+ if not head_first:
41
+ q = rearrange(q, "b t h d -> b h t d")
42
+ k = rearrange(k, "b t h d -> b h t d")
43
+ v = rearrange(v, "b t h d -> b h t d")
44
+
45
+ B, H, T_q, D = q.shape
46
+ T_k = k.shape[2]
47
+
48
+ if sm_scale is None:
49
+ sm_scale = 1.0 / math.sqrt(D)
50
+
51
+ # Compute logits: QK^T / sqrt(d)
52
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
53
+ # [B, H, T_q, T_k]
54
+
55
+ # Causal mask (optional: mask diagonal if not attend_current)
56
+ if T_q == T_k and not attend_current:
57
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
58
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
59
+
60
+ # Seq start mask
61
+ if seq_start is not None:
62
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]  # broadcasts over [B, H, T_q, T_k]
63
+ logits = logits.masked_fill(seq_mask, float('-inf'))
64
+
65
+ # Stick-breaking weighting
66
+ attn_weights = stickbreaking_weighting(logits, normalize=normalize)
67
+
68
+ # Apply attention to values
69
+ out = torch.matmul(attn_weights.to(v.dtype), v)
70
+
71
+ if not head_first:
72
+ out = rearrange(out, "b h t d -> b t h d")
73
+
74
+ return out
75
+
76
+
77
+ def stickbreaking_weighting(
78
+ logits: torch.Tensor,
79
+ normalize: bool = True,
80
+ ) -> torch.Tensor:
81
+ """
82
+ Compute stick-breaking attention weights
83
+
84
+ From paper Equation 4:
85
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} log(1 + exp(z_{k,j})))
86
+
87
+ Where log(1 + exp(x)) is softplus(x)
88
+ """
89
+ B, H, T_q, T_k = logits.shape
90
+ device = logits.device
91
+
92
+ # Softplus: log(1 + exp(x))
93
+ # Numerically stable version from paper (Equation 5)
94
+ def softplus_stable(x):
95
+ # softplus(x) = log(1 + exp(x))
96
+ # When x > 15, exp(x) is huge, just return x
97
+ return torch.where(
98
+ x > 15.0,
99
+ x,
100
+ torch.log1p(torch.exp(torch.clamp(x, max=15.0)))
101
+ )
102
+
103
+ # Compute softplus for all logits
104
+ logits_sp = softplus_stable(logits) # [B, H, T_q, T_k]
105
+
106
+ # For each query position, compute cumulative sum
107
+ # We need to accumulate from left to right (position i to j-1)
108
+ log_weights = torch.zeros_like(logits)
109
+
110
+ for i in range(T_q):
111
+ # For query i, we compute attention to all keys j
112
+ z_i = logits[:, :, i, :] # [B, H, T_k]
113
+ z_sp_i = logits_sp[:, :, i, :] # [B, H, T_k]
114
+
115
+ # Cumulative sum of softplus
116
+ # csum[j] = ∑_{k=0}^{j} softplus(z_{i,k})
117
+ csum = z_sp_i.cumsum(dim=-1)
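The checkpoint ends here, mid-way through stickbreaking_weighting. For reference, a vectorized sketch of how the remaining computation could be finished; this is a reconstruction from Equation 4 above, not the author's code. Writing csum for the cumulative softplus over keys, log A[i, j] = z[i, j] + csum[i, j-1] - csum[i, i-1], which removes the per-query loop entirely:

    import torch
    import torch.nn.functional as F

    def stickbreaking_weighting_sketch(
        logits: torch.Tensor,  # [B, H, T, T]; assumes T_q == T_k (no KV cache)
        normalize: bool = True,
        attend_current: bool = False,
    ) -> torch.Tensor:
        T = logits.shape[-1]
        sp = F.softplus(logits)                    # softplus(-inf) = 0, so masked keys drop out
        csum = sp.cumsum(dim=-1)                   # csum[..., j] = sum_{k <= j} softplus(z[..., k])
        csum_prev = F.pad(csum, (1, 0))[..., :-1]  # csum shifted right by one key
        # Boundary per query row i: cumulative softplus up to key i-1 (or key i itself)
        bound = torch.diagonal(csum if attend_current else csum_prev, dim1=-2, dim2=-1)
        log_w = logits + csum_prev - bound[..., :, None]
        causal = torch.tril(
            torch.ones(T, T, dtype=torch.bool, device=logits.device),
            diagonal=0 if attend_current else -1,
        )
        weights = torch.exp(log_w).masked_fill(~causal[None, None], 0.0)
        if normalize:
            weights = F.normalize(weights, p=1, dim=-1)
        return torch.nan_to_num(weights, 0.0)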
ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Standard softmax attention for the vanilla Transformer
3
+ A drop-in replacement for the flash_attn implementation
4
+ """
5
+ import math
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from einops import rearrange
9
+ from typing import Optional, Tuple
10
+
11
+ def vanilla_attention_std(
12
+ q: torch.Tensor,
13
+ k: torch.Tensor,
14
+ v: torch.Tensor,
15
+ causal: bool = True,
16
+ window_size: Optional[Tuple[int, int]] = None,
17
+ sm_scale: Optional[float] = None,
18
+ ) -> torch.Tensor:
19
+ """
20
+ Standard softmax attention, compatible with the flash_attn_func input format
21
+
22
+ Args:
23
+ q, k, v: [batch, seq_len, num_heads, head_dim] tensors
24
+ causal: whether to apply a causal mask
25
+ window_size: sliding window size (left, right); (-1, -1) means unrestricted
26
+ sm_scale: softmax scaling factor
27
+
28
+ Returns:
29
+ output: [batch, seq_len, num_heads, head_dim]
30
+ """
31
+ B, T_q, H, D = q.shape
32
+ T_k = k.shape[1]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # Convert to [B, H, T, D] for the computation
38
+ q = rearrange(q, 'b t h d -> b h t d')
39
+ k = rearrange(k, 'b t h d -> b h t d')
40
+ v = rearrange(v, 'b t h d -> b h t d')
41
+
42
+ # Compute attention scores
43
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
44
+
45
+ # Causal mask
46
+ if causal:
47
+ P_SEQ = T_k - T_q # handle the KV-cache case (keys longer than queries)
48
+ causal_mask = torch.triu(
49
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
50
+ diagonal=P_SEQ + 1
51
+ )
52
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
53
+
54
+ # Window mask (sliding window attention)
55
+ if window_size is not None and window_size != (-1, -1):
56
+ left_window, right_window = window_size
57
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
58
+ for i in range(T_q):
59
+ # Compute the valid window range for each query position
60
+ start = max(0, i - left_window)
61
+ end = min(T_k, i + right_window + 1)
62
+ window_mask[i, start:end] = False
63
+ scores = scores.masked_fill(window_mask[None, None, :, :], float('-inf'))
64
+
65
+ # Softmax
66
+ attn_weights = F.softmax(scores, dim=-1)
67
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
68
+
69
+ # Apply attention to values
70
+ output = torch.matmul(attn_weights.to(v.dtype), v)
71
+
72
+ # Convert back to [B, T, H, D]
73
+ output = rearrange(output, 'b h t d -> b t h d')
74
+
75
+ return output
76
+
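Since this is plain softmax attention, it can be sanity-checked against PyTorch's built-in scaled_dot_product_attention (PyTorch >= 2.0), which expects [B, H, T, D] inputs. A quick check one might run; the shapes and tolerance are illustrative:

    import torch
    import torch.nn.functional as F

    B, T, H, D = 2, 16, 4, 32
    q, k, v = (torch.randn(B, T, H, D) for _ in range(3))
    out = vanilla_attention_std(q, k, v, causal=True)
    ref = F.scaled_dot_product_attention(
        q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True
    ).transpose(1, 2)
    assert torch.allclose(out, ref, atol=1e-5)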
77
+
78
+ def vanilla_attention_varlen_std(
79
+ q: torch.Tensor,
80
+ k: torch.Tensor,
81
+ v: torch.Tensor,
82
+ cu_seqlens_q: torch.Tensor,
83
+ cu_seqlens_k: torch.Tensor,
84
+ max_seqlen_q: int,
85
+ max_seqlen_k: int,
86
+ causal: bool = True,
87
+ window_size: Optional[Tuple[int, int]] = None,
88
+ sm_scale: Optional[float] = None,
89
+ ) -> torch.Tensor:
90
+ """
91
+ Standard softmax attention for variable-length sequences, compatible with flash_attn_varlen_func
92
+
93
+ Args:
94
+ q: [total_q_tokens, num_heads, head_dim]
95
+ k: [total_k_tokens, num_kv_heads, head_dim]
96
+ v: [total_k_tokens, num_kv_heads, head_dim]
97
+ cu_seqlens_q: cumulative query sequence lengths [batch_size + 1]
99
+ cu_seqlens_k: cumulative key sequence lengths [batch_size + 1]
100
+ max_seqlen_q: maximum query sequence length
101
+ max_seqlen_k: maximum key/value sequence length
101
+
102
+ Returns:
103
+ output: [total_q_tokens, num_heads, head_dim]
104
+ """
105
+ batch_size = cu_seqlens_q.shape[0] - 1
106
+ H = q.shape[1]
107
+ D = q.shape[2]
108
+
109
+ if sm_scale is None:
110
+ sm_scale = 1.0 / math.sqrt(D)
111
+
112
+ outputs = []
113
+
114
+ # Process each batch element separately
115
+ for b in range(batch_size):
116
+ q_start, q_end = cu_seqlens_q[b].item(), cu_seqlens_q[b+1].item()
117
+ k_start, k_end = cu_seqlens_k[b].item(), cu_seqlens_k[b+1].item()
118
+
119
+ if q_start == q_end: # empty sequence
120
+ continue
121
+
122
+ # Slice out q, k, v for the current batch element
123
+ q_b = q[q_start:q_end] # [T_q, H, D]
124
+ k_b = k[k_start:k_end] # [T_k, H, D]
125
+ v_b = v[k_start:k_end] # [T_k, H, D]
126
+
127
+ T_q = q_b.shape[0]
128
+ T_k = k_b.shape[0]
129
+
130
+ # Convert to [H, T, D]
131
+ q_b = rearrange(q_b, 't h d -> h t d')
132
+ k_b = rearrange(k_b, 't h d -> h t d')
133
+ v_b = rearrange(v_b, 't h d -> h t d')
134
+
135
+ # Compute attention scores
136
+ scores = torch.matmul(q_b.float(), k_b.float().transpose(-2, -1)) * sm_scale
137
+
138
+ # Causal mask
139
+ if causal:
140
+ P_SEQ = T_k - T_q
141
+ causal_mask = torch.triu(
142
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
143
+ diagonal=P_SEQ + 1
144
+ )
145
+ scores = scores.masked_fill(causal_mask[None, :, :], float('-inf'))
146
+
147
+ # Window mask
148
+ if window_size is not None and window_size != (-1, -1):
149
+ left_window, right_window = window_size
150
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
151
+ for i in range(T_q):
152
+ start = max(0, i - left_window)
153
+ end = min(T_k, i + right_window + 1)
154
+ window_mask[i, start:end] = False
155
+ scores = scores.masked_fill(window_mask[None, :, :], float('-inf'))
156
+
157
+ # Softmax
158
+ attn_weights = F.softmax(scores, dim=-1)
159
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
160
+
161
+ # Apply attention
162
+ output_b = torch.matmul(attn_weights.to(v_b.dtype), v_b)
163
+
164
+ # Convert back to [T, H, D]
165
+ output_b = rearrange(output_b, 'h t d -> t h d')
166
+ outputs.append(output_b)
167
+
168
+ # Concatenate outputs across batch elements
169
+ output = torch.cat(outputs, dim=0)
170
+
171
+ return output
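The cu_seqlens_* tensors follow the flash_attn varlen convention: cumulative token offsets with a leading zero. A sketch of how they are typically built from per-sequence lengths (the lengths and shapes here are hypothetical):

    import torch
    import torch.nn.functional as F

    lengths = torch.tensor([5, 3, 7])              # three packed sequences
    cu_seqlens = F.pad(lengths.cumsum(0), (1, 0))  # tensor([ 0,  5,  8, 15])
    total, H, D = int(lengths.sum()), 4, 32
    q = k = v = torch.randn(total, H, D)           # packed [total_tokens, H, D]
    out = vanilla_attention_varlen_std(
        q, k, v, cu_seqlens, cu_seqlens,
        max_seqlen_q=7, max_seqlen_k=7, causal=True,
    )                                              # [15, 4, 32]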
ops/__init__.py ADDED
File without changes
ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (169 Bytes). View file
 
ops/__pycache__/forgetting_attention.cpython-310.pyc ADDED
Binary file (25.1 kB). View file
 
ops/__pycache__/forgetting_attention_std.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
ops/__pycache__/geometric_attention_std.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
ops/__pycache__/sliding_window_attention_std.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc ADDED
Binary file (1.14 kB). View file
 
ops/__pycache__/vanilla_attention_std.cpython-310.pyc ADDED
Binary file (3.95 kB). View file
 
ops/forgetting_attention.py ADDED
@@ -0,0 +1,1138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the inner most dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked values to affect the result. Without
65
+ # this clone-and-zero, they could, by degrading the numerical precision
66
+ # of the cumsum
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "groupped query attention has not been tested. You can uncomment this if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of k & v.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
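+ # log_lambda = cumsum(log_fgate), so by the chain rule
+ # dlog_fgate[t] = sum over s >= t of dlog_lambda[s]; this suffix sum is
+ # obtained below from a forward cumsum.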
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all FloatTensor
260
+ inputs and outputs are ordered (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads)
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
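A minimal usage sketch (the shapes are hypothetical; a CUDA device with Triton is required):

    import torch
    import torch.nn.functional as F

    B, T, H, D = 2, 1024, 8, 64
    q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
    fgate_logit = torch.randn(B, T, H, device="cuda", dtype=torch.float32)
    log_fgate = F.logsigmoid(fgate_logit)            # log of the forget gates
    out = forgetting_attention(q, k, v, log_fgate)   # (B, T, H, D)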
280
+
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ #Dot I trick: to place q in registers, it saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in m-dimension that this block may access is in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # According to the rule of causal masking, then max index in n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of index in n-dimension should never exceed the sequence length of k/v(`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # At this case, there would be illegal memory access when loading k & v tiles
400
+ # if mask_n is not applied for loading(only when `DIVISIBLE_N`` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N,)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
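+ # log_lambda is the running cumsum of log forget gates, so this difference
+ # is the log of the product of the gates strictly after the key position
+ # up to the query position, i.e. the total decay applied to that key.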
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sumexpn before applying dropout
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute (Out * Dout).sum() for vector interpretation
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_M, BLOCK_N)
727
+
728
+ # load q1, k1, q2, k2, v, do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N, BLOCK_DMODEL,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N, BLOCK_DMODEL,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k1, k2, v on chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds to ensure no small values
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
+     else:
+         tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
+         dlog_lambda_out += tmp
+     if DIVISIBLE_M:
+         tl.store(dq_ptrs, dq.to(input_dtype))
+         tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
+     else:
+         tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
+         tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
+
+
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
+ @pytest.mark.parametrize("causal", [True])
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
+     torch.manual_seed(24)
+     q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
+     k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
+     v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
+     fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
+     log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
+     seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
+     sm_scale = 0.5
+     dout = torch.randn_like(q)
+     # Reference implementation
+     P_SEQ = N - M
+     mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
+     p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
+     p = p.float()
+
+     log_lambda = torch.cumsum(log_fgate, dim=-1)
+     decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
+     p = p + decay_bias
+     if causal:
+         p[:, :, mask == 0] = float("-inf")
+
+     attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
+     p = torch.where(attention_mask, float("-inf"), p)
+     p = torch.softmax(p.float(), dim=-1).to(dtype)
+     p = p.clone()
+     p[torch.isnan(p)] = 0.0
+     ref_out = torch.matmul(p, v)
+     ref_out.backward(dout)
+     ref_dv, v.grad = v.grad.clone(), None
+     ref_dk, k.grad = k.grad.clone(), None
+     ref_dq, q.grad = q.grad.clone(), None
+     ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
+     # Triton implementation
+     tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
+     tri_out = tri_out.to(dtype)
+
+     tri_out.backward(dout)
+     tri_dv, v.grad = v.grad.clone(), None
+     tri_dk, k.grad = k.grad.clone(), None
+     tri_dq, q.grad = q.grad.clone(), None
+     tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
+     # Compare
+     assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
+     rtol = 0
+     # Relative-tolerance workaround for a known hardware limitation of MI200 GPUs. For details see
+     # https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
+     # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
+     #     rtol = 1e-2
+     assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
+     assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
+     assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
+     assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
+
+
+ try:
+     from flash_attn.flash_attn_interface import \
+         flash_attn_qkvpacked_func as flash_attn_func
+     HAS_FLASH = True
+ except BaseException:
+     HAS_FLASH = False
+
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
+ # Vary the sequence length for fixed head count and batch=4
+ configs = []
+ for mode in ["fwd", "bwd"]:
+     for causal in [True]:
+         if mode == "bwd" and not causal:
+             continue
+         configs.append(
+             triton.testing.Benchmark(
+                 x_names=["N_CTX"],
+                 x_vals=[2**i for i in range(14, 15)],
+                 line_arg="provider",
+                 line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
+                 line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
+                 styles=[("red", "-"), ("blue", "-"), ("green", "-")],
+                 ylabel="TFLOPS",
+                 plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
+                 args={
+                     "H": N_HEADS,
+                     "BATCH": BATCH,
+                     "HEAD_DIM": HEAD_DIM,
+                     "mode": mode,
+                     "causal": causal,
+                 },
+             ))
+
+
+ @triton.testing.perf_report(configs)
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
+     assert mode in ["fwd", "bwd"]
+     warmup = 25
+     rep = 100
+     dtype = torch.bfloat16
+     if "flag" in provider:
+         q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
+         k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
+         v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
+         fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device=device).uniform_(5, 10)
+         log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
+         sm_scale = 1.3
+         fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
+         if mode == "bwd":
+             o = fn()
+             do = torch.randn_like(o)
+             fn = lambda: o.backward(do, retain_graph=True)
+         ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
+     if provider == "flash":
+         qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
+         fn = lambda: flash_attn_func(qkv, causal=causal)
+         if mode == "bwd":
+             o = fn()
+             do = torch.randn_like(o)
+             fn = lambda: o.backward(do, retain_graph=True)
+         ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
+     flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
+     total_flops = 2 * flops_per_matmul
+     if causal:
+         total_flops *= 0.5
+     if mode == "bwd":
+         total_flops *= 2.5  # 2.0 (bwd) + 0.5 (recompute)
+     return total_flops / ms * 1e-9
+
+
+ if __name__ == "__main__":
+     # Only works on post-Ampere GPUs right now.
+     bench_flash_attention.run(save_path=".", print_data=True)
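A minimal call sketch for the kernel exercised by test_op above, assuming this repo's ops package is importable and a CUDA device is available; seq_start marks left-padding, so positions before it are masked out:

# Sketch: direct call into the Triton kernel; shapes follow test_op.
import torch
from ops.forgetting_attention import forgetting_attention

B, H, T, D = 2, 4, 512, 64
q = torch.randn(B, H, T, D, dtype=torch.bfloat16, device="cuda")
k, v = torch.randn_like(q), torch.randn_like(q)
# Per-position log forget gates in (-inf, 0], e.g. logsigmoid of a gate logit
log_fgate = torch.nn.functional.logsigmoid(torch.randn(B, H, T, device="cuda"))
seq_start = torch.tensor([0, 3], dtype=torch.long, device="cuda")
out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start)
print(out.shape)  # (B, H, T, D)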
ops/forgetting_attention_std.py ADDED
@@ -0,0 +1,72 @@
+ """
+ Forgetting Attention - standard softmax version.
+ Add this function at the end of forgetting_attention.py.
+ """
+
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def forgetting_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     log_fgate: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """Standard-softmax version of Forgetting Attention."""
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+         log_fgate = rearrange(log_fgate, "b t h -> b h t")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Compute QK scores
+     scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Handle seq_start: gates before the start position contribute no decay
+     log_fgate_masked = log_fgate.float()
+     if seq_start is not None:
+         mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]
+         log_fgate_masked = log_fgate_masked.masked_fill(mask_idx, 0.0)
+
+     # Cumulative decay; queries align with the last T_q key positions
+     log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
+     decay_bias = log_lambda[:, :, -T_q:, None] - log_lambda[:, :, None, :]
+     scores = scores + decay_bias
+
+     # Causal mask
+     P_SEQ = T_k - T_q
+     causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
+     scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # seq_start mask
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
+         scores = scores.masked_fill(seq_mask, float('-inf'))
+
+     # Softmax
+     attn = F.softmax(scores, dim=-1)
+     attn = torch.nan_to_num(attn, 0.0)
+
+     # Compute the output
+     out = torch.matmul(attn.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
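A quick way to sanity-check this reference against the Triton kernel above; a sketch, assuming both modules are importable from this repo's ops package and the tolerance used in test_op:

# Sketch: standard-softmax reference vs. Triton kernel.
import torch
from ops.forgetting_attention import forgetting_attention
from ops.forgetting_attention_std import forgetting_attention_std

B, H, T, D = 2, 4, 256, 64
q = torch.randn(B, H, T, D, dtype=torch.bfloat16, device="cuda")
k, v = torch.randn_like(q), torch.randn_like(q)
log_fgate = torch.nn.functional.logsigmoid(torch.randn(B, H, T, device="cuda"))
ref = forgetting_attention_std(q, k, v, log_fgate, head_first=True)
tri = forgetting_attention(q, k, v, log_fgate, head_first=True)
assert torch.allclose(ref.float(), tri.float(), atol=1e-2)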
ops/geometric_attention_std.py ADDED
@@ -0,0 +1,179 @@
+ """
+ Geometric Attention - standard (non-fused) version.
+ Based on "The Neural Data Router" (Csordás et al., 2022).
+ """
+
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def geometric_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Standard version of Geometric Attention.
+
+     Args:
+         q: Query tensor [B, T, H, D], or [B, H, T, D] if head_first
+         k: Key tensor [B, T, H, D], or [B, H, T, D] if head_first
+         v: Value tensor [B, T, H, D], or [B, H, T, D] if head_first
+         head_first: whether the head dimension precedes the time dimension
+         seq_start: per-sequence start positions [B]
+         sm_scale: scaling factor; defaults to 1/sqrt(D)
+         normalize: whether to L1-normalize the attention weights
+
+     Returns:
+         output: [B, T, H, D], or [B, H, T, D] if head_first
+     """
+
+     # Rearrange to head-first format
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Step 1: compute content-based logits
+     logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+     # logits: [B, H, T_q, T_k]
+
+     # Step 2: mask the diagonal (a position may not attend to itself)
+     if T_q == T_k:
+         diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
+         logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
+
+     # Step 3: apply the seq_start mask
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
+         logits = logits.masked_fill(seq_mask, float('-inf'))
+
+     # Step 4: causal mask (if needed)
+     # Note: geometric attention is not causal in the original paper; uncomment if your task requires it.
+     # P_SEQ = T_k - T_q
+     # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
+     # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # Step 5: geometric weighting (the core algorithm)
+     attn_weights = geometric_weighting(logits, normalize=normalize)
+
+     # Step 6: apply the attention weights to the values
+     out = torch.matmul(attn_weights.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
+
+
+ def geometric_weighting(
+     logits: torch.Tensor,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Compute geometric attention weights.
+
+     Implements Equation 7 of the paper:
+         A[i,j] = P[i,j] * prod(1 - P[i,k]) over all k closer to i than j
+
+     Args:
+         logits: [B, H, T_q, T_k] attention logits
+         normalize: whether to L1-normalize
+
+     Returns:
+         weights: [B, H, T_q, T_k] attention weights
+     """
+     B, H, T_q, T_k = logits.shape
+
+     # Step 1: sigmoid to get matching probabilities
+     P = torch.sigmoid(logits)  # [B, H, T_q, T_k]
+
+     # Step 2: work in log-space for numerical stability
+     log_P = torch.log(P + 1e-10)
+     log_one_minus_P = torch.log(1.0 - P + 1e-10)
+
+     # Step 3: simplified version - use cumsum to realize the geometric distribution.
+     # This is an efficient approximation that avoids explicit loops.
+
+     # For each position, accumulate log(1 - P) over all positions to its left
+     log_decay_left = log_one_minus_P.cumsum(dim=-1)
+
+     # Compute the weights (simplified version).
+     # The exact version selects the accumulation interval by distance; this is an efficient approximation.
+     weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
+
+     # Special-case the first position (it has no elements to its left);
+     # concatenate instead of assigning in place.
+     weights_first = P[:, :, :, :1]  # take the first column
+     weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
+
+     # Step 4: normalization (optional)
+     if normalize:
+         weights = F.normalize(weights, p=1, dim=-1)
+
+     # Replace NaNs (possible when every position is masked to -inf)
+     weights = torch.nan_to_num(weights, 0.0)
+
+     return weights
+
+
+ def geometric_weighting_full(
+     logits: torch.Tensor,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Exact geometric weighting (slower but accurate).
+
+     Use only when maximum fidelity is needed; prefer the simplified version above for training.
+     """
+     B, H, T_q, T_k = logits.shape
+     device = logits.device
+
+     P = torch.sigmoid(logits)
+     log_P = torch.log(P + 1e-10)
+     log_one_minus_P = torch.log(1.0 - P + 1e-10)
+
+     # Initialize the weights
+     weights = torch.zeros_like(P)
+
+     # Compute the geometric weight for every (i, j)
+     for i in range(T_q):
+         for j in range(T_k):
+             # Find all positions k that are closer to i than j is
+             if i < j:
+                 # looking right: closer positions are [i+1, ..., j-1]
+                 closer_positions = range(i + 1, j)
+             elif i > j:
+                 # looking left: closer positions are [j+1, ..., i-1]
+                 closer_positions = range(j + 1, i)
+             else:
+                 # i == j (the diagonal) is already masked out by the caller
+                 continue
+
+             # Compute prod(1 - P[i,k]) in log-space
+             log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
+
+             # weights[i,j] = P[i,j] * prod(1 - P[i,k])
+             weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
+
+     if normalize:
+         weights = F.normalize(weights, p=1, dim=-1)
+
+     weights = torch.nan_to_num(weights, 0.0)
+
+     return weights
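The two weightings can be compared on a tiny input; a sketch, noting that the cumsum variant only approximates Equation 7, so the two generally differ:

# Sketch: inspect the geometric weighting on a small example and compare
# the cumsum approximation against the exact double loop.
import torch
from ops.geometric_attention_std import geometric_weighting, geometric_weighting_full

logits = torch.randn(1, 1, 5, 5)
logits.diagonal(dim1=-2, dim2=-1).fill_(float('-inf'))  # mask the diagonal, as the caller does
w_fast = geometric_weighting(logits)       # cumsum approximation
w_full = geometric_weighting_full(logits)  # exact Equation 7
print((w_fast - w_full).abs().max())       # nonzero in general: the fast path is approximate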
ops/sliding_window_attention_std.py ADDED
@@ -0,0 +1,88 @@
+ """
+ Sliding Window / Hard Attention
+ Based on "Context Limitations Make Neural Language Models More Human-Like"
+ (Kuribayashi et al., 2022)
+ """
+
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def sliding_window_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     window_size: int = 2,  # default: the query itself plus one previous token (2-gram context)
+ ) -> torch.Tensor:
+     """
+     Sliding Window Attention.
+
+     Hard truncation: each query may only attend to the most recent
+     window_size tokens (itself included).
+     """
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Compute logits
+     logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Create the sliding-window mask
+     mask = create_sliding_window_mask(T_q, T_k, window_size, device=q.device)
+     logits = logits.masked_fill(~mask, float('-inf'))
+
+     # seq_start mask
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
+         logits = logits.masked_fill(seq_mask, float('-inf'))
+
+     # Standard softmax; replace NaNs from fully-masked rows
+     weights = F.softmax(logits, dim=-1)
+     weights = torch.nan_to_num(weights, 0.0)
+
+     # Apply the weights to the values
+     out = torch.matmul(weights.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
+
+
+ def create_sliding_window_mask(
+     T_q: int,
+     T_k: int,
+     window_size: int,
+     device: torch.device
+ ) -> torch.Tensor:
+     """
+     Create a sliding-window mask.
+
+     window_size=2: each query sees itself and the 1 previous token (2-gram context)
+     window_size=3: each query sees itself and the 2 previous tokens (3-gram context)
+     """
+     # Base causal mask
+     mask = torch.tril(torch.ones(T_q, T_k, dtype=torch.bool, device=device))
+
+     # Apply the window restriction
+     if window_size > 0 and window_size < T_k:
+         for i in range(T_q):
+             # Keep only the range [i - window_size + 1, i]
+             start = max(0, i - window_size + 1)
+             if start > 0:
+                 mask[i, :start] = False
+
+     return mask[None, None, :, :]  # [1, 1, T_q, T_k]
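For reference, the per-row loop above can be expressed as a vectorized band mask with the same semantics; a sketch, with an illustrative helper name:

# Sketch: vectorized equivalent of create_sliding_window_mask.
# Keeps key positions j with i - window_size < j <= i for each query i.
import torch

def sliding_window_mask_vectorized(T_q: int, T_k: int, window_size: int, device: torch.device) -> torch.Tensor:
    i = torch.arange(T_q, device=device)[:, None]
    j = torch.arange(T_k, device=device)[None, :]
    mask = j <= i  # causal
    if 0 < window_size < T_k:
        mask &= j > i - window_size  # band restriction
    return mask[None, None, :, :]  # [1, 1, T_q, T_k]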
ops/stickbreaking_attention_std.py ADDED
@@ -0,0 +1,46 @@
+ """
+ Stick-breaking Attention - official Triton implementation.
+ """
+
+ import math
+ import torch
+ from einops import rearrange
+ from typing import Optional
+
+ from stickbreaking_attention.sb_attn import sb_attn
+
+
+ def stickbreaking_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     normalize: bool = True,
+     attend_current: bool = False,
+ ) -> torch.Tensor:
+     """
+     Stick-breaking attention using the official Triton implementation.
+
+     Note: seq_start and normalize are accepted for interface compatibility
+     with the other ops in this package but are not used here.
+     """
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Official Triton kernel; returns (output, remainder)
+     out, rem = sb_attn(
+         q, k, v,
+         inv_temp=sm_scale,
+         attend_current=attend_current
+     )
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
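A minimal usage sketch, assuming the external stickbreaking_attention package imported above is installed and a CUDA device is available:

# Sketch: calling the wrapper in the default [B, T, H, D] layout.
import torch
from ops.stickbreaking_attention_std import stickbreaking_attention_std

B, T, H, D = 2, 256, 4, 64
q = torch.randn(B, T, H, D, dtype=torch.bfloat16, device="cuda")
k, v = torch.randn_like(q), torch.randn_like(q)
out = stickbreaking_attention_std(q, k, v)  # [B, T, H, D]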
ops/vanilla_attention_std.py ADDED
@@ -0,0 +1,171 @@
+ """
+ Standard softmax attention for the vanilla Transformer,
+ used as a drop-in replacement for the flash_attn implementation.
+ """
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional, Tuple
+
+
+ def vanilla_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     causal: bool = True,
+     window_size: Optional[Tuple[int, int]] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """
+     Standard softmax attention, compatible with the flash_attn_func input format.
+
+     Args:
+         q, k, v: [batch, seq_len, num_heads, head_dim]
+         causal: whether to apply a causal mask
+         window_size: sliding-window size (left, right); (-1, -1) means unrestricted
+         sm_scale: softmax scaling factor
+
+     Returns:
+         output: [batch, seq_len, num_heads, head_dim]
+     """
+     B, T_q, H, D = q.shape
+     T_k = k.shape[1]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Convert to [B, H, T, D] for the computation
+     q = rearrange(q, 'b t h d -> b h t d')
+     k = rearrange(k, 'b t h d -> b h t d')
+     v = rearrange(v, 'b t h d -> b h t d')
+
+     # Compute attention scores
+     scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Causal mask
+     if causal:
+         P_SEQ = T_k - T_q  # handles the KV-cache case
+         causal_mask = torch.triu(
+             torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
+             diagonal=P_SEQ + 1
+         )
+         scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # Window mask (sliding-window attention)
+     if window_size is not None and window_size != (-1, -1):
+         left_window, right_window = window_size
+         window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
+         for i in range(T_q):
+             # Compute the valid window range for each query position
+             start = max(0, i - left_window)
+             end = min(T_k, i + right_window + 1)
+             window_mask[i, start:end] = False
+         scores = scores.masked_fill(window_mask[None, None, :, :], float('-inf'))
+
+     # Softmax
+     attn_weights = F.softmax(scores, dim=-1)
+     attn_weights = torch.nan_to_num(attn_weights, 0.0)
+
+     # Apply attention to the values
+     output = torch.matmul(attn_weights.to(v.dtype), v)
+
+     # Convert back to [B, T, H, D]
+     output = rearrange(output, 'b h t d -> b t h d')
+
+     return output
+
+
+ def vanilla_attention_varlen_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     cu_seqlens_q: torch.Tensor,
+     cu_seqlens_k: torch.Tensor,
+     max_seqlen_q: int,
+     max_seqlen_k: int,
+     causal: bool = True,
+     window_size: Optional[Tuple[int, int]] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """
+     Standard softmax attention for variable-length sequences,
+     compatible with flash_attn_varlen_func.
+
+     Args:
+         q: [total_q_tokens, num_heads, head_dim]
+         k: [total_k_tokens, num_kv_heads, head_dim]
+         v: [total_k_tokens, num_kv_heads, head_dim]
+         cu_seqlens_q: cumulative sequence lengths [batch_size + 1]
+         cu_seqlens_k: cumulative sequence lengths [batch_size + 1]
+         max_seqlen_q: maximum query sequence length
+         max_seqlen_k: maximum key/value sequence length
+
+     Note: num_kv_heads is assumed equal to num_heads here (no GQA expansion).
+
+     Returns:
+         output: [total_q_tokens, num_heads, head_dim]
+     """
+     batch_size = cu_seqlens_q.shape[0] - 1
+     H = q.shape[1]
+     D = q.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     outputs = []
+
+     # Process one sequence at a time
+     for b in range(batch_size):
+         q_start, q_end = cu_seqlens_q[b].item(), cu_seqlens_q[b+1].item()
+         k_start, k_end = cu_seqlens_k[b].item(), cu_seqlens_k[b+1].item()
+
+         if q_start == q_end:  # empty sequence
+             continue
+
+         # Slice out this sequence's q, k, v
+         q_b = q[q_start:q_end]  # [T_q, H, D]
+         k_b = k[k_start:k_end]  # [T_k, H, D]
+         v_b = v[k_start:k_end]  # [T_k, H, D]
+
+         T_q = q_b.shape[0]
+         T_k = k_b.shape[0]
+
+         # Convert to [H, T, D]
+         q_b = rearrange(q_b, 't h d -> h t d')
+         k_b = rearrange(k_b, 't h d -> h t d')
+         v_b = rearrange(v_b, 't h d -> h t d')
+
+         # Compute attention scores
+         scores = torch.matmul(q_b.float(), k_b.float().transpose(-2, -1)) * sm_scale
+
+         # Causal mask
+         if causal:
+             P_SEQ = T_k - T_q
+             causal_mask = torch.triu(
+                 torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
+                 diagonal=P_SEQ + 1
+             )
+             scores = scores.masked_fill(causal_mask[None, :, :], float('-inf'))
+
+         # Window mask
+         if window_size is not None and window_size != (-1, -1):
+             left_window, right_window = window_size
+             window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
+             for i in range(T_q):
+                 start = max(0, i - left_window)
+                 end = min(T_k, i + right_window + 1)
+                 window_mask[i, start:end] = False
+             scores = scores.masked_fill(window_mask[None, :, :], float('-inf'))
+
+         # Softmax
+         attn_weights = F.softmax(scores, dim=-1)
+         attn_weights = torch.nan_to_num(attn_weights, 0.0)
+
+         # Apply attention
+         output_b = torch.matmul(attn_weights.to(v_b.dtype), v_b)
+
+         # Convert back to [T, H, D]
+         output_b = rearrange(output_b, 'h t d -> t h d')
+         outputs.append(output_b)
+
+     # Concatenate the per-sequence outputs
+     output = torch.cat(outputs, dim=0)
+
+     return output
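A usage sketch for the varlen variant, packing two sequences of different lengths with the flash_attn-style cu_seqlens convention; it runs on CPU since no fused kernel is involved:

# Sketch: pack two sequences of lengths 5 and 9 into one token stream.
import torch
from ops.vanilla_attention_std import vanilla_attention_varlen_std

H, D = 4, 64
lens = [5, 9]
total = sum(lens)
q = torch.randn(total, H, D)
k = torch.randn(total, H, D)
v = torch.randn(total, H, D)
cu = torch.tensor([0, lens[0], total], dtype=torch.int32)  # cumulative lengths
out = vanilla_attention_varlen_std(
    q, k, v,
    cu_seqlens_q=cu, cu_seqlens_k=cu,
    max_seqlen_q=max(lens), max_seqlen_k=max(lens),
    causal=True,
)
assert out.shape == (total, H, D)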