Lanni-ni committed
Commit d353777 · verified · 1 Parent(s): 2e91ba0

add remote code + model files
.ipynb_checkpoints/modeling_forgetting_transformer-checkpoint.py ADDED
@@ -0,0 +1,897 @@
# -*- coding: utf-8 -*-

from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

# from fla.layers.attn import Attention
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.layernorm import group_norm_fn
from fla.modules.activations import swiglu_linear

from fla.modules import RotaryEmbedding
from einops import rearrange

from .configuration_forgetting_transformer import ForgettingTransformerConfig
from forgetting_transformer.ops.forgetting_attention_std import forgetting_attention_std as forgetting_attention
from .fgate_cache import FgateDynamicCache
from .glu_linear import glu_linear
from .token_shift import token_shift

from functools import partial

logger = logging.get_logger(__name__)


class ShiftLinear(nn.Module):

    def __init__(
        self,
        input_dim: int,
        output_dim: int,
        num_heads: int,
        bias: bool,
        shift_bias: bool = False
    ):
        super().__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_heads = num_heads
        assert self.output_dim % self.num_heads == 0

        self.linear = nn.Linear(input_dim, output_dim, bias=bias)
        self.shift_proj = nn.Linear(input_dim, num_heads, bias=shift_bias)

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim})"
        return s

    def forward(self, x: torch.Tensor, shift_state: Optional[torch.Tensor]) -> torch.Tensor:
        assert x.ndim == 3, "Input must be (B, T, D)"
        B, T, D = x.size()
        out = self.linear(x)
        # (B, T, H)
        alpha = torch.sigmoid(self.shift_proj(x).float())
        # left, right, top, bottom (B, T=H, D=W)
        # out_prev = nn.functional.pad(out, (0, 0, 1, -1))
        # out_prev = torch.roll(out, shifts=1, dims=1)

        out_per_head = rearrange(out, 'b t (h d) -> b t h d', h=self.num_heads)
        if T > 1:
            # TODO: note that in this case the cache is not used
            result_per_head = token_shift(out_per_head, alpha, 1.0 - alpha)
        else:
            shift_state_per_head = rearrange(shift_state, 'b (h d) -> b 1 h d', h=self.num_heads)
            result_per_head = (alpha[..., None] * shift_state_per_head + (1 - alpha[..., None]) * out_per_head)

        result_per_head = result_per_head.to(out.dtype)

        if shift_state is not None:
            shift_state.copy_(out[:, -1, :])

        result = rearrange(result_per_head, 'b t h d -> b t (h d)', h=self.num_heads)
        return result


class GroupRMSNorm(nn.Module):
    def __init__(
        self,
        num_groups: int,
        hidden_size: int,
        elementwise_affine: bool = True,
        bias: bool = False,
        eps: float = 1e-5
    ) -> None:
        super().__init__()

        if hidden_size % num_groups != 0:
            raise ValueError('num_channels must be divisible by num_groups')

        self.num_groups = num_groups
        self.hidden_size = hidden_size
        self.elementwise_affine = elementwise_affine
        self.eps = eps

        self.register_parameter("weight", None)
        self.register_parameter("bias", None)
        if elementwise_affine:
            self.weight = nn.Parameter(torch.ones(hidden_size))
            if bias:
                self.bias = nn.Parameter(torch.zeros(hidden_size))

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
        if not self.elementwise_affine:
            s += f", elementwise_affine={self.elementwise_affine}"
        s += f", eps={self.eps}"
        s += ")"
        return s

    def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
        return group_norm_fn(
            x,
            self.weight,
            self.bias,
            residual=residual,
            eps=self.eps,
            prenorm=prenorm,
            residual_in_fp32=residual_in_fp32,
            is_rms_norm=True,
            num_groups=self.num_groups
        )


class ForgettingAttentionLayer(nn.Module):

    def __init__(
        self,
        hidden_size: int = 2048,
        num_heads: int = 32,
        num_kv_heads: Optional[int] = None,
        window_size: Optional[int] = None,
        max_position_embeddings: Optional[int] = None,
        use_rope: bool = False,
        rope_base: float = 500000.0,
        use_output_gate: bool = False,
        ogate_act: str = "sigmoid",
        fgate_type: str = "full",
        fgate_bias_init: bool = False,
        decay_time_min: Optional[float] = None,
        decay_time_max: Optional[float] = None,
        use_output_norm: bool = False,
        norm_eps: float = 1e-6,
        qk_norm: bool = False,
        qk_norm_share_param_across_head: bool = False,
        use_k_shift: bool = False,
        use_v_shift: bool = False,
        initializer_range: float = 0.02,
        layer_idx: int = None
    ):
        """
        Forgetting Attention layer.

        Arguments:
            - hidden_size: Input dimension and qkv dimension
            - num_heads: Number of heads
            - num_kv_heads: Not used. Should be None
            - window_size: Not used. Should be None
            - max_position_embeddings: Not used. Should be None
            - use_rope: Whether to use RoPE. Default is False
            - rope_base: The theta hyperparameter in RoPE. This has no effect if
              use_rope=False
            - use_output_gate: Whether to use output gates. Note that using output gates
              introduces extra parameters and you may want to reduce parameters from
              other components (e.g., MLPs)
            - ogate_act: Activation for the output gate. Either "sigmoid" or "silu"
            - fgate_type: Forget gate type. The following are supported:
                - "full": The default data-dependent forget gate
                - "bias_only": The data-independent forget gate
                - "fixed": Forget gates with fixed values
                - "none": No forget gates. Equivalent to forget gates that are all
                  ones.
            - fgate_bias_init: Whether to use the special initialization for the bias terms
              in the forget gate. This should only be used with fgate types in
              ["bias_only", "fixed"].
            - decay_time_min: T_min for the forget gate bias initialization. See paper
              for details.
            - decay_time_max: T_max for the forget gate bias initialization. See paper
              for details.
            - use_output_norm: Whether to use output normalization.
            - norm_eps: Epsilon for the RMSNorms
            - qk_norm: Whether to use QK-norm
            - qk_norm_share_param_across_head: In QK-norm, whether to share the RMSNorm
              scaling parameters across heads. This is just for backward compatibility.
            - use_k_shift: Whether to use data-dependent key shift
            - use_v_shift: Whether to use data-dependent value shift
            - initializer_range: Standard deviation for initialization
            - layer_idx: The block index of this layer. Needed for the KV-cache
        """
        super().__init__()

        self.num_heads = num_heads
        if num_kv_heads is None:
            self.num_kv_heads = self.num_heads
        else:
            raise NotImplementedError("GQA has not been tested.")
            self.num_kv_heads = num_kv_heads
        self.num_kv_groups = num_heads // self.num_kv_heads
        self.hidden_size = hidden_size
        self.head_dim = self.hidden_size // self.num_heads
        self.kv_dim = self.num_kv_heads * self.head_dim
        self.window_size = window_size
        self.max_position_embeddings = max_position_embeddings
        self.layer_idx = layer_idx

        self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        if use_k_shift:
            self.k_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
        else:
            self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)

        if use_v_shift:
            self.v_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
        else:
            self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)

        self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
        self.use_k_shift = use_k_shift
        self.use_v_shift = use_v_shift

        device = next(self.parameters()).device
        # Forget gate
        assert fgate_type in ["full", "bias_only", "fixed", "none"]
        self.fgate_type = fgate_type
        self.fgate_bias_init = fgate_bias_init
        if fgate_type == "full":
            assert not fgate_bias_init
            self.fgate_proj = nn.Linear(self.hidden_size, self.num_heads, bias=True)
        elif fgate_type == "bias_only":
            self.fgate_bias = nn.Parameter(torch.zeros(size=(self.num_heads,), device=device))
            self.fgate_bias._no_weight_decay = True
        elif fgate_type == "fixed":
            assert fgate_bias_init, "You must set fgate_bias_init = True with fixed fgate"
            fgate_bias = torch.zeros(size=(self.num_heads,), device=device)
            self.register_buffer("fgate_bias", fgate_bias)
        elif fgate_type == "none":
            pass
        else:
            raise ValueError(f"Unknown fgate type {fgate_type}")

        # Forget gate initialization for data-independent and fixed forget gates
        if fgate_bias_init:
            assert decay_time_min is not None and decay_time_max is not None
            assert decay_time_min > 0 and decay_time_max > 0
            with torch.no_grad():
                log_decay_time = torch.linspace(math.log(decay_time_min), math.log(decay_time_max), steps=self.num_heads)
                decay_time = torch.exp(log_decay_time)
                # Such that t = -1 / log(sigmoid(b))
                bias_init = -torch.log(torch.expm1(1 / decay_time))
                self.fgate_bias.copy_(bias_init)
        else:
            assert decay_time_min is None and decay_time_max is None

        if use_output_gate:
            self.ogate_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
            self.ogate_act = ogate_act
            assert ogate_act in ["silu", "sigmoid"]
        else:
            self.ogate_proj = None

        if use_output_norm:
            self.output_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size, eps=norm_eps)
        else:
            self.output_norm = None

        if use_rope:
            self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
        else:
            self.rotary = None

        self.qk_norm = qk_norm
        self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
        if qk_norm:
            if self.qk_norm_share_param_across_head:
                # This is an incorrect implementation kept just for backward compatibility
                self.q_norm = RMSNorm(self.head_dim)
                self.k_norm = RMSNorm(self.head_dim)
            else:
                self.q_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
                self.k_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)

        self.initializer_range = initializer_range
        self.apply(self._initialize_weights)

    def _initialize_weights(self, module: nn.Module):
        # This will actually be overwritten by the outer init.
        if isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, mean=0.0, std=self.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """
        We assume that during decoding the attention mask is always 1. Otherwise this won't work.
        """
        batch_size, q_len, _ = hidden_states.size()
        if use_cache:
            key_shift_state = past_key_values.key_shift_cache[self.layer_idx]
            value_shift_state = past_key_values.value_shift_cache[self.layer_idx]
        else:
            key_shift_state = value_shift_state = None

        # Shift states are updated in place
        q = self.q_proj(hidden_states)
        if self.use_k_shift:
            k = self.k_proj(hidden_states, key_shift_state)
        else:
            k = self.k_proj(hidden_states)
        if self.use_v_shift:
            v = self.v_proj(hidden_states, value_shift_state)
        else:
            v = self.v_proj(hidden_states)

        if self.qk_norm and (not self.qk_norm_share_param_across_head):
            q = self.q_norm(q).to(q.dtype)
            k = self.k_norm(k).to(k.dtype)

        q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
        k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
        v = rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)

        if self.qk_norm and self.qk_norm_share_param_across_head:
            q = self.q_norm(q).to(q.dtype)
            k = self.k_norm(k).to(k.dtype)

        seqlen_offset, max_seqlen = 0, q.shape[1]
        if past_key_values is not None:
            seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
            max_seqlen = q.shape[1] + seqlen_offset

            if attention_mask is not None:
                # to eliminate the offsets of padding tokens
                seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
                max_seqlen = q.shape[1] + max(seqlen_offset)

        if self.max_position_embeddings is not None:
            max_seqlen = max(max_seqlen, self.max_position_embeddings)
        if self.rotary is not None:
            q, k = self.rotary(q, k, seqlen_offset, max_seqlen)

        if self.fgate_type == "full":
            fgate_logit = self.fgate_proj(hidden_states)
            fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
            log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
        elif self.fgate_type == "none":
            log_fgate = torch.zeros((batch_size, self.num_heads, q_len), dtype=torch.float32, device=hidden_states.device)
        else:
            assert self.fgate_type in ["fixed", "bias_only"]
            fgate_logit = torch.broadcast_to(self.fgate_bias, (batch_size, q_len, self.num_heads))
            fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
            log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())

        k = rearrange(k, 'b t h d -> b h t d')
        if past_key_values is not None:
            k, v, log_fgate = past_key_values.update(k, v, log_fgate, self.layer_idx)
            # k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
        q = rearrange(q, 'b t h d -> b h t d')

        if self.num_kv_groups > 1:
            assert False
            k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
            v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            B, _, T = log_fgate.size()
            assert attention_mask.size() == (B, T), ((B, T), attention_mask.size())
            seq_start = T - attention_mask.sum(dim=-1)
            o = forgetting_attention(
                q, k, v,
                log_fgate,
                head_first=True,
                seq_start=seq_start,
                sm_scale=1 / math.sqrt(self.head_dim),
            )
            o = rearrange(o, "b h t d -> b t h d")
        else:
            o = forgetting_attention(
                q, k, v,
                log_fgate,
                head_first=True,
                sm_scale=1 / math.sqrt(self.head_dim),
            )
            o = rearrange(o, "b h t d -> b t h d")

        o = o.reshape(batch_size, q_len, self.hidden_size)

        if self.output_norm is not None:
            o = self.output_norm(o)

        if self.ogate_proj is not None:
            # ogate = self.ogate_act(self.ogate_proj(hidden_states))
            # o = o * ogate
            # ogate = act_gate(self.ogate_proj(hidden_states), o)
            ogate_logit = self.ogate_proj(hidden_states)
            dtype = ogate_logit.dtype
            if self.ogate_act == "silu":
                o = swiglu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
            elif self.ogate_act == "sigmoid":
                o = glu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
            else:
                raise ValueError(f"Unknown ogate act {self.ogate_act}")
        else:
            o = self.o_proj(o)

        if not output_attentions:
            attentions = None
        else:
            SAVE_HEADS = [0, 1, 2, 3]
            # (B, H, T, T)
            score = q[:, SAVE_HEADS] @ k[:, SAVE_HEADS].mT
            log_lambda = torch.cumsum(log_fgate, dim=-1)
            decay_bias = (log_lambda[:, SAVE_HEADS, :, None] - log_lambda[:, SAVE_HEADS, None, :]).to(torch.bfloat16)
            # normalized_score = torch.softmax(score, dim=-1)
            attentions = (score, decay_bias)

        return o, attentions, past_key_values

    def init_shift_state(self, batch_size: int):
        param = next(self.parameters())
        state = dict()
        try:
            dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled("cuda") else torch.float32
        except TypeError:
            # Support legacy torch versions
            dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else torch.float32
        if self.use_k_shift:
            state['key_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
        else:
            state['key_shift'] = None
        if self.use_v_shift:
            state['value_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
        else:
            state['value_shift'] = None
        return state


class ForgettingTransformerMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[float] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> None:
        super().__init__()

        self.hidden_size = hidden_size
        # the final number of params is `hidden_ratio * hidden_size^2`
        # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]
        self.hidden_act = hidden_act
        assert hidden_act in ["swish", "sigmoid"]

    def forward(self, x):
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
        if self.hidden_act == "swish":
            return swiglu_linear(
                gate, y,
                self.down_proj.weight.to(y.dtype),
                self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
            )
        elif self.hidden_act == "sigmoid":
            return glu_linear(
                gate, y,
                self.down_proj.weight.to(y.dtype),
                self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
            )
        else:
            raise ValueError(f"Unknown hidden_act {self.hidden_act}")


class ForgettingTransformerBlock(nn.Module):
    def __init__(self, config: ForgettingTransformerConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.attn = ForgettingAttentionLayer(
            hidden_size=config.hidden_size,
            num_heads=config.num_heads,
            num_kv_heads=config.num_kv_heads,
            window_size=config.window_size,
            max_position_embeddings=config.max_position_embeddings,
            rope_base=config.rope_base,
            use_rope=config.use_rope,
            use_output_gate=config.use_output_gate,
            ogate_act=config.ogate_act,
            fgate_type=config.fgate_type,
            fgate_bias_init=config.fgate_bias_init,
            decay_time_min=config.decay_time_min,
            decay_time_max=config.decay_time_max,
            use_output_norm=config.use_output_norm,
            norm_eps=config.norm_eps,
            qk_norm=config.qk_norm,
            qk_norm_share_param_across_head=config.qk_norm_share_param_across_head,
            use_k_shift=config.use_k_shift,
            use_v_shift=config.use_v_shift,
            initializer_range=config.initializer_range,
            layer_idx=layer_idx
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = ForgettingTransformerMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward_attn(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ):
        # The residual is handled outside of this method
        # residual = hidden_states
        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )
        return hidden_states, attentions, past_key_values

    def forward_mlp(
        self,
        hidden_states: torch.Tensor,
        residual: torch.Tensor,
    ):
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        gradient_checkpointing: bool = False
        # **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:

        residual = hidden_states

        if gradient_checkpointing:
            forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
            forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
        else:
            forward_attn = self.forward_attn
            forward_mlp = self.forward_mlp

        hidden_states, attentions, past_key_values = forward_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )

        hidden_states = forward_mlp(
            hidden_states,
            residual,
        )

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attentions,)

        if use_cache:
            outputs += (past_key_values,)

        return outputs


class ForgettingTransformerPreTrainedModel(PreTrainedModel):

    config_class = ForgettingTransformerConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['ForgettingTransformerBlock']

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
    ):
        # if isinstance(module, (nn.Linear, nn.Conv1d)):
        if isinstance(module, (nn.Linear)):
            # Slightly different from the TF version, which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


class ForgettingTransformerModel(ForgettingTransformerPreTrainedModel):

    def __init__(self, config: ForgettingTransformerConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([ForgettingTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        # if output_attentions:
        #     warnings.warn(
        #         "`ForgettingTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
        #     )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is None and inputs_embeds is None:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if use_cache:
            # use_legacy_cache = not isinstance(past_key_values, Cache)
            # if use_legacy_cache:
            #     past_key_values = FgateDynamicCache.from_legacy_cache(past_key_values)
            if past_key_values is None:
                past_key_values = FgateDynamicCache()
                for layer_idx, layer in enumerate(self.layers):
                    shift_state = layer.attn.init_shift_state(
                        batch_size=input_ids.size(0),
                    )
                    past_key_values.update_shift_cache(
                        key_shift_state=shift_state["key_shift"],
                        value_shift_state=shift_state["value_shift"],
                        layer_idx=layer_idx
                    )
            else:
                assert isinstance(past_key_values, FgateDynamicCache)

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = {} if output_attentions else None
        next_decoder_cache = None

        for layer_id, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                gradient_checkpointing=self.gradient_checkpointing and self.training
            )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                OUTPUT_ATTN_LAYERS = [0, 7, 15, 23]
                if layer_id in OUTPUT_ATTN_LAYERS:
                    # all_attns += (layer_outputs[1],)
                    all_attns[layer_id] = layer_outputs[1]

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
            next_cache = next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class ForgettingTransformerForCausalLM(ForgettingTransformerPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = ForgettingTransformerModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs
    ):
        # only use the last token of `input_ids` if `past_key_values` is passed along.
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
            # recompiles graphs as the stride of the inputs is a guard.
            # Ref: https://github.com/huggingface/transformers/pull/29114
            # TODO: use `next_tokens` directly instead.
            model_inputs = {'input_ids': input_ids.contiguous()}

        model_inputs.update({
            'past_key_values': past_key_values,
            'use_cache': kwargs.get('use_cache'),
            'attention_mask': attention_mask,
        })
        return model_inputs

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]

        loss = None
        if labels is not None:
            if self.config.fuse_cross_entropy:
                loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
            else:
                loss_fct = nn.CrossEntropyLoss(reduction='none')
            logits = self.lm_head(hidden_states)
            # Enable model parallelism
            labels = labels.to(logits.device)
            # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
            loss = loss.view(*labels.size())
            del logits
            logits = None
        else:
            logits = self.lm_head(hidden_states)

        if not return_dict:
            raise NotImplementedError
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
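
For reference, here is a self-contained sketch (not part of the commit) of what the `fgate_bias_init` branch above computes. The `decay_time_min`/`decay_time_max` values below are made up for illustration; the check confirms the relation `t = -1 / log(sigmoid(b))` from the code comment.

```python
import math
import torch

decay_time_min, decay_time_max = 4.0, 512.0  # hypothetical T_min / T_max
num_heads = 8

# Log-uniformly spaced decay times across heads, as in the layer above
log_decay_time = torch.linspace(
    math.log(decay_time_min), math.log(decay_time_max), steps=num_heads
)
decay_time = torch.exp(log_decay_time)
bias_init = -torch.log(torch.expm1(1 / decay_time))

# Recover the decay time implied by each bias: t = -1 / log(sigmoid(b))
recovered = -1 / torch.nn.functional.logsigmoid(bias_init)
print(torch.allclose(recovered, decay_time, rtol=1e-4))  # True
```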
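The multi-token path of `ShiftLinear` calls a fused `token_shift` kernel whose source lives in a separate file. Assuming it computes the same per-head mix as the single-token branch (`alpha * previous + (1 - alpha) * current`, with a zero initial state), a plain-PyTorch sketch of the operation would be:

```python
import torch
from einops import rearrange

B, T, H, D = 2, 5, 4, 8
out = torch.randn(B, T, H * D)   # self.linear(x) in the real layer
alpha = torch.rand(B, T, H)      # sigmoid(self.shift_proj(x)) in the real layer

out_ph = rearrange(out, 'b t (h d) -> b t h d', h=H)
# Previous position's output per head, zero for the first position
prev = torch.cat([torch.zeros_like(out_ph[:, :1]), out_ph[:, :-1]], dim=1)
mixed = alpha[..., None] * prev + (1 - alpha[..., None]) * out_ph
result = rearrange(mixed, 'b t h d -> b t (h d)')
```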
__init__.py ADDED
@@ -0,0 +1 @@
# for HF remote code
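
These files are intended to be loaded as Hugging Face remote code. A hypothetical usage sketch follows; the repo id is a placeholder, not taken from this commit, and `trust_remote_code=True` is what lets `transformers` import the classes defined here:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "org/fox-model"  # placeholder repo id, for illustration only
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

inputs = tokenizer("The forgetting transformer", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0]))
```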
__pycache__/__init__.cpython-310.pyc ADDED
Binary file (612 Bytes).
 
__pycache__/configuration_forgetting_transformer.cpython-310.pyc ADDED
Binary file (2.59 kB).
 
__pycache__/fgate_cache.cpython-310.pyc ADDED
Binary file (9.16 kB).
 
__pycache__/glu_linear.cpython-310.pyc ADDED
Binary file (2.35 kB).
 
__pycache__/modeling_forgetting_transformer.cpython-310.pyc ADDED
Binary file (23.7 kB).
 
__pycache__/token_shift.cpython-310.pyc ADDED
Binary file (6.37 kB).
 
configuration_forgetting_gate.py ADDED
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class ForgettingTransformerConfig(PretrainedConfig):

    model_type = 'forgetting_transformer-project_fox'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        hidden_ratio: Optional[float] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 32,
        num_kv_heads: int = None,
        hidden_act: str = "swish",
        window_size: Optional[int] = None,
        max_position_embeddings: int = 2048,
        initializer_range: float = 0.02,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        attention_bias: bool = False,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        rope_base: float = 500000.0,
        use_rope: bool = False,
        use_output_gate: bool = False,
        ogate_act: str = "sigmoid",
        fgate_type: str = "full",
        fgate_bias_init: bool = False,
        decay_time_min: Optional[float] = None,
        decay_time_max: Optional[float] = None,
        use_output_norm: bool = False,
        qk_norm: bool = False,
        qk_norm_share_param_across_head: bool = False,
        use_k_shift: bool = False,
        use_v_shift: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.window_size = window_size
        self.max_position_embeddings = max_position_embeddings

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.fuse_cross_entropy = fuse_cross_entropy
        self.fuse_norm = fuse_norm
        self.rope_base = rope_base
        self.use_rope = use_rope
        self.use_output_gate = use_output_gate
        self.ogate_act = ogate_act
        self.fgate_type = fgate_type
        self.fgate_bias_init = fgate_bias_init
        self.decay_time_min = decay_time_min
        self.decay_time_max = decay_time_max
        self.use_output_norm = use_output_norm
        self.qk_norm = qk_norm
        self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
        self.use_k_shift = use_k_shift
        self.use_v_shift = use_v_shift

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
configuration_forgetting_transformer.py ADDED
@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-

from typing import Optional

from transformers.configuration_utils import PretrainedConfig


class ForgettingTransformerConfig(PretrainedConfig):

    model_type = 'forgetting_transformer-project_fox'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size: int = 32000,
        hidden_size: int = 2048,
        hidden_ratio: Optional[float] = 4,
        intermediate_size: Optional[int] = None,
        num_hidden_layers: int = 24,
        num_heads: int = 32,
        num_kv_heads: int = None,
        hidden_act: str = "swish",
        window_size: Optional[int] = None,
        max_position_embeddings: int = 2048,
        initializer_range: float = 0.02,
        elementwise_affine: Optional[bool] = True,
        norm_eps: float = 1e-6,
        use_cache: bool = True,
        pad_token_id: int = None,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        tie_word_embeddings: bool = False,
        attention_bias: bool = False,
        fuse_norm: bool = True,
        fuse_cross_entropy: bool = True,
        rope_base: float = 500000.0,
        use_rope: bool = False,
        use_output_gate: bool = False,
        ogate_act: str = "sigmoid",
        fgate_type: str = "full",
        fgate_bias_init: bool = False,
        decay_time_min: Optional[float] = None,
        decay_time_max: Optional[float] = None,
        use_output_norm: bool = False,
        qk_norm: bool = False,
        qk_norm_share_param_across_head: bool = False,
        use_k_shift: bool = False,
        use_v_shift: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_heads = num_heads
        self.num_kv_heads = num_kv_heads
        self.window_size = window_size
        self.max_position_embeddings = max_position_embeddings

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.elementwise_affine = elementwise_affine
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.fuse_cross_entropy = fuse_cross_entropy
        self.fuse_norm = fuse_norm
        self.rope_base = rope_base
        self.use_rope = use_rope
        self.use_output_gate = use_output_gate
        self.ogate_act = ogate_act
        self.fgate_type = fgate_type
        self.fgate_bias_init = fgate_bias_init
        self.decay_time_min = decay_time_min
        self.decay_time_max = decay_time_max
        self.use_output_norm = use_output_norm
        self.qk_norm = qk_norm
        self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
        self.use_k_shift = use_k_shift
        self.use_v_shift = use_v_shift

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
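
A minimal construction sketch (not part of the commit) for this config, with illustrative values; the combination `fgate_type="bias_only"` plus `fgate_bias_init=True` with both decay times set is consistent with the assertions in `ForgettingAttentionLayer`:

```python
# Assumes these files are importable, e.g. run from the repo directory
from configuration_forgetting_transformer import ForgettingTransformerConfig

config = ForgettingTransformerConfig(
    hidden_size=1536,          # illustrative sizes, not from this commit
    num_hidden_layers=24,
    num_heads=24,
    fgate_type="bias_only",    # data-independent forget gate
    fgate_bias_init=True,      # requires decay_time_min/max below
    decay_time_min=4.0,
    decay_time_max=512.0,
)
print(config.fgate_type, config.decay_time_min, config.decay_time_max)
```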
fgate_cache.py ADDED
@@ -0,0 +1,203 @@
from typing import List, Tuple, Optional, Any, Dict

import torch
from transformers.cache_utils import Cache


class FgateDynamicCache(Cache):
    """
    A cache that grows dynamically as more tokens are generated. This is the default for generative models.

    It stores the Key and Value states as a list of tensors, one for each layer. The expected shape for each tensor is
    `[batch_size, num_heads, seq_len, head_dim]`.

    Example:

    ```python
    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache

    >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
    >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")

    >>> inputs = tokenizer(text="My name is Qwen2", return_tensors="pt")

    >>> # Prepare a cache class and pass it to the model's forward
    >>> past_key_values = DynamicCache()
    >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
    >>> outputs.past_key_values  # access cache filled with keys/values from generation
    DynamicCache()
    ```
    """

    def __init__(self) -> None:
        super().__init__()
        self.key_cache: List[torch.Tensor] = []
        self.value_cache: List[torch.Tensor] = []
        self.log_fgate_cache: List[torch.Tensor] = []

        self.key_shift_cache: List[torch.Tensor] = []
        self.value_shift_cache: List[torch.Tensor] = []

        self._seen_tokens = 0  # Used in `generate` to keep a tally of how many tokens the cache has seen

    def update_shift_cache(
        self,
        key_shift_state: torch.Tensor,
        value_shift_state: torch.Tensor,
        layer_idx,
    ):
        assert layer_idx == len(self.key_shift_cache) == len(self.value_shift_cache)
        self.key_shift_cache.append(key_shift_state)
        self.value_shift_cache.append(value_shift_state)

    def __getitem__(self, layer_idx: int) -> List[Tuple[torch.Tensor]]:
        """
        Support for backwards-compatible `past_key_value` indexing, e.g. `past_key_value[0][0].shape[2]` to get the
        sequence length.
        """
        if layer_idx < len(self):
            return (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])
        else:
            raise KeyError(f"Cache only has {len(self)} layers, attempted to access layer with index {layer_idx}")

    def __iter__(self):
        """
        Support for backwards-compatible `past_key_value` iteration, e.g. `for x in past_key_value:` to iterate over
        keys and values
        """
        for layer_idx in range(len(self)):
            yield (self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx])

    def __len__(self):
        """
        Support for backwards-compatible `past_key_value` length, e.g. `len(past_key_value)`. This value corresponds
        to the number of layers in the model.
        """
        return len(self.key_cache)

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        log_fgate_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Updates the cache with the new `key_states`, `value_states` and `log_fgate_states` for the layer `layer_idx`.

        Parameters:
            key_states (`torch.Tensor`):
                The new key states to cache.
            value_states (`torch.Tensor`):
                The new value states to cache.
            log_fgate_states (`torch.Tensor`):
                The new log forget gate values to cache, with shape `(B, H, T)`.
            layer_idx (`int`):
                The index of the layer to cache the states for.
            cache_kwargs (`Dict[str, Any]`, `optional`):
                Additional arguments for the cache subclass. No additional arguments are used here.

        Return:
            A tuple containing the updated key, value and log forget gate states.
        """
        assert log_fgate_states.ndim == 3, f"log_fgate must be (B, H, T), but got {log_fgate_states.size()}"
        # Update the number of seen tokens
        if layer_idx == 0:
            self._seen_tokens += key_states.shape[-2]

        # Update the cache
        if len(self.key_cache) <= layer_idx:
            self.key_cache.append(key_states)
            self.value_cache.append(value_states)
            self.log_fgate_cache.append(log_fgate_states)
        else:
            self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=-2)
            self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=-2)
            self.log_fgate_cache[layer_idx] = torch.cat([self.log_fgate_cache[layer_idx], log_fgate_states], dim=-1)

        return self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]

    def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
        """Returns the sequence length of the cached states. A layer index can be optionally passed."""
        # TODO: deprecate this function in favor of `cache_position`
        if len(self.key_cache) <= layer_idx:
            return 0
        return self.key_cache[layer_idx].shape[-2]

    def get_max_length(self) -> Optional[int]:
        """Returns the maximum sequence length of the cached states. FgateDynamicCache does not have a maximum length."""
        return None

    def to_legacy_cache(self) -> Tuple[Tuple[torch.Tensor], Tuple[torch.Tensor]]:
        """Converts the `FgateDynamicCache` instance into its equivalent in the legacy cache format. Used for
        backward compatibility."""
        legacy_cache = ()
        for layer_idx in range(len(self)):
            legacy_cache += ((self.key_cache[layer_idx], self.value_cache[layer_idx], self.log_fgate_cache[layer_idx]),)
        return legacy_cache

    @classmethod
    def from_legacy_cache(cls, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, num_layers: Optional[int] = None) -> "FgateDynamicCache":
        """Converts a cache in the legacy cache format into an equivalent `FgateDynamicCache`. Used for
        backward compatibility."""
        raise NotImplementedError
        assert num_layers is not None
        cache = cls(num_layers)
        if past_key_values is not None:
            for layer_idx in range(len(past_key_values)):
                key_states, value_states, log_fgate_states = past_key_values[layer_idx]
                cache.update(key_states, value_states, log_fgate_states, layer_idx)
        return cache

    def crop(self, max_length: int):
        """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
        negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""
        # In case it is negative
        if max_length < 0:
            max_length = self.get_seq_length() - abs(max_length)

        if self.get_seq_length() <= max_length:
            return

        self._seen_tokens = max_length
        for idx in range(len(self.key_cache)):
            self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
            self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]
            self.log_fgate_cache[idx] = self.log_fgate_cache[idx][..., :max_length]

    def batch_split(self, full_batch_size: int, split_size: int) -> List["FgateDynamicCache"]:
        """Split the current instance into a list of `FgateDynamicCache` by the batch size. This will be used by
        `_split_model_inputs()` in `generation.utils`"""
        out = []
        for i in range(0, full_batch_size, split_size):
            current_split = FgateDynamicCache()
            current_split._seen_tokens = self._seen_tokens
            current_split.key_cache = [tensor[i: i + split_size] for tensor in self.key_cache]
            current_split.value_cache = [tensor[i: i + split_size] for tensor in self.value_cache]
            current_split.log_fgate_cache = [tensor[i: i + split_size] for tensor in self.log_fgate_cache]
            out.append(current_split)
        return out

    @classmethod
    def from_batch_splits(cls, splits: List["FgateDynamicCache"]) -> "FgateDynamicCache":
        """This is the opposite of the above `batch_split()` method. This will be used by `stack_model_outputs` in
        `generation.utils`"""
        cache = cls()
        for idx in range(len(splits[0])):
            layer_keys = torch.cat([current.key_cache[idx] for current in splits], dim=0)
            layer_values = torch.cat([current.value_cache[idx] for current in splits], dim=0)
            layer_log_fgates = torch.cat([current.log_fgate_cache[idx] for current in splits], dim=0)
            cache.update(layer_keys, layer_values, layer_log_fgates, idx)
        return cache

    def batch_repeat_interleave(self, repeats: int):
        """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search."""
        for layer_idx in range(len(self)):
            self.key_cache[layer_idx] = self.key_cache[layer_idx].repeat_interleave(repeats, dim=0)
            self.value_cache[layer_idx] = self.value_cache[layer_idx].repeat_interleave(repeats, dim=0)
            self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx].repeat_interleave(repeats, dim=0)

    def batch_select_indices(self, indices: torch.Tensor):
        """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search."""
        for layer_idx in range(len(self)):
            self.key_cache[layer_idx] = self.key_cache[layer_idx][indices, ...]
            self.value_cache[layer_idx] = self.value_cache[layer_idx][indices, ...]
            self.log_fgate_cache[layer_idx] = self.log_fgate_cache[layer_idx][indices, ...]
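
A small sketch (not part of the commit) of the `update` semantics above: the first call for a layer appends the states, later calls concatenate keys/values along the time axis and log forget gates along the last axis, and `get_seq_length` reports the total:

```python
import torch
from fgate_cache import FgateDynamicCache  # assuming the file is importable

B, H, D = 1, 2, 4
cache = FgateDynamicCache()

# Prefill with 3 tokens, then decode 1 token, for a single layer (layer 0)
for t in (3, 1):
    k = torch.randn(B, H, t, D)
    v = torch.randn(B, H, t, D)
    log_fgate = torch.nn.functional.logsigmoid(torch.randn(B, H, t))
    k_all, v_all, f_all = cache.update(k, v, log_fgate, layer_idx=0)

print(cache.get_seq_length(0), k_all.shape, f_all.shape)
# 4 torch.Size([1, 2, 4, 4]) torch.Size([1, 2, 4])
```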
glu_linear.py ADDED
@@ -0,0 +1,61 @@
import torch
import torch.nn.functional as F


glu_fwd_codestring = """
template <typename T> T glu_fwd(T x, T y) {
    return float(y) / (1.0f + ::exp(-float(x)));
}
"""
glu_bwd_codestring = """
template <typename T> T glu_bwd(T x, T y, T g, T& dx, T& dy) {
    float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
    dx = x_sigmoid * (1.0f - x_sigmoid) * float(g) * float(y);
    dy = x_sigmoid * float(g);
}
"""

glu_bwd_with_output_codestring = """
template <typename T> T glu_bwd_with_output(T x, T y, T g, T& dx, T& dy, T& z) {
    float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
    dx = x_sigmoid * (1.0f - x_sigmoid) * float(g) * float(y);
    dy = x_sigmoid * float(g);
    z = x_sigmoid * float(y);
}
"""

glu_fwd = torch.cuda.jiterator._create_jit_fn(glu_fwd_codestring)
glu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(glu_bwd_codestring, num_outputs=2)
glu_bwd_with_output = torch.cuda.jiterator._create_multi_output_jit_fn(glu_bwd_with_output_codestring, num_outputs=3)


class GLULinearFunction(torch.autograd.Function):
    r"""
    Gated Linear Unit (GLU) function followed by a linear transformation.

    .. math::
        \text{GLULinear}(x, y, W, b) = (\sigma(x) * y) W + b

    This simple wrapper discards the intermediate result of GLU(x, y) to save memory.
    """

    @staticmethod
    def forward(ctx, x, y, weight, bias):
        z = glu_fwd(x, y)
        out = F.linear(z.to(weight.dtype), weight, bias)
        # We don't store z; it will be recomputed in the backward pass to save memory
        ctx.save_for_backward(x, y, weight)
        ctx.linear_bias_is_none = bias is None
        return out

    @staticmethod
    def backward(ctx, dout, *args):
        x, y, weight = ctx.saved_tensors
        dout = dout.reshape(-1, dout.shape[-1])
        dz = F.linear(dout, weight.t()).view_as(x)
        dx, dy, z = glu_bwd_with_output(x, y, dz)
        dlinear_weight = torch.einsum("bo,bi->oi", dout, z.reshape(-1, z.shape[-1]))
        dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
        return dx, dy, dlinear_weight, dlinear_bias


glu_linear = GLULinearFunction.apply
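
A quick equivalence check (not part of the commit) of `glu_linear` against an unfused reference, `F.linear(torch.sigmoid(x) * y, W, b)`; note the jiterator kernels above require a CUDA device:

```python
import torch
import torch.nn.functional as F
from glu_linear import glu_linear  # assuming the file is importable

if torch.cuda.is_available():
    x = torch.randn(2, 3, 16, device="cuda")
    y = torch.randn(2, 3, 16, device="cuda")
    w = torch.randn(8, 16, device="cuda")

    fused = glu_linear(x, y, w, None)
    ref = F.linear(torch.sigmoid(x) * y, w, None)
    print(torch.allclose(fused, ref, atol=1e-5))  # expected: True
```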
modeling_forgetting_gate.py ADDED
@@ -0,0 +1,897 @@
+ # -*- coding: utf-8 -*-
+
+ from __future__ import annotations
+
+ import math
+ import warnings
+ from typing import List, Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
+                                            CausalLMOutputWithPast)
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+
+ # from fla.layers.attn import Attention
+ from fla.modules import FusedCrossEntropyLoss, RMSNorm
+ from fla.modules.layernorm import group_norm_fn
+ from fla.modules.activations import swiglu_linear
+
+ from fla.modules import RotaryEmbedding
+ from einops import rearrange
+
+ from .configuration_forgetting_transformer import ForgettingTransformerConfig
+ from forgetting_transformer.ops.forgetting_attention_std import forgetting_attention_std as forgetting_attention
+ from .fgate_cache import FgateDynamicCache
+ from .glu_linear import glu_linear
+ from .token_shift import token_shift
+
+ from functools import partial
+
+ logger = logging.get_logger(__name__)
+
+
+ class ShiftLinear(nn.Module):
+
+     def __init__(
+         self,
+         input_dim: int,
+         output_dim: int,
+         num_heads: int,
+         bias: bool,
+         shift_bias: bool = False
+     ):
+         super().__init__()
+
+         self.input_dim = input_dim
+         self.output_dim = output_dim
+         self.num_heads = num_heads
+         assert self.output_dim % self.num_heads == 0
+
+         self.linear = nn.Linear(input_dim, output_dim, bias=bias)
+         self.shift_proj = nn.Linear(input_dim, num_heads, bias=shift_bias)
+
+     def __repr__(self) -> str:
+         s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim})"
+         return s
+
+     def forward(self, x: torch.Tensor, shift_state: Optional[torch.Tensor]) -> torch.Tensor:
+         assert x.ndim == 3, "Input must be (B, T, D)"
+         B, T, D = x.size()
+         out = self.linear(x)
+         # Per-head mixing coefficients, shape (B, T, H)
+         alpha = torch.sigmoid(self.shift_proj(x).float())
+         # left, right, top, bottom (B, T=H, D=W)
+         # out_prev = nn.functional.pad(out, (0, 0, 1, -1))
+         # out_prev = torch.roll(out, shifts=1, dims=1)
+
+         out_per_head = rearrange(out, 'b t (h d) -> b t h d', h=self.num_heads)
+         if T > 1:
+             # TODO: note that in this case the cache is not used
+             result_per_head = token_shift(out_per_head, alpha, 1.0 - alpha)
+         else:
+             shift_state_per_head = rearrange(shift_state, 'b (h d) -> b 1 h d', h=self.num_heads)
+             result_per_head = (alpha[..., None] * shift_state_per_head + (1 - alpha[..., None]) * out_per_head)
+
+         result_per_head = result_per_head.to(out.dtype)
+
+         if shift_state is not None:
+             shift_state.copy_(out[:, -1, :])
+
+         result = rearrange(result_per_head, 'b t h d -> b t (h d)', h=self.num_heads)
+         return result
+
+ class GroupRMSNorm(nn.Module):
+     def __init__(
+         self,
+         num_groups: int,
+         hidden_size: int,
+         elementwise_affine: bool = True,
+         bias: bool = False,
+         eps: float = 1e-5
+     ) -> GroupRMSNorm:
+         super().__init__()
+
+         if hidden_size % num_groups != 0:
+             raise ValueError('hidden_size must be divisible by num_groups')
+
+         self.num_groups = num_groups
+         self.hidden_size = hidden_size
+         self.elementwise_affine = elementwise_affine
+         self.eps = eps
+
+         self.register_parameter("weight", None)
+         self.register_parameter("bias", None)
+         if elementwise_affine:
+             self.weight = nn.Parameter(torch.ones(hidden_size))
+             if bias:
+                 self.bias = nn.Parameter(torch.zeros(hidden_size))
+
+     def __repr__(self) -> str:
+         s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
+         if not self.elementwise_affine:
+             s += f", elementwise_affine={self.elementwise_affine}"
+         s += f", eps={self.eps}"
+         s += ")"
+         return s
+
+     def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
+         return group_norm_fn(
+             x,
+             self.weight,
+             self.bias,
+             residual=residual,
+             eps=self.eps,
+             prenorm=prenorm,
+             residual_in_fp32=residual_in_fp32,
+             is_rms_norm=True,
+             num_groups=self.num_groups
+         )
+
+ class ForgettingAttentionLayer(nn.Module):
+
+     def __init__(
+         self,
+         hidden_size: int = 2048,
+         num_heads: int = 32,
+         num_kv_heads: Optional[int] = None,
+         window_size: Optional[int] = None,
+         max_position_embeddings: Optional[int] = None,
+         use_rope: bool = False,
+         rope_base: float = 500000.0,
+         use_output_gate: bool = False,
+         ogate_act: str = "sigmoid",
+         fgate_type: str = "full",
+         fgate_bias_init: bool = False,
+         decay_time_min: Optional[float] = None,
+         decay_time_max: Optional[float] = None,
+         use_output_norm: bool = False,
+         norm_eps: float = 1e-6,
+         qk_norm: bool = False,
+         qk_norm_share_param_across_head: bool = False,
+         use_k_shift: bool = False,
+         use_v_shift: bool = False,
+         initializer_range: float = 0.02,
+         layer_idx: int = None
+     ):
+         """
+         Forgetting Attention layer.
+
+         Arguments:
+             - hidden_size: Input dimension and qkv dimension
+             - num_heads: Number of heads
+             - num_kv_heads: Not used. Should be None
+             - window_size: Not used. Should be None
+             - max_position_embeddings: Not used. Should be None
+             - use_rope: Whether to use RoPE. Default is False
+             - rope_base: The theta hyperparameter in RoPE. This has no effect if
+               use_rope=False
+             - use_output_gate: Whether to use output gates. Note that using output gates
+               introduces extra parameters, so you may want to reduce parameters in
+               other components (e.g., MLPs)
+             - ogate_act: Activation for the output gate. Either "sigmoid" or "silu"
+             - fgate_type: Forget gate type. The following are supported:
+                 - "full": The default data-dependent forget gate
+                 - "bias_only": The data-independent forget gate
+                 - "fixed": Forget gates with fixed values
+                 - "none": No forget gates. Equivalent to forget gates that are all ones.
+             - fgate_bias_init: Whether to use special initialization for the bias terms
+               in the forget gate. This should only be used with fgate types in
+               ["bias_only", "fixed"].
+             - decay_time_min: T_min for the forget gate bias initialization. See paper
+               for details.
+             - decay_time_max: T_max for the forget gate bias initialization. See paper
+               for details.
+             - use_output_norm: Whether to use output normalization.
+             - norm_eps: Epsilon for the RMSNorms
+             - qk_norm: Whether to use QK-norm
+             - qk_norm_share_param_across_head: In QK-norm, whether to share the RMSNorm
+               scaling parameters across heads. This is just for backward compatibility.
+             - use_k_shift: Whether to use data-dependent key shift
+             - use_v_shift: Whether to use data-dependent value shift
+             - initializer_range: Standard deviation for initialization
+             - layer_idx: The block index of this layer. Needed for the KV-cache
+         """
+         super().__init__()
+
+         self.num_heads = num_heads
+         if num_kv_heads is None:
+             self.num_kv_heads = self.num_heads
+         else:
+             raise NotImplementedError("GQA has not been tested.")
+         self.num_kv_groups = num_heads // self.num_kv_heads
+         self.hidden_size = hidden_size
+         self.head_dim = self.hidden_size // self.num_heads
+         self.kv_dim = self.num_kv_heads * self.head_dim
+         self.window_size = window_size
+         self.max_position_embeddings = max_position_embeddings
+         self.layer_idx = layer_idx
+
+         self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         if use_k_shift:
+             self.k_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
+         else:
+             self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
+
+         if use_v_shift:
+             self.v_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
+         else:
+             self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
+
+         self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+         self.use_k_shift = use_k_shift
+         self.use_v_shift = use_v_shift
+
+         device = next(self.parameters()).device
+         # Forget gate
+         assert fgate_type in ["full", "bias_only", "fixed", "none"]
+         self.fgate_type = fgate_type
+         self.fgate_bias_init = fgate_bias_init
+         if fgate_type == "full":
+             assert not fgate_bias_init
+             self.fgate_proj = nn.Linear(self.hidden_size, self.num_heads, bias=True)
+         elif fgate_type == "bias_only":
+             self.fgate_bias = nn.Parameter(torch.zeros(size=(self.num_heads,), device=device))
+             self.fgate_bias._no_weight_decay = True
+         elif fgate_type == "fixed":
+             assert fgate_bias_init, "You must set fgate_bias_init = True with fixed fgate"
+             fgate_bias = torch.zeros(size=(self.num_heads,), device=device)
+             self.register_buffer("fgate_bias", fgate_bias)
+         elif fgate_type == "none":
+             pass
+         else:
+             raise ValueError(f"Unknown fgate type {fgate_type}")
+
+         # Forget gate initialization for data-independent and fixed forget gates
+         if fgate_bias_init:
+             assert decay_time_min is not None and decay_time_max is not None
+             assert decay_time_min > 0 and decay_time_max > 0
+             with torch.no_grad():
+                 log_decay_time = torch.linspace(math.log(decay_time_min), math.log(decay_time_max), steps=self.num_heads)
+                 decay_time = torch.exp(log_decay_time)
+                 # Such that t = -1 / log(sigmoid(b))
+                 bias_init = -torch.log(torch.expm1(1 / decay_time))
+                 self.fgate_bias.copy_(bias_init)
+         else:
+             assert decay_time_min is None and decay_time_max is None
+
+         if use_output_gate:
+             self.ogate_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+             self.ogate_act = ogate_act
+             assert ogate_act in ["silu", "sigmoid"]
+         else:
+             self.ogate_proj = None
+
+         if use_output_norm:
+             self.output_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size, eps=norm_eps)
+         else:
+             self.output_norm = None
+
+         if use_rope:
+             self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
+         else:
+             self.rotary = None
+
+         self.qk_norm = qk_norm
+         self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
+         if qk_norm:
+             if self.qk_norm_share_param_across_head:
+                 # This is an incorrect implementation kept just for backward compatibility
+                 self.q_norm = RMSNorm(self.head_dim)
+                 self.k_norm = RMSNorm(self.head_dim)
+             else:
+                 self.q_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
+                 self.k_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
+
+         self.initializer_range = initializer_range
+         self.apply(self._initialize_weights)
+     def _initialize_weights(self, module: nn.Module):
+         # This will actually be overwritten by the outer init.
+         if isinstance(module, nn.Linear):
+             nn.init.normal_(module.weight, mean=0.0, std=self.initializer_range)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Cache] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         """
+         We assume that during decoding the attention mask is always 1. Otherwise this won't work.
+         """
+         batch_size, q_len, _ = hidden_states.size()
+         if use_cache:
+             key_shift_state = past_key_values.key_shift_cache[self.layer_idx]
+             value_shift_state = past_key_values.value_shift_cache[self.layer_idx]
+         else:
+             key_shift_state = value_shift_state = None
+
+         # Shift states are updated in place
+         q = self.q_proj(hidden_states)
+         if self.use_k_shift:
+             k = self.k_proj(hidden_states, key_shift_state)
+         else:
+             k = self.k_proj(hidden_states)
+         if self.use_v_shift:
+             v = self.v_proj(hidden_states, value_shift_state)
+         else:
+             v = self.v_proj(hidden_states)
+
+         if self.qk_norm and (not self.qk_norm_share_param_across_head):
+             q = self.q_norm(q).to(q.dtype)
+             k = self.k_norm(k).to(k.dtype)
+
+         q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
+         k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
+         v = rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)
+
+         if self.qk_norm and self.qk_norm_share_param_across_head:
+             q = self.q_norm(q).to(q.dtype)
+             k = self.k_norm(k).to(k.dtype)
+
+         seqlen_offset, max_seqlen = 0, q.shape[1]
+         if past_key_values is not None:
+             seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
+             max_seqlen = q.shape[1] + seqlen_offset
+
+             if attention_mask is not None:
+                 # to account for the offsets of padding tokens
+                 seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
+                 max_seqlen = q.shape[1] + max(seqlen_offset)
+
+         if self.max_position_embeddings is not None:
+             max_seqlen = max(max_seqlen, self.max_position_embeddings)
+         if self.rotary is not None:
+             q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
+
+         if self.fgate_type == "full":
+             fgate_logit = self.fgate_proj(hidden_states)
+             fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
+             log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
+         elif self.fgate_type == "none":
+             log_fgate = torch.zeros((batch_size, self.num_heads, q_len), dtype=torch.float32, device=hidden_states.device)
+         else:
+             assert self.fgate_type in ["fixed", "bias_only"]
+             fgate_logit = torch.broadcast_to(self.fgate_bias, (batch_size, q_len, self.num_heads))
+             fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
+             log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
+
+         k = rearrange(k, 'b t h d -> b h t d')
+         if past_key_values is not None:
+             k, v, log_fgate = past_key_values.update(k, v, log_fgate, self.layer_idx)
+             # k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
+         q = rearrange(q, 'b t h d -> b h t d')
+
+         if self.num_kv_groups > 1:
+             assert False
+             k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
+             v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
+
+         # Contains at least one padding token in the sequence
+         if attention_mask is not None:
+             B, _, T = log_fgate.size()
+             assert attention_mask.size() == (B, T), ((B, T), attention_mask.size())
+             seq_start = T - attention_mask.sum(dim=-1)
+             o = forgetting_attention(
+                 q, k, v,
+                 log_fgate,
+                 head_first=True,
+                 seq_start=seq_start,
+                 sm_scale=1 / math.sqrt(self.head_dim),
+             )
+             o = rearrange(o, "b h t d -> b t h d")
+         else:
+             o = forgetting_attention(
+                 q, k, v,
+                 log_fgate,
+                 head_first=True,
+                 sm_scale=1 / math.sqrt(self.head_dim),
+             )
+             o = rearrange(o, "b h t d -> b t h d")
+
+         o = o.reshape(batch_size, q_len, self.hidden_size)
+
+         if self.output_norm is not None:
+             o = self.output_norm(o)
+
+         if self.ogate_proj is not None:
+             # ogate = self.ogate_act(self.ogate_proj(hidden_states))
+             # o = o * ogate
+             # ogate = act_gate(self.ogate_proj(hidden_states), o)
+             ogate_logit = self.ogate_proj(hidden_states)
+             dtype = ogate_logit.dtype
+             if self.ogate_act == "silu":
+                 o = swiglu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
+             elif self.ogate_act == "sigmoid":
+                 o = glu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
+             else:
+                 raise ValueError(f"Unknown ogate act {self.ogate_act}")
+         else:
+             o = self.o_proj(o)
+
+         if not output_attentions:
+             attentions = None
+         else:
+             SAVE_HEADS = [0, 1, 2, 3]
+             # (B, H, T, T)
+             score = q[:, SAVE_HEADS] @ k[:, SAVE_HEADS].mT
+             log_lambda = torch.cumsum(log_fgate, dim=-1)
+             decay_bias = (log_lambda[:, SAVE_HEADS, :, None] - log_lambda[:, SAVE_HEADS, None, :]).to(torch.bfloat16)
+             # normalized_score = torch.softmax(score, dim=-1)
+             attentions = (score, decay_bias)
+
+         return o, attentions, past_key_values
+
+     def init_shift_state(self, batch_size: int):
+         param = next(self.parameters())
+         state = dict()
+         try:
+             dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled("cuda") else torch.float32
+         except TypeError:
+             # Support legacy torch versions
+             dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else torch.float32
+         if self.use_k_shift:
+             state['key_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
+         else:
+             state['key_shift'] = None
+         if self.use_v_shift:
+             state['value_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
+         else:
+             state['value_shift'] = None
+         return state
+
+
+ class ForgettingTransformerMLP(nn.Module):
+
+     def __init__(
+         self,
+         hidden_size: int,
+         hidden_ratio: Optional[float] = None,
+         intermediate_size: Optional[int] = None,
+         hidden_act: str = 'swish'
+     ) -> ForgettingTransformerMLP:
+         super().__init__()
+
+         self.hidden_size = hidden_size
+         # the final number of params is `hidden_ratio * hidden_size^2`
+         # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
+         if hidden_ratio is None:
+             hidden_ratio = 4
+         if intermediate_size is None:
+             intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
+             intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
+         self.hidden_ratio = hidden_ratio
+         self.intermediate_size = intermediate_size
+
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.act_fn = ACT2FN[hidden_act]
+         self.hidden_act = hidden_act
+         assert hidden_act in ["swish", "sigmoid"]
+
+     def forward(self, x):
+         y = self.gate_proj(x)
+         gate, y = y.chunk(2, -1)
+         # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
+         if self.hidden_act == "swish":
+             return swiglu_linear(
+                 gate, y,
+                 self.down_proj.weight.to(y.dtype),
+                 self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
+             )
+         elif self.hidden_act == "sigmoid":
+             return glu_linear(
+                 gate, y,
+                 self.down_proj.weight.to(y.dtype),
+                 self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
+             )
+         else:
+             raise ValueError()
+
+
+ class ForgettingTransformerBlock(nn.Module):
+     def __init__(self, config: ForgettingTransformerConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+         self.attn = ForgettingAttentionLayer(
+             hidden_size=config.hidden_size,
+             num_heads=config.num_heads,
+             num_kv_heads=config.num_kv_heads,
+             window_size=config.window_size,
+             max_position_embeddings=config.max_position_embeddings,
+             rope_base=config.rope_base,
+             use_rope=config.use_rope,
+             use_output_gate=config.use_output_gate,
+             ogate_act=config.ogate_act,
+             fgate_type=config.fgate_type,
+             fgate_bias_init=config.fgate_bias_init,
+             decay_time_min=config.decay_time_min,
+             decay_time_max=config.decay_time_max,
+             use_output_norm=config.use_output_norm,
+             norm_eps=config.norm_eps,
+             qk_norm=config.qk_norm,
+             qk_norm_share_param_across_head=config.qk_norm_share_param_across_head,
+             use_k_shift=config.use_k_shift,
+             use_v_shift=config.use_v_shift,
+             initializer_range=config.initializer_range,
+             layer_idx=layer_idx
+         )
+         self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
+         self.mlp = ForgettingTransformerMLP(
+             hidden_size=config.hidden_size,
+             hidden_ratio=config.hidden_ratio,
+             intermediate_size=config.intermediate_size,
+             hidden_act=config.hidden_act
+         )
+
+     def forward_attn(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         **kwargs,
+     ):
+         # residual handled outside of this
+         # residual = hidden_states
+         hidden_states = self.attn_norm(hidden_states)
+         hidden_states, attentions, past_key_values = self.attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions
+         )
+         return hidden_states, attentions, past_key_values
+
+     def forward_mlp(
+         self,
+         hidden_states: torch.Tensor,
+         residual: torch.Tensor,
+     ):
+         hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         return hidden_states
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[Tuple[torch.Tensor]] = None,
+         output_attentions: Optional[bool] = False,
+         use_cache: Optional[bool] = False,
+         gradient_checkpointing: bool = False
+         # **kwargs,
+     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+
+         residual = hidden_states
+
+         if gradient_checkpointing:
+             forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
+             forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
+         else:
+             forward_attn = self.forward_attn
+             forward_mlp = self.forward_mlp
+
+         hidden_states, attentions, past_key_values = forward_attn(
+             hidden_states=hidden_states,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             use_cache=use_cache,
+             output_attentions=output_attentions
+         )
+
+         hidden_states = forward_mlp(
+             hidden_states,
+             residual,
+         )
+
+         outputs = (hidden_states,)
+
+         if output_attentions:
+             outputs += (attentions,)
+
+         if use_cache:
+             outputs += (past_key_values,)
+
+         return outputs
+
+
+ class ForgettingTransformerPreTrainedModel(PreTrainedModel):
+
+     config_class = ForgettingTransformerConfig
+     supports_gradient_checkpointing = True
+     _no_split_modules = ['ForgettingTransformerBlock']
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(
+         self,
+         module: nn.Module,
+     ):
+         # if isinstance(module, (nn.Linear, nn.Conv1d)):
+         if isinstance(module, nn.Linear):
+             # Slightly different from the TF version which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+
+
+ class ForgettingTransformerModel(ForgettingTransformerPreTrainedModel):
+
+     def __init__(self, config: ForgettingTransformerConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([ForgettingTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
+         self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
+
+         self.gradient_checkpointing = False
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.embeddings
+
+     def set_input_embeddings(self, value):
+         self.embeddings = value
+
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         # if output_attentions:
+         #     warnings.warn(
+         #         "`ForgettingTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
+         #     )
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         # retrieve input_ids and inputs_embeds
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is None and inputs_embeds is None:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         if use_cache:
+             # use_legacy_cache = not isinstance(past_key_values, Cache)
+             # if use_legacy_cache:
+             #     past_key_values = FgateDynamicCache.from_legacy_cache(past_key_values)
+             if past_key_values is None:
+                 past_key_values = FgateDynamicCache()
+                 for layer_idx, layer in enumerate(self.layers):
+                     shift_state = layer.attn.init_shift_state(
+                         batch_size=input_ids.size(0),
+                     )
+                     past_key_values.update_shift_cache(
+                         key_shift_state=shift_state["key_shift"],
+                         value_shift_state=shift_state["value_shift"],
+                         layer_idx=layer_idx
+                     )
+             else:
+                 assert isinstance(past_key_values, FgateDynamicCache)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.embeddings(input_ids)
+
+         # embed positions
+         hidden_states = inputs_embeds
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                 )
+                 use_cache = False
+
+         all_hidden_states = () if output_hidden_states else None
+         all_attns = {} if output_attentions else None
+         next_decoder_cache = None
+
+         for layer_id, layer in enumerate(self.layers):
+             if output_hidden_states:
+                 all_hidden_states += (hidden_states,)
+
+             layer_outputs = layer(
+                 hidden_states,
+                 attention_mask=attention_mask,
+                 past_key_values=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 gradient_checkpointing=self.gradient_checkpointing and self.training
+             )
+
+             hidden_states = layer_outputs[0]
+
+             if use_cache:
+                 next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+             if output_attentions:
+                 OUTPUT_ATTN_LAYERS = [0, 7, 15, 23]
+                 if layer_id in OUTPUT_ATTN_LAYERS:
+                     # all_attns += (layer_outputs[1],)
+                     all_attns[layer_id] = layer_outputs[1]
+
+         hidden_states = self.norm(hidden_states)
+
+         # add hidden states from the last decoder layer
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         next_cache = None
+         if use_cache:
+             # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+             next_cache = next_decoder_cache
+         if not return_dict:
+             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=next_cache,
+             hidden_states=all_hidden_states,
+             attentions=all_attns
+         )
+
+
+ class ForgettingTransformerForCausalLM(ForgettingTransformerPreTrainedModel):
+     _tied_weights_keys = ["lm_head.weight"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = ForgettingTransformerModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embeddings
+
+     def set_input_embeddings(self, value):
+         self.model.embeddings = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     def prepare_inputs_for_generation(
+         self,
+         input_ids: torch.LongTensor = None,
+         past_key_values: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         **kwargs
+     ):
+         # only keep the last token of `input_ids` if `past_key_values` is passed along
+         if past_key_values is not None:
+             input_ids = input_ids[:, -1:]
+         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+         if inputs_embeds is not None and past_key_values is None:
+             model_inputs = {'inputs_embeds': inputs_embeds}
+         else:
+             # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
+             # recompiles graphs as the stride of the inputs is a guard.
+             # Ref: https://github.com/huggingface/transformers/pull/29114
+             # TODO: use `next_tokens` directly instead.
+             model_inputs = {'input_ids': input_ids.contiguous()}
+
+         model_inputs.update({
+             'past_key_values': past_key_values,
+             'use_cache': kwargs.get('use_cache'),
+             'attention_mask': attention_mask,
+         })
+         return model_inputs
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         past_key_values: Optional[List[torch.FloatTensor]] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         outputs = self.model(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict
+         )
+
+         hidden_states = outputs[0]
+
+         loss = None
+         if labels is not None:
+             if self.config.fuse_cross_entropy:
+                 loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
+             else:
+                 loss_fct = nn.CrossEntropyLoss(reduction='none')
+             logits = self.lm_head(hidden_states)
+             # Enable model parallelism
+             labels = labels.to(logits.device)
+             # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
+             loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+             loss = loss.view(*labels.size())
+             del logits
+             logits = None
+         else:
+             logits = self.lm_head(hidden_states)
+
+         if not return_dict:
+             raise NotImplementedError
+             output = (logits,) + outputs[1:]
+             return (loss,) + output if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=outputs.past_key_values,
+             hidden_states=outputs.hidden_states,
+             attentions=outputs.attentions,
+         )
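
As a side note on the forget-gate bias initialization above: with b = -log(expm1(1/T)) we get sigmoid(b) = 1 / (1 + (e^(1/T) - 1)) = e^(-1/T), so log(sigmoid(b)) = -1/T and T = -1 / log(sigmoid(b)), which is exactly the relation stated in the inline comment. A minimal numerical check (a sketch, not part of the commit; the decay-time range is arbitrary):

import math
import torch

# Log-spaced per-head decay times, as in the layer's bias initialization.
decay_time = torch.exp(torch.linspace(math.log(4.0), math.log(512.0), steps=8, dtype=torch.float64))
bias = -torch.log(torch.expm1(1.0 / decay_time))
# Recover the characteristic decay time from the bias.
recovered = -1.0 / torch.log(torch.sigmoid(bias))
torch.testing.assert_close(recovered, decay_time)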
modeling_forgetting_transformer.py ADDED
@@ -0,0 +1,897 @@
1
+ # -*- coding: utf-8 -*-
2
+
3
+ from __future__ import annotations
4
+
5
+ import math
6
+ import warnings
7
+ from typing import List, Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.utils.checkpoint
12
+ from transformers.activations import ACT2FN
13
+ from transformers.cache_utils import Cache
14
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
15
+ CausalLMOutputWithPast)
16
+ from transformers.modeling_utils import PreTrainedModel
17
+ from transformers.utils import logging
18
+
19
+ # from fla.layers.attn import Attention
20
+ from fla.modules import FusedCrossEntropyLoss, RMSNorm
21
+ from fla.modules.layernorm import group_norm_fn
22
+ from fla.modules.activations import swiglu_linear
23
+
24
+ from fla.modules import RotaryEmbedding
25
+ from einops import rearrange
26
+
27
+ from .configuration_forgetting_transformer import ForgettingTransformerConfig
28
+ from forgetting_transformer.ops.forgetting_attention_std import forgetting_attention_std as forgetting_attention
29
+ from .fgate_cache import FgateDynamicCache
30
+ from .glu_linear import glu_linear
31
+ from .token_shift import token_shift
32
+
33
+ from functools import partial
34
+
35
+ logger = logging.get_logger(__name__)
36
+
37
+
38
+ class ShiftLinear(nn.Module):
39
+
40
+ def __init__(
41
+ self,
42
+ input_dim: int,
43
+ output_dim: int,
44
+ num_heads: int,
45
+ bias: bool,
46
+ shift_bias: bool = False
47
+ ):
48
+ super().__init__()
49
+
50
+ self.input_dim = input_dim
51
+ self.output_dim = output_dim
52
+ self.num_heads = num_heads
53
+ assert self.output_dim % self.num_heads == 0
54
+
55
+ self.linear = nn.Linear(input_dim, output_dim, bias=bias)
56
+ self.shift_proj = nn.Linear(input_dim, num_heads, bias=shift_bias)
57
+
58
+ def __repr__(self) -> str:
59
+ s = f"{self.__class__.__name__}({self.input_dim}, {self.output_dim})"
60
+ return s
61
+
62
+ def forward(self, x: torch.Tensor, shift_state: Optional[torch.Tensor]) -> torch.Tensor:
63
+ assert x.ndim == 3, "Input must be (B, T, D)"
64
+ B, T, D = x.size()
65
+ out = self.linear(x)
66
+ # (B, T, H, 1)
67
+ alpha = torch.sigmoid(self.shift_proj(x).float()).float()
68
+ # left, right, top, bottom (B, T=H, D=W)
69
+ # out_prev = nn.functional.pad(out, (0, 0, 1, -1))
70
+ # out_prev = torch.roll(out, shifts=1, dims=1)
71
+
72
+ out_per_head = rearrange(out, 'b t (h d) -> b t h d', h=self.num_heads)
73
+ if T > 1:
74
+ # TODO: note in this case cache is not used
75
+ result_per_head = token_shift(out_per_head, alpha, 1.0 - alpha)
76
+ else:
77
+ shift_state_per_head = rearrange(shift_state, 'b (h d) -> b 1 h d', h=self.num_heads)
78
+ result_per_head = (alpha[..., None] * shift_state_per_head + (1 - alpha[..., None]) * out_per_head)
79
+
80
+ result_per_head = result_per_head.to(out.dtype)
81
+
82
+ if shift_state is not None:
83
+ shift_state.copy_(out[:, -1, :])
84
+
85
+ result = rearrange(result_per_head, 'b t h d -> b t (h d)', h=self.num_heads)
86
+ return result
87
+
88
+ class GroupRMSNorm(nn.Module):
89
+ def __init__(
90
+ self,
91
+ num_groups: int,
92
+ hidden_size: int,
93
+ elementwise_affine: bool = True,
94
+ bias: bool = False,
95
+ eps: float = 1e-5
96
+ ) -> GroupRMSNorm:
97
+ super().__init__()
98
+
99
+ if hidden_size % num_groups != 0:
100
+ raise ValueError('num_channels must be divisible by num_groups')
101
+
102
+ self.num_groups = num_groups
103
+ self.hidden_size = hidden_size
104
+ self.elementwise_affine = elementwise_affine
105
+ self.eps = eps
106
+
107
+ self.register_parameter("weight", None)
108
+ self.register_parameter("bias", None)
109
+ if elementwise_affine:
110
+ self.weight = nn.Parameter(torch.ones(hidden_size))
111
+ if bias:
112
+ self.bias = nn.Parameter(torch.zeros(hidden_size))
113
+
114
+ def __repr__(self) -> str:
115
+ s = f"{self.__class__.__name__}({self.num_groups}, {self.hidden_size}"
116
+ if not self.elementwise_affine:
117
+ s += f", elementwise_affine={self.elementwise_affine}"
118
+ s += f", eps={self.eps}"
119
+ s += ")"
120
+ return s
121
+
122
+ def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
123
+ return group_norm_fn(
124
+ x,
125
+ self.weight,
126
+ self.bias,
127
+ residual=residual,
128
+ eps=self.eps,
129
+ prenorm=prenorm,
130
+ residual_in_fp32=residual_in_fp32,
131
+ is_rms_norm=True,
132
+ num_groups=self.num_groups
133
+ )
134
+
135
+ class ForgettingAttentionLayer(nn.Module):
136
+
137
+ def __init__(
138
+ self,
139
+ hidden_size: int = 2048,
140
+ num_heads: int = 32,
141
+ num_kv_heads: Optional[int] = None,
142
+ window_size: Optional[int] = None,
143
+ max_position_embeddings: Optional[int] = None,
144
+ use_rope: bool = False,
145
+ rope_base: float = 500000.0,
146
+ use_output_gate: bool = False,
147
+ ogate_act: str = "sigmoid",
148
+ fgate_type: str = "full",
149
+ fgate_bias_init: bool = False,
150
+ decay_time_min: Optional[float] = None,
151
+ decay_time_max: Optional[float] = None,
152
+ use_output_norm: bool = False,
153
+ norm_eps: float = 1e-6,
154
+ qk_norm: bool = False,
155
+ qk_norm_share_param_across_head: bool = False,
156
+ use_k_shift: bool = False,
157
+ use_v_shift: bool = False,
158
+ initializer_range: float = 0.02,
159
+ layer_idx: int = None
160
+ ):
161
+ """
162
+ Forgetting Attention layer.
163
+
164
+ Arguments:
165
+ - hidden_size: Input dimension and qkv dimension
166
+ - num_heads: Number of heads
167
+ - num_kv_heads: Not used. Should be None
168
+ - window_size: Not used. Should be None
169
+ - max_position_embeddings: Not used. Should be None
170
+ - use_rope: Whether to use RoPE. Default is False
171
+ - rope_base: the theta hyperparameter in RoPE. This has no effect if
172
+ use_rope=False
173
+ - use_output_gate: Whether to use output gates. Note that using output gates
174
+ introduces extra parameters and you may want to reduce parameters from
175
+ other components (e.g., MLPs)
176
+ - ogate_act: Activation for the output gate. Either "sigmoid" or "silu"
177
+ - fgate_type: Forget gate type. The following are supported:
178
+ - "full": The default data-dependent forget gate
179
+ - "bias_only": The data-independent forget gate
180
+ - "fixed": Forget gates with fixed values
181
+ - "none": Not using forget gates. Equivalent to forget gates with all
182
+ ones.
183
+ - fgate_bias_init: Whether to use special initalization for the bias terms in
184
+ the forget gate. This should only be used with fgate types in
185
+ ["bias_only", "fixed"].
186
+ - decay_time_min: T_min for the forget gate bias initialization. See paper
187
+ for details.
188
+ - decay_time_max: T_max for the forget gate bias initalization. See paper
189
+ for details.
190
+ - use_output_norm: Whether to use output normalization.
191
+ - norm_eps: Epsilon for the RMSNorms
192
+ - qk_norm: Whether to use qk_norm
193
+ - qk_norm_share_param_across_head: In QK-norm, whether to share the RMSNorm
194
+ scaling parameters across heads. This is just for backward compatibility.
195
+ - use_k_shift: Whether to use data-dependent key shift
196
+ - use_v_shift: Whether to use data-dependent value shift
197
+ - initializer_range: standard deviation for initialization
198
+ - layer_idx: The block index of this layer. Needed for KV-cache
199
+ """
200
+ super().__init__()
201
+
202
+ self.num_heads = num_heads
203
+ if num_kv_heads is None:
204
+ self.num_kv_heads = self.num_heads
205
+ else:
206
+ raise NotImplementedError("GQA has not been tested.")
207
+ self.num_kv_heads = num_kv_heads
208
+ self.num_kv_groups = num_heads // self.num_kv_heads
209
+ self.hidden_size = hidden_size
210
+ self.head_dim = self.hidden_size // self.num_heads
211
+ self.kv_dim = self.num_kv_heads * self.head_dim
212
+ self.kv_dim = self.num_kv_heads * self.head_dim
213
+ self.window_size = window_size
214
+ self.max_position_embeddings = max_position_embeddings
215
+ self.layer_idx = layer_idx
216
+
217
+ self.q_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
218
+ if use_k_shift:
219
+ self.k_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
220
+ else:
221
+ self.k_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
222
+
223
+ if use_v_shift:
224
+ self.v_proj = ShiftLinear(self.hidden_size, self.kv_dim, self.num_heads, bias=False)
225
+ else:
226
+ self.v_proj = nn.Linear(self.hidden_size, self.kv_dim, bias=False)
227
+
228
+ self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
229
+ self.use_k_shift = use_k_shift
230
+ self.use_v_shift = use_v_shift
231
+
232
+
233
+ device = next(self.parameters()).device
234
+ # Forget gate
235
+ assert fgate_type in ["full", "bias_only", "fixed", "none"]
236
+ self.fgate_type = fgate_type
237
+ self.fgate_bias_init = fgate_bias_init
238
+ if fgate_type == "full":
239
+ assert not fgate_bias_init
240
+ self.fgate_proj = nn.Linear(self.hidden_size, self.num_heads, bias=True)
241
+ elif fgate_type == "bias_only":
242
+ self.fgate_bias = nn.Parameter(torch.zeros(size=(self.num_heads,), device=device))
243
+ self.fgate_bias._no_weight_decay = True
244
+ elif fgate_type == "fixed":
245
+ assert fgate_bias_init, "You must set fgate_bias_init = True with fixed fgate"
246
+ fgate_bias = torch.zeros(size=(self.num_heads,), device=device)
247
+ self.register_buffer("fgate_bias", fgate_bias)
248
+ elif fgate_type == "none":
249
+ pass
250
+ else:
251
+ raise ValueError(f"Unknown fgate type {fgate_type}")
252
+
253
+
254
+
255
+ # Forget gate intialization for data-independent and fixed forget gates
256
+ if fgate_bias_init:
257
+ assert decay_time_min is not None and decay_time_max is not None
258
+ assert decay_time_min > 0 and decay_time_max > 0
259
+ with torch.no_grad():
260
+ log_decay_time = torch.linspace(math.log(decay_time_min), math.log(decay_time_max), steps=self.num_heads)
261
+ decay_time = torch.exp(log_decay_time)
262
+ # Such that t = -1 / log(sigmoid(b))
263
+ bias_init = -torch.log(torch.expm1(1 / decay_time))
264
+ self.fgate_bias.copy_(bias_init)
265
+ else:
266
+ assert decay_time_min is None and decay_time_max is None
267
+
268
+ if use_output_gate:
269
+ self.ogate_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
270
+ self.ogate_act = ogate_act
271
+ assert ogate_act in ["silu", "sigmoid"]
272
+ else:
273
+ self.ogate_proj = None
274
+
275
+ if use_output_norm:
276
+ self.output_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size, eps=norm_eps)
277
+ else:
278
+ self.output_norm = None
279
+
280
+
281
+ if use_rope:
282
+ self.rotary = RotaryEmbedding(self.head_dim, base=rope_base)
283
+ else:
284
+ self.rotary = None
285
+
286
+
287
+ self.qk_norm = qk_norm
288
+ self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
289
+ if qk_norm:
290
+ if self.qk_norm_share_param_across_head:
291
+ # This is an incorrect implemention kept just for backward compatibility
292
+ self.q_norm = RMSNorm(self.head_dim)
293
+ self.k_norm = RMSNorm(self.head_dim)
294
+ else:
295
+ self.q_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
296
+ self.k_norm = GroupRMSNorm(num_groups=self.num_heads, hidden_size=self.hidden_size)
297
+
298
+ self.initializer_range = initializer_range
299
+ self.apply(self._initialize_weights)
300
+
301
+ def _initialize_weights(self, module: nn.Module):
302
+ # This will actually be overwritten by outer init.
303
+ if isinstance(module, nn.Linear):
304
+ nn.init.normal_(module.weight, mean=0.0, std=self.initializer_range)
305
+ if module.bias is not None:
306
+ nn.init.zeros_(module.bias)
307
+
308
+ def forward(
309
+ self,
310
+ hidden_states: torch.Tensor,
311
+ attention_mask: Optional[torch.LongTensor] = None,
312
+ past_key_values: Optional[Cache] = None,
313
+ output_attentions: bool = False,
314
+ use_cache: bool = False,
315
+ **kwargs,
316
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
317
+ """
318
+ We assume that during decoding attention mask is always 1. Otherwise it won't work.
319
+ """
320
+ batch_size, q_len, _ = hidden_states.size()
321
+ if use_cache:
322
+ key_shift_state = past_key_values.key_shift_cache[self.layer_idx]
323
+ value_shift_state = past_key_values.value_shift_cache[self.layer_idx]
324
+ else:
325
+ key_shift_state = value_shift_state = None
326
+
327
+ # Shift states are updated in place
328
+ q = self.q_proj(hidden_states)
329
+ if self.use_k_shift:
330
+ k = self.k_proj(hidden_states, key_shift_state)
331
+ else:
332
+ k = self.k_proj(hidden_states)
333
+ if self.use_v_shift:
334
+ v = self.v_proj(hidden_states, value_shift_state)
335
+ else:
336
+ v = self.v_proj(hidden_states)
337
+
338
+ if self.qk_norm and (not self.qk_norm_share_param_across_head):
339
+ q = self.q_norm(q).to(q.dtype)
340
+ k = self.k_norm(k).to(k.dtype)
341
+
342
+ q = rearrange(q, '... (h d) -> ... h d', h=self.num_heads)
343
+ k = rearrange(k, '... (h d) -> ... h d', h=self.num_kv_heads)
344
+ v = rearrange(v, 'b t (h d) -> b h t d', h=self.num_kv_heads)
345
+
346
+
347
+ if self.qk_norm and (self.qk_norm_share_param_across_head):
348
+ q = self.q_norm(q).to(q.dtype)
349
+ k = self.k_norm(k).to(k.dtype)
350
+
351
+
352
+ seqlen_offset, max_seqlen = 0, q.shape[1]
353
+ if past_key_values is not None:
354
+ seqlen_offset = past_key_values.get_seq_length(self.layer_idx)
355
+ max_seqlen = q.shape[1] + seqlen_offset
356
+
357
+ if attention_mask is not None:
358
+ # to deliminate the offsets of padding tokens
359
+ seqlen_offset = (seqlen_offset + attention_mask.sum(-1) - attention_mask.shape[-1])
360
+ max_seqlen = q.shape[1] + max(seqlen_offset)
361
+
362
+ if self.max_position_embeddings is not None:
363
+ max_seqlen = max(max_seqlen, self.max_position_embeddings)
364
+ if self.rotary is not None:
365
+ q, k = self.rotary(q, k, seqlen_offset, max_seqlen)
366
+
367
+ if self.fgate_type == "full":
368
+ fgate_logit = self.fgate_proj(hidden_states)
369
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
370
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
371
+ elif self.fgate_type == "none":
372
+ log_fgate = torch.zeros((batch_size, self.num_heads, q_len), dtype=torch.float32, device=hidden_states.device)
373
+ else:
374
+ assert self.fgate_type in ["fixed", "bias_only"]
375
+ fgate_logit = torch.broadcast_to(self.fgate_bias, (batch_size, q_len, self.num_heads))
376
+ fgate_logit = rearrange(fgate_logit, "b t h -> b h t")
377
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit.float())
378
+
379
+ k = rearrange(k, 'b t h d -> b h t d')
380
+ if past_key_values is not None:
381
+ k, v, log_fgate = past_key_values.update(k, v, log_fgate, self.layer_idx)
382
+ # k, v = rearrange(k, 'b h t d -> b t h d'), rearrange(v, 'b h t d -> b t h d')
383
+ q = rearrange(q, 'b t h d -> b h t d')
384
+
385
+ if self.num_kv_groups > 1:
386
+ assert False
387
+ k = rearrange(k.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
388
+ v = rearrange(v.unsqueeze(-2).repeat(1, 1, 1, self.num_kv_groups, 1), 'b t h g d -> b t (h g) d')
389
+
390
+ # Contains at least one padding token in the sequence
391
+ if attention_mask is not None:
392
+ B, _, T = log_fgate.size()
393
+ assert attention_mask.size() == (B, T), ((B, T), attention_mask.size())
394
+ seq_start = T - attention_mask.sum(dim=-1)
395
+ o = forgetting_attention(
396
+ q, k, v,
397
+ log_fgate,
398
+ head_first=True,
399
+ seq_start=seq_start,
400
+ sm_scale=1 / math.sqrt(self.head_dim),
401
+ )
402
+ o = rearrange(o, "b h t d -> b t h d")
403
+ else:
404
+ o = forgetting_attention(
405
+ q, k, v,
406
+ log_fgate,
407
+ head_first=True,
408
+ sm_scale=1 / math.sqrt(self.head_dim),
409
+ )
410
+ o = rearrange(o, "b h t d -> b t h d")
411
+
412
+ o = o.reshape(batch_size, q_len, self.hidden_size)
413
+
414
+ if self.output_norm is not None:
415
+ o = self.output_norm(o)
416
+
417
+ if self.ogate_proj is not None:
418
+ # ogate = self.ogate act(self.ogate_proj(hidden_states))
419
+ # o = o * ogate
420
+ # ogate = act_gate(self.ogate_proj(hidden_states), o)
421
+ ogate_logit = self.ogate_proj(hidden_states)
422
+ dtype = ogate_logit.dtype
423
+ if self.ogate_act == "silu":
424
+ o = swiglu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
425
+ elif self.ogate_act == "sigmoid":
426
+ o = glu_linear(ogate_logit, o, self.o_proj.weight.to(dtype), self.o_proj.bias.to(dtype) if self.o_proj.bias is not None else self.o_proj.bias)
427
+ else:
428
+ raise ValueError(f"Unknown ogate act {self.ogate_act}")
429
+ else:
430
+ o = self.o_proj(o)
431
+
432
+ if not output_attentions:
433
+ attentions = None
434
+ else:
435
+ SAVE_HEADS = [0, 1, 2, 3]
436
+ # (B, H, T, T)
437
+ score = q[:, SAVE_HEADS] @ k[:, SAVE_HEADS].mT
438
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
439
+ decay_bias = (log_lambda[:, SAVE_HEADS, :, None] - log_lambda[:, SAVE_HEADS, None, :]).to(torch.bfloat16)
440
+ # normalized_score = torch.softmax(score, dim=-1)
441
+ attentions = (score, decay_bias)
442
+
443
+ return o, attentions, past_key_values
444
+
445
+ def init_shift_state(self, batch_size: int):
446
+ param = next(self.parameters())
447
+ state = dict()
448
+ try:
449
+ dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled("cuda") else torch.float32
450
+ except TypeError:
451
+ # Support legacy torch version
452
+ dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else torch.float32
453
+ if self.use_k_shift:
454
+ state['key_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
455
+ else:
456
+ state['key_shift'] = None
457
+ if self.use_v_shift:
458
+ state['value_shift'] = param.new_zeros(batch_size, self.kv_dim, dtype=dtype)
459
+ else:
460
+ state['value_shift'] = None
461
+ return state
462
+
463
+
464
+ class ForgettingTransformerMLP(nn.Module):
465
+
466
+ def __init__(
467
+ self,
468
+ hidden_size: int,
469
+ hidden_ratio: Optional[float] = None,
470
+ intermediate_size: Optional[int] = None,
471
+ hidden_act: str = 'swish'
472
+ ) -> ForgettingTransformerMLP:
473
+ super().__init__()
474
+
475
+ self.hidden_size = hidden_size
476
+ # the final number of params is `hidden_ratio * hidden_size^2`
477
+ # `intermediate_size` is chosen to be a multiple of 256 closest to `2/3 * hidden_size * hidden_ratio`
478
+ if hidden_ratio is None:
479
+ hidden_ratio = 4
480
+ if intermediate_size is None:
481
+ intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
482
+ intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
483
+ self.hidden_ratio = hidden_ratio
484
+ self.intermediate_size = intermediate_size
485
+
486
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
487
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
488
+ self.act_fn = ACT2FN[hidden_act]
489
+ self.hidden_act = hidden_act
490
+ assert hidden_act in ["swish", "sigmoid"]
491
+
492
+ def forward(self, x):
493
+ y = self.gate_proj(x)
494
+ gate, y = y.chunk(2, -1)
495
+ # TODO: maybe wrap swiglu_linear in custom_fwd/custom_bwd
496
+ if self.hidden_act == "swish":
497
+ return swiglu_linear(
498
+ gate, y,
499
+ self.down_proj.weight.to(y.dtype),
500
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
501
+ )
502
+ elif self.hidden_act == "sigmoid":
503
+ return glu_linear(
504
+ gate, y,
505
+ self.down_proj.weight.to(y.dtype),
506
+ self.down_proj.bias.to(y.dtype) if self.down_proj.bias is not None else self.down_proj.bias
507
+ )
508
+ else:
509
+ raise ValueError()
510
+
511
+
512
+ class ForgettingTransformerBlock(nn.Module):
513
+ def __init__(self, config: ForgettingTransformerConfig, layer_idx: int):
514
+ super().__init__()
515
+ self.hidden_size = config.hidden_size
516
+
517
+ self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
518
+ self.attn = ForgettingAttentionLayer(
519
+ hidden_size=config.hidden_size,
520
+ num_heads=config.num_heads,
521
+ num_kv_heads=config.num_kv_heads,
522
+ window_size=config.window_size,
523
+ max_position_embeddings=config.max_position_embeddings,
524
+ rope_base=config.rope_base,
525
+ use_rope=config.use_rope,
526
+ use_output_gate=config.use_output_gate,
527
+ ogate_act=config.ogate_act,
528
+ fgate_type=config.fgate_type,
529
+ fgate_bias_init=config.fgate_bias_init,
530
+ decay_time_min=config.decay_time_min,
531
+ decay_time_max=config.decay_time_max,
532
+ use_output_norm = config.use_output_norm,
533
+ norm_eps=config.norm_eps,
534
+ qk_norm=config.qk_norm,
535
+ qk_norm_share_param_across_head=config.qk_norm_share_param_across_head,
536
+ use_k_shift=config.use_k_shift,
537
+ use_v_shift=config.use_v_shift,
538
+ initializer_range=config.initializer_range,
539
+ layer_idx=layer_idx
540
+ )
541
+ self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
542
+ self.mlp = ForgettingTransformerMLP(
543
+ hidden_size=config.hidden_size,
544
+ hidden_ratio=config.hidden_ratio,
545
+ intermediate_size=config.intermediate_size,
546
+ hidden_act=config.hidden_act
547
+ )
548
+
549
+ def forward_attn(
550
+ self,
551
+ hidden_states: torch.Tensor,
552
+ attention_mask: Optional[torch.Tensor] = None,
553
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
554
+ output_attentions: Optional[bool] = False,
555
+ use_cache: Optional[bool] = False,
556
+ **kwargs,
557
+ ):
558
+ # residual handled outside of this
559
+ # residual = hidden_states
560
+ hidden_states = self.attn_norm(hidden_states)
561
+ hidden_states, attentions, past_key_values = self.attn(
562
+ hidden_states=hidden_states,
563
+ attention_mask=attention_mask,
564
+ past_key_values=past_key_values,
565
+ use_cache=use_cache,
566
+ output_attentions=output_attentions
567
+ )
568
+ return hidden_states, attentions, past_key_values
569
+
570
+ def forward_mlp(
571
+ self,
572
+ hidden_states: torch.Tensor,
573
+ residual: torch.Tensor,
574
+ ):
575
+ hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
576
+ hidden_states = self.mlp(hidden_states)
577
+ hidden_states = residual + hidden_states
578
+
579
+ return hidden_states
580
+
581
+ def forward(
582
+ self,
583
+ hidden_states: torch.Tensor,
584
+ attention_mask: Optional[torch.Tensor] = None,
585
+ past_key_values: Optional[Tuple[torch.Tensor]] = None,
586
+ output_attentions: Optional[bool] = False,
587
+ use_cache: Optional[bool] = False,
588
+ gradient_checkpointing: bool = False
589
+ # **kwargs,
590
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
591
+
592
+ residual = hidden_states
593
+
594
+
595
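+ # When enabled, the attention and MLP sub-blocks are checkpointed separately,
+ # so the backward pass recomputes at most one sub-block's activations at a time.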
+ if gradient_checkpointing:
596
+ forward_attn = partial(torch.utils.checkpoint.checkpoint, self.forward_attn, use_reentrant=False)
597
+ forward_mlp = partial(torch.utils.checkpoint.checkpoint, self.forward_mlp, use_reentrant=False)
598
+ else:
599
+ forward_attn = self.forward_attn
600
+ forward_mlp = self.forward_mlp
601
+
602
+ hidden_states, attentions, past_key_values = forward_attn(
603
+ hidden_states=hidden_states,
604
+ attention_mask=attention_mask,
605
+ past_key_values=past_key_values,
606
+ use_cache=use_cache,
607
+ output_attentions=output_attentions
608
+ )
609
+
610
+ hidden_states = forward_mlp(
611
+ hidden_states,
612
+ residual,
613
+ )
614
+
615
+ outputs = (hidden_states,)
616
+
617
+ if output_attentions:
618
+ outputs += (attentions,)
619
+
620
+ if use_cache:
621
+ outputs += (past_key_values,)
622
+
623
+ return outputs
624
+
625
+
626
+
627
+ class ForgettingTransformerPreTrainedModel(PreTrainedModel):
628
+
629
+ config_class = ForgettingTransformerConfig
630
+ supports_gradient_checkpointing = True
631
+ _no_split_modules = ['ForgettingTransformerBlock']
632
+
633
+ def __init__(self, *inputs, **kwargs):
634
+ super().__init__(*inputs, **kwargs)
635
+
636
+ def _init_weights(
637
+ self,
638
+ module: nn.Module,
639
+ ):
640
+ # if isinstance(module, (nn.Linear, nn.Conv1d)):
641
+ if isinstance(module, (nn.Linear)):
642
+ # Slightly different from the TF version which uses truncated_normal for initialization
643
+ # cf https://github.com/pytorch/pytorch/pull/5617
644
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
645
+ if module.bias is not None:
646
+ nn.init.zeros_(module.bias)
647
+ elif isinstance(module, nn.Embedding):
648
+ nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
649
+ if module.padding_idx is not None:
650
+ module.weight.data[module.padding_idx].zero_()
651
+
652
+
653
+ class ForgettingTransformerModel(ForgettingTransformerPreTrainedModel):
654
+
655
+ def __init__(self, config: ForgettingTransformerConfig):
656
+ super().__init__(config)
657
+ self.padding_idx = config.pad_token_id
658
+ self.vocab_size = config.vocab_size
659
+
660
+ self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
661
+ self.layers = nn.ModuleList([ForgettingTransformerBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
662
+ self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)
663
+
664
+ self.gradient_checkpointing = False
665
+
666
+ self.post_init()
667
+
668
+ def get_input_embeddings(self):
669
+ return self.embeddings
670
+
671
+ def set_input_embeddings(self, value):
672
+ self.embeddings = value
673
+
674
+ def forward(
675
+ self,
676
+ input_ids: Optional[torch.LongTensor] = None,
677
+ attention_mask: Optional[torch.Tensor] = None,
678
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
679
+ inputs_embeds: Optional[torch.FloatTensor] = None,
680
+ use_cache: Optional[bool] = None,
681
+ output_attentions: Optional[bool] = None,
682
+ output_hidden_states: Optional[bool] = None,
683
+ return_dict: Optional[bool] = None
684
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
685
+ # if output_attentions:
686
+ # warnings.warn(
687
+ # "`ForgettingTransformerModel` does not support output attention weights now, so `output_attentions` is set to `False`."
688
+ # )
689
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
690
+ output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
691
+ use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
692
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
693
+
694
+ # retrieve input_ids and inputs_embeds
695
+ if input_ids is not None and inputs_embeds is not None:
696
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
697
+ elif input_ids is None and inputs_embeds is None:
698
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
699
+
700
+ if use_cache:
701
+ # use_legacy_cache = not isinstance(past_key_values, Cache)
702
+ # if use_legacy_cache:
703
+ # past_key_values = FgateDynamicCache.from_legacy_cache(past_key_values)
704
+ if past_key_values is None:
705
+ past_key_values = FgateDynamicCache()
706
+ for layer_idx, layer in enumerate(self.layers):
707
+ shift_state = layer.attn.init_shift_state(
708
+ batch_size=input_ids.size(0),
709
+ )
710
+ past_key_values.update_shift_cache(
711
+ key_shift_state=shift_state["key_shift"],
712
+ value_shift_state=shift_state["value_shift"],
713
+ layer_idx=layer_idx
714
+ )
715
+ else:
716
+ assert isinstance(past_key_values, FgateDynamicCache)
717
+
718
+ if inputs_embeds is None:
719
+ inputs_embeds = self.embeddings(input_ids)
720
+
721
+ # embed positions
722
+ hidden_states = inputs_embeds
723
+
724
+ if self.gradient_checkpointing and self.training:
725
+ if use_cache:
726
+ logger.warning_once(
727
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
728
+ )
729
+ use_cache = False
730
+
731
+ all_hidden_states = () if output_hidden_states else None
732
+ all_attns = {} if output_attentions else None
733
+ next_decoder_cache = None
734
+
735
+ for layer_id, layer in enumerate(self.layers):
736
+ if output_hidden_states:
737
+ all_hidden_states += (hidden_states,)
738
+
739
+ layer_outputs = layer(
740
+ hidden_states,
741
+ attention_mask=attention_mask,
742
+ past_key_values=past_key_values,
743
+ output_attentions=output_attentions,
744
+ use_cache=use_cache,
745
+ gradient_checkpointing=self.gradient_checkpointing and self.training
746
+ )
747
+
748
+ hidden_states = layer_outputs[0]
749
+
750
+ if use_cache:
751
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
752
+
753
+ if output_attentions:
754
+ OUTPUT_ATTN_LAYERS = [0, 7, 15, 23]
755
+ if layer_id in OUTPUT_ATTN_LAYERS:
756
+ # all_attns += (layer_outputs[1],)
757
+ all_attns[layer_id] = layer_outputs[1]
758
+
759
+ hidden_states = self.norm(hidden_states)
760
+
761
+ # add hidden states from the last decoder layer
762
+ if output_hidden_states:
763
+ all_hidden_states += (hidden_states,)
764
+
765
+ next_cache = None
766
+ if use_cache:
767
+ # next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
768
+ next_cache = next_decoder_cache
769
+ if not return_dict:
770
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_attns] if v is not None)
771
+
772
+ return BaseModelOutputWithPast(
773
+ last_hidden_state=hidden_states,
774
+ past_key_values=next_cache,
775
+ hidden_states=all_hidden_states,
776
+ attentions=all_attns
777
+ )
778
+
779
+
780
+ class ForgettingTransformerForCausalLM(ForgettingTransformerPreTrainedModel):
781
+ _tied_weights_keys = ["lm_head.weight"]
782
+
783
+ def __init__(self, config):
784
+ super().__init__(config)
785
+ self.model = ForgettingTransformerModel(config)
786
+ self.vocab_size = config.vocab_size
787
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
788
+
789
+ # Initialize weights and apply final processing
790
+ self.post_init()
791
+
792
+ def get_input_embeddings(self):
793
+ return self.model.embeddings
794
+
795
+ def set_input_embeddings(self, value):
796
+ self.model.embeddings = value
797
+
798
+ def get_output_embeddings(self):
799
+ return self.lm_head
800
+
801
+ def set_output_embeddings(self, new_embeddings):
802
+ self.lm_head = new_embeddings
803
+
804
+ def set_decoder(self, decoder):
805
+ self.model = decoder
806
+
807
+ def get_decoder(self):
808
+ return self.model
809
+
810
+ def prepare_inputs_for_generation(
811
+ self,
812
+ input_ids: torch.LongTensor = None,
813
+ past_key_values: Optional[torch.Tensor] = None,
814
+ attention_mask: Optional[torch.Tensor] = None,
815
+ inputs_embeds: Optional[torch.Tensor] = None,
816
+ **kwargs
817
+ ):
818
+ # only use the last token of `input_ids` if `past_key_values` is passed along.
819
+ if past_key_values is not None:
820
+ input_ids = input_ids[:, -1:]
821
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
822
+ if inputs_embeds is not None and past_key_values is None:
823
+ model_inputs = {'inputs_embeds': inputs_embeds}
824
+ else:
825
+ # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise
826
+ # recompiles graphs as the stride of the inputs is a guard.
827
+ # Ref: https://github.com/huggingface/transformers/pull/29114
828
+ # TODO: use `next_tokens` directly instead.
829
+ model_inputs = {'input_ids': input_ids.contiguous()}
830
+
831
+ model_inputs.update({
832
+ 'past_key_values': past_key_values,
833
+ 'use_cache': kwargs.get('use_cache'),
834
+ 'attention_mask': attention_mask,
835
+ })
836
+ return model_inputs
837
+
838
+ def forward(
839
+ self,
840
+ input_ids: torch.LongTensor = None,
841
+ attention_mask: Optional[torch.Tensor] = None,
842
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
843
+ inputs_embeds: Optional[torch.FloatTensor] = None,
844
+ labels: Optional[torch.LongTensor] = None,
845
+ use_cache: Optional[bool] = None,
846
+ output_attentions: Optional[bool] = None,
847
+ output_hidden_states: Optional[bool] = None,
848
+ return_dict: Optional[bool] = None,
849
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
850
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
851
+ output_hidden_states = (
852
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
853
+ )
854
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
855
+
856
+ outputs = self.model(
857
+ input_ids=input_ids,
858
+ attention_mask=attention_mask,
859
+ past_key_values=past_key_values,
860
+ inputs_embeds=inputs_embeds,
861
+ use_cache=use_cache,
862
+ output_attentions=output_attentions,
863
+ output_hidden_states=output_hidden_states,
864
+ return_dict=return_dict
865
+ )
866
+
867
+ hidden_states = outputs[0]
868
+
869
+ loss = None
870
+ if labels is not None:
871
+ if self.config.fuse_cross_entropy:
872
+ loss_fct = FusedCrossEntropyLoss(inplace_backward=True, reduction='none')
873
+ else:
874
+ loss_fct = nn.CrossEntropyLoss(reduction='none')
875
+ logits = self.lm_head(hidden_states)
876
+ # Enable model parallelism
877
+ labels = labels.to(logits.device)
878
+ # labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
879
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
880
+ loss = loss.view(*labels.size())
881
+ del logits
882
+ logits = None
883
+ else:
884
+ logits = self.lm_head(hidden_states)
885
+
886
+ if not return_dict:
887
+ raise NotImplementedError
888
+ output = (logits,) + outputs[1:]
889
+ return (loss,) + output if loss is not None else output
890
+
891
+ return CausalLMOutputWithPast(
892
+ loss=loss,
893
+ logits=logits,
894
+ past_key_values=outputs.past_key_values,
895
+ hidden_states=outputs.hidden_states,
896
+ attentions=outputs.attentions,
897
+ )
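+
+ # A minimal usage sketch (illustrative; the repo id below is a placeholder, and
+ # any checkpoint that ships this class as remote code should work):
+ #
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
+ # model = AutoModelForCausalLM.from_pretrained("org/fox-model", trust_remote_code=True)
+ # tokenizer = AutoTokenizer.from_pretrained("org/fox-model")
+ # inputs = tokenizer("Hello world", return_tensors="pt")
+ # out = model(**inputs, labels=inputs["input_ids"])
+ # # With `labels`, the returned loss is per-token (reduction='none') and shaped
+ # # like `labels`, while `out.logits` is None; reduce it yourself, e.g.
+ # # out.loss.mean(). Labels are used as-is: no shift is applied internally.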
ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the innermost dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked values to affect the result. If we
65
+ # didn't zero them out here, they could do so by affecting the numerical
66
+ # precision of the cumsum.
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "Grouped-query attention has not been tested. You can comment this assertion out if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value sequence must be at least as long as the query sequence"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of q.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
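+ # Since log_lambda = cumsum(log_fgate), the chain rule gives
+ # dlog_fgate[i] = sum_{j >= i} dlog_lambda[j]; the two lines below compute this
+ # reverse cumulative sum as dlog_lambda + total - forward_cumsum.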
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all
260
+ FloatTensor inputs and outputs should be ordered as (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads)
262
+ - seq_start: If not None, this should be a LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
280
+
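+ # A minimal usage sketch (illustrative shapes, not part of the original file):
+ #
+ # import torch
+ # import torch.nn.functional as F
+ # B, T, H, D = 2, 128, 4, 64
+ # q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ # k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ # v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ # fgate_logit = torch.randn(B, T, H, device="cuda", dtype=torch.float32)
+ # log_fgate = F.logsigmoid(fgate_logit)
+ # out = forgetting_attention(q, k, v, log_fgate)  # (B, T, H, D)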
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
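+ # Because the forward pass looks this function up at call time, it can be
+ # monkey-patched to try custom tile sizes, e.g. (hypothetical override; the
+ # module path depends on your install):
+ #
+ # import forgetting_transformer.ops.forgetting_attention as fa
+ # fa.get_fwd_config = lambda B, H, M, N, D, causal: (64, 64, 2, 4)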
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ # Dot-I trick: placing q in registers saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in m-dimension that this block may access are in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # According to the rule of causal masking, the max index in n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of index in n-dimension should never exceed the sequence length of k/v (`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # In this case, there would be an illegal memory access when loading the k & v tiles
400
+ # if mask_n is not applied for loading (only when `DIVISIBLE_N` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N,)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
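+ # log_lambda is the cumulative sum of log forget gates, so the difference
+ # below equals the total log decay accumulated between key position j and
+ # query position i (decay_bias[i, j] = sum of log_fgate over (j, i]).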
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute the partial sum of exponentials --
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute delta = (Out * dOut).sum(-1) per row; this term appears later in ds = p * (dp - delta)
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the whole N block lies before seq_start, all its gradients are masked out, so skip the loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_N, BLOCK_M)
727
+
728
+ # load q, log_lambda_out and do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL)
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v and log_lambda_in on-chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
ops/__init__.py ADDED
File without changes
ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (169 Bytes).
ops/__pycache__/forgetting_attention.cpython-310.pyc ADDED
Binary file (25.1 kB).
ops/__pycache__/forgetting_attention_std.cpython-310.pyc ADDED
Binary file (1.84 kB).
ops/forgetting_attention.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the innermost dimension is contiguous can LDGSTS be used,
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked value to affect result. If we
65
+ # don't do this then it could via affecting numerical precision of
66
+ # cumsum
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "Grouped-query attention has not been tested. Remove this assertion if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv are allocated with the same number of heads as q, not as k & v; they are summed over groups afterwards.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None # one gradient per forward input (seq_start, causal, sm_scale, return_log_normalizer)
229
+
230
+
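
Note on the `dlog_fgate` computation in `backward` above: since `log_lambda = cumsum(log_fgate)`, the chain rule gives `dlog_fgate = reverse_cumsum(dlog_lambda)`, and the reverse cumulative sum is expressed through a forward cumsum as `dlog_lambda + dcumsum[..., -1:] - dcumsum`. A minimal sketch checking this identity against autograd (illustrative only, not part of the committed file):

import torch

x = torch.randn(8, dtype=torch.float64, requires_grad=True)
y = torch.cumsum(x, dim=-1)
g = torch.randn(8, dtype=torch.float64)   # upstream gradient w.r.t. y
y.backward(g)

c = torch.cumsum(g, dim=-1)
manual = g + c[-1:] - c                   # reverse cumsum of g, as in backward() above
assert torch.allclose(x.grad, manual)
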
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all the
260
+ FloatTensor inputs and outputs are ordered as (num_heads, seq_len_*) instead of
261
+ (seq_len_*, num_heads)
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
280
+
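
A minimal usage sketch of the public API above, following the docstring's shapes; the import path assumes this repo's package layout makes the module importable as `forgetting_transformer.ops.forgetting_attention` (the sizes are arbitrary):

import torch
import torch.nn.functional as F
from forgetting_transformer.ops.forgetting_attention import forgetting_attention

B, T, H, D = 2, 1024, 8, 64
q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
# log of the forget gates, float32, shape (batch, seqlen, num_heads)
log_fgate = F.logsigmoid(torch.randn(B, T, H, device="cuda", dtype=torch.float32))

out = forgetting_attention(q, k, v, log_fgate)   # (B, T, H, D)
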
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overridden at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
317
+
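
As the NOTE above says, the config function can be replaced at runtime. A small sketch of overriding it (the module alias is an assumption about the install path; the tile sizes are illustrative):

import forgetting_transformer.ops.forgetting_attention as fa

def my_fwd_config(B, H, M, N, D, causal):
    # always use a fixed launch config (illustrative values)
    return (64, 64, 2, 4)   # BLOCK_M, BLOCK_N, num_stages, num_warps

fa.get_fwd_config = my_fwd_config   # later forward calls pick this up
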
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ # Dot-I trick: placing q in registers saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
394
+ # The indices in the m-dimension that this block may access are in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
395
+ # According to the rule of causal masking, the max index in the n-dimension that this block may access
396
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
397
+ # However, the upper bound of the index in the n-dimension should never exceed the sequence length of k/v (`P_SEQ + N_CTX`).
398
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
399
+ # In this case, there would be an illegal memory access when loading the k & v tiles
400
+ # if mask_n is not applied for loading (i.e., only when `DIVISIBLE_N` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N,)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sum of exponentials (there is no dropout in this kernel) --
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
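
The forward kernel above is a standard online-softmax (FlashAttention-style) loop, with the decay bias `log_lambda_out - log_lambda_in` folded into the base-2 logits. A single-row PyTorch sketch of the `(m_i, l_i, acc)` update it performs per BLOCK_N tile (illustrative only; names are local to the example):

import torch

def online_softmax_row(s_blocks, v_blocks):
    # s_blocks: base-2 logit tiles (BLOCK_N,), v_blocks: matching value tiles (BLOCK_N, D)
    m = torch.tensor(float("-inf"))
    l = torch.tensor(0.0)
    acc = torch.zeros(v_blocks[0].shape[-1])
    for s, v in zip(s_blocks, v_blocks):
        m_new = torch.maximum(m, s.max())
        alpha = torch.exp2(m - m_new)      # rescale previous partial results
        p = torch.exp2(s - m_new)
        acc = acc * alpha + p @ v
        l = l * alpha + p.sum()
        m = m_new
    return acc / l                         # equals softmax2(s) @ v over all tiles
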
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overridden at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute delta = rowsum(Out * DOut), the row-wise term needed by the softmax backward
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
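
Why `delta = rowsum(o * do)` is the right quantity: for `p = softmax(s)` and `o = p @ v`, the softmax backward is `ds = p * (dp - rowsum(p * dp))`, and `rowsum(p * dp) == rowsum(o * do)`. A small float64 sanity check (illustrative only):

import torch

s = torch.randn(4, 6, dtype=torch.float64, requires_grad=True)
v = torch.randn(6, 3, dtype=torch.float64)
do = torch.randn(4, 3, dtype=torch.float64)

p = torch.softmax(s, dim=-1)
o = p @ v
dp = do @ v.T
delta = (o * do).sum(-1, keepdim=True)   # what _bwd_preprocess computes
ds_manual = p * (dp - delta)

o.backward(do)
assert torch.allclose(s.grad, ds_manual)
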
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk, dv, and dlog_lambda_in
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_M, BLOCK_N)
727
+
728
+ # load q, log_lambda_out, and do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since the softmax recomputation in the backward is pointwise and the normalizer was saved in fwd,
743
+ # masking on sT is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q)
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N, BLOCK_DMODEL,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N, BLOCK_DMODEL,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v, log_lambda_in on-chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since the softmax recomputation in the backward is pointwise and the normalizer was saved in fwd,
941
+ # masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds so that invalid positions contribute exactly zero
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
992
+
993
+
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
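+     # Worked example of the FLOP accounting above (illustrative comment):
+     # at BATCH=4, H=32, HEAD_DIM=128, N_CTX=2**14, causal fwd,
+     # total_flops = 0.5 * 2 * (2 * 4 * 32 * 16384**2 * 128) ≈ 8.8e12,
+     # so a 100 ms run would be reported as roughly 88 TFLOPS.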
ops/forgetting_attention_std.py ADDED
@@ -0,0 +1,72 @@
+ """
+ Forgetting Attention - standard softmax version.
+ (Meant to be appended at the end of forgetting_attention.py.)
+ """
+
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def forgetting_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     log_fgate: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """Standard-softmax reference implementation of Forgetting Attention."""
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+         log_fgate = rearrange(log_fgate, "b t h -> b h t")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # QK scores
+     scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Zero out the log forget gates before seq_start so they do not
+     # contribute to the cumulative decay (masked_fill broadcasts the mask).
+     log_fgate_masked = log_fgate.float()
+     if seq_start is not None:
+         mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]
+         log_fgate_masked = log_fgate_masked.masked_fill(mask_idx, 0.0)
+
+     # Cumulative decay: bias[i, j] = sum_{t=j+1}^{i} log f_t = log_lambda[i] - log_lambda[j].
+     # Query i sits at key position i + (T_k - T_q), so take the last T_q
+     # entries of the cumulative sum on the query side.
+     log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
+     decay_bias = log_lambda[:, :, T_k - T_q:, None] - log_lambda[:, :, None, :]
+     scores = scores + decay_bias
+
+     # Causal mask
+     P_SEQ = T_k - T_q
+     causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
+     scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # seq_start mask; seq_start has shape (B,), so index it on the batch dim.
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
+         scores = scores.masked_fill(seq_mask, float('-inf'))
+
+     # Softmax; fully masked rows produce NaNs, so replace them with 0.
+     attn = F.softmax(scores, dim=-1)
+     attn = torch.nan_to_num(attn, 0.0)
+
+     # Output
+     out = torch.matmul(attn.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
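+
+
+ if __name__ == "__main__":
+     # A minimal sanity-check sketch (illustrative only, runs on CPU): with all
+     # forget gates equal to 1 (log_fgate == 0) the decay bias vanishes, so the
+     # output should match plain causal scaled-dot-product attention.
+     B, T, H, D = 2, 16, 4, 64
+     q, k, v = (torch.randn(B, T, H, D) for _ in range(3))
+     zero_log_fgate = torch.zeros(B, T, H)
+     out = forgetting_attention_std(q, k, v, zero_log_fgate)
+     ref = F.scaled_dot_product_attention(
+         q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), is_causal=True
+     ).transpose(1, 2)
+     print("max abs diff vs causal SDPA:", (out - ref).abs().max().item())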
token_shift.py ADDED
@@ -0,0 +1,314 @@
+ import torch
+
+ import triton
+ import triton.language as tl
+ import pytest
+
+ def maybe_contiguous(x):
+     # Only when the innermost dimension is contiguous can LDGSTS be used,
+     # so inner-dimension contiguity is enforced.
+     return x.contiguous() if x.stride(-1) != 1 else x
+
+ @triton.jit
+ def shift_fwd_kernel(
+     X_PTR,
+     PREV_WEIGHT_PTR,
+     CURR_WEIGHT_PTR,
+     OUT_PTR,
+
+     stride_x_b, stride_x_t, stride_x_h, stride_x_d,
+     stride_weight_b, stride_weight_t, stride_weight_h,
+     T: tl.constexpr, D: tl.constexpr,
+     BLOCK_T: tl.constexpr,
+ ):
+     """
+     x and out are (B, T, H, D); the weights are (B, T, H).
+     Each program handles one (batch, time-block, head) tile.
+     """
+     b_offset = tl.program_id(axis=0).to(tl.int64)
+     t_offset = tl.program_id(axis=1).to(tl.int64) * BLOCK_T
+     h_offset = tl.program_id(axis=2).to(tl.int64)
+
+     x_ptr_offset = b_offset * stride_x_b + t_offset * stride_x_t + h_offset * stride_x_h
+     X_PTR += x_ptr_offset
+     OUT_PTR += x_ptr_offset
+
+     weight_ptr_offset = b_offset * stride_weight_b + t_offset * stride_weight_t + h_offset * stride_weight_h
+     CURR_WEIGHT_PTR += weight_ptr_offset
+     PREV_WEIGHT_PTR += weight_ptr_offset
+
+     x_ptr = X_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_x_t + tl.arange(0, D)[None, :] * stride_x_d
+     t_offset_block = t_offset + tl.arange(0, BLOCK_T)[:, None]
+     x_mask = t_offset_block < T
+
+     # x at t-1 is reached by stepping back one t-stride; the mask zeroes
+     # out the (non-existent) predecessor of t == 0.
+     x_prev_ptr = x_ptr - stride_x_t
+     t_prev_offset_block = t_offset_block - 1
+     x_prev_mask = (t_prev_offset_block < T) & (t_prev_offset_block >= 0)
+
+     curr_weight_ptr = CURR_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+     prev_weight_ptr = PREV_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+
+     x = tl.load(x_ptr, mask=x_mask, other=0.0)
+     x_prev = tl.load(x_prev_ptr, mask=x_prev_mask, other=0.0)
+     curr_weight = tl.load(curr_weight_ptr, mask=x_mask, other=0.0)
+     prev_weight = tl.load(prev_weight_ptr, mask=x_mask, other=0.0)
+
+     # out[t] = curr_weight[t] * x[t] + prev_weight[t] * x[t - 1]
+     result = x * curr_weight.to(tl.float32) + x_prev * prev_weight.to(tl.float32)
+     result = result.to(x.dtype)
+
+     out_ptr = OUT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_x_t + tl.arange(0, D)[None, :] * stride_x_d
+     tl.store(out_ptr, result, mask=x_mask)
+
+
+ @triton.jit
+ def shift_bwd_kernel(
+     X_PTR,
+     PREV_WEIGHT_PTR,
+     CURR_WEIGHT_PTR,
+
+     DOUT_PTR,
+     DX_PTR,
+     DPREV_WEIGHT_PTR,
+     DCURR_WEIGHT_PTR,
+
+     stride_x_b, stride_x_t, stride_x_h, stride_x_d,
+     stride_weight_b, stride_weight_t, stride_weight_h,
+     T: tl.constexpr, D: tl.constexpr,
+     BLOCK_T: tl.constexpr,
+ ):
+     """
+     Backward of out[t] = curr_weight[t] * x[t] + prev_weight[t] * x[t - 1]:
+         dx[t]           = curr_weight[t] * dout[t] + prev_weight[t + 1] * dout[t + 1]
+         dcurr_weight[t] = sum_d dout[t] * x[t]
+         dprev_weight[t] = sum_d dout[t] * x[t - 1]
+     Shapes match the forward kernel: x is (B, T, H, D), weights are (B, T, H).
+     """
+     b_offset = tl.program_id(axis=0).to(tl.int64)
+     t_offset = tl.program_id(axis=1).to(tl.int64) * BLOCK_T
+     h_offset = tl.program_id(axis=2).to(tl.int64)
+
+     x_ptr_offset = b_offset * stride_x_b + t_offset * stride_x_t + h_offset * stride_x_h
+     X_PTR += x_ptr_offset
+     DX_PTR += x_ptr_offset
+     DOUT_PTR += x_ptr_offset
+
+     weight_ptr_offset = b_offset * stride_weight_b + t_offset * stride_weight_t + h_offset * stride_weight_h
+     CURR_WEIGHT_PTR += weight_ptr_offset
+     PREV_WEIGHT_PTR += weight_ptr_offset
+     DCURR_WEIGHT_PTR += weight_ptr_offset
+     DPREV_WEIGHT_PTR += weight_ptr_offset
+
+     x_ptr = X_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_x_t + tl.arange(0, D)[None, :] * stride_x_d
+     t_offset_block = t_offset + tl.arange(0, BLOCK_T)[:, None]
+     x_mask = t_offset_block < T
+
+     dout_ptr = DOUT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_x_t + tl.arange(0, D)[None, :] * stride_x_d
+
+     # dout at t+1, masked out for the last timestep.
+     dout_next_ptr = dout_ptr + stride_x_t
+     t_next_offset_block = t_offset_block + 1
+     x_next_mask = t_next_offset_block < T
+
+     # x at t-1, masked out for t == 0.
+     x_prev_ptr = x_ptr - stride_x_t
+     t_prev_offset_block = t_offset_block - 1
+     x_prev_mask = (t_prev_offset_block < T) & (t_prev_offset_block >= 0)
+
+     curr_weight_ptr = CURR_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+     prev_weight_ptr = PREV_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+     next_prev_weight_ptr = prev_weight_ptr + stride_weight_t
+
+     x = tl.load(x_ptr, mask=x_mask, other=0.0)
+     x_prev = tl.load(x_prev_ptr, mask=x_prev_mask, other=0.0)
+     dout = tl.load(dout_ptr, mask=x_mask, other=0.0)
+     dout_next = tl.load(dout_next_ptr, mask=x_next_mask, other=0.0)
+
+     curr_weight = tl.load(curr_weight_ptr, mask=x_mask, other=0.0)
+     next_prev_weight = tl.load(next_prev_weight_ptr, mask=x_next_mask, other=0.0)
+
+     dx = dout * curr_weight.to(tl.float32) + dout_next * next_prev_weight.to(tl.float32)
+     dx = dx.to(x.dtype)
+
+     dcurr_weight = tl.sum(dout.to(tl.float32) * x, axis=1, keep_dims=True)
+     dprev_weight = tl.sum(dout.to(tl.float32) * x_prev, axis=1, keep_dims=True)
+
+     dx_ptr = DX_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_x_t + tl.arange(0, D)[None, :] * stride_x_d
+     tl.store(dx_ptr, dx, mask=x_mask)
+     dcurr_weight_ptr = DCURR_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+     tl.store(dcurr_weight_ptr, dcurr_weight, mask=x_mask)
+     dprev_weight_ptr = DPREV_WEIGHT_PTR + tl.arange(0, BLOCK_T)[:, None] * stride_weight_t
+     tl.store(dprev_weight_ptr, dprev_weight, mask=x_mask)
+
+
+ class TokenShift(torch.autograd.Function):
+
+     @staticmethod
+     def forward(ctx, x: torch.Tensor, prev_weight: torch.Tensor, curr_weight: torch.Tensor):
+         B, T, H, D = x.size()
+         assert D in {16, 32, 64, 128}
+         assert prev_weight.size() == curr_weight.size() == (B, T, H)
+         assert prev_weight.stride() == curr_weight.stride()
+         x = maybe_contiguous(x)
+         out = torch.empty_like(x)
+
+         BLOCK_T = triton.next_power_of_2(min(64, T))
+
+         grid = lambda meta: (B, triton.cdiv(T, meta["BLOCK_T"]), H)
+         # NOTE:
+         # - Each torch.Tensor argument is implicitly converted into a pointer to its first element.
+         # - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel.
+         # - Meta-parameters must be passed as keyword arguments.
+         shift_fwd_kernel[grid](
+             x,
+             prev_weight,
+             curr_weight,
+             out,
+             *x.stride(),
+             *curr_weight.stride(),
+             T=T, D=D,
+             BLOCK_T=BLOCK_T,
+         )
+         ctx.save_for_backward(x, prev_weight, curr_weight)
+         # We return a handle to `out`; since `torch.cuda.synchronize()` hasn't been
+         # called, the kernel may still be running asynchronously at this point.
+         return out
+
+     @staticmethod
+     def backward(ctx, dout: torch.Tensor):
+         x, prev_weight, curr_weight = ctx.saved_tensors
+         B, T, H, D = x.size()
+         assert D in {16, 32, 64, 128}
+         assert prev_weight.size() == curr_weight.size() == (B, T, H)
+         assert prev_weight.stride() == curr_weight.stride()
+         x = maybe_contiguous(x)
+         assert dout.stride() == x.stride()
+         dx = torch.empty_like(x)
+         dcurr_weight = torch.empty_like(curr_weight)
+         dprev_weight = torch.empty_like(prev_weight)
+
+         BLOCK_T = triton.next_power_of_2(min(64, T))
+
+         grid = lambda meta: (B, triton.cdiv(T, meta["BLOCK_T"]), H)
+         shift_bwd_kernel[grid](
+             x,
+             prev_weight,
+             curr_weight,
+             dout,
+             dx,
+             dprev_weight,
+             dcurr_weight,
+             *x.stride(),
+             *curr_weight.stride(),
+             T=T,
+             D=D,
+             BLOCK_T=BLOCK_T,
+         )
+         return dx, dprev_weight, dcurr_weight
+
+ def token_shift(x, prev_weight, curr_weight):
+     """out[:, t] = curr_weight[:, t, :, None] * x[:, t] + prev_weight[:, t, :, None] * x[:, t - 1], with x[:, -1] := 0."""
+     return TokenShift.apply(x, prev_weight, curr_weight)
+
+
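+ def token_shift_ref(x, prev_weight, curr_weight):
+     # Pure-PyTorch reference sketch of the kernels above (`token_shift_ref`
+     # is an illustrative helper added here, mirroring the reference
+     # computation used in test_op below; it is not called by the model code).
+     # The predecessor of the first timestep is treated as zero.
+     x_prev = torch.roll(x, shifts=1, dims=1)
+     x_prev[:, 0] = 0.0
+     return x_prev * prev_weight[..., None] + x * curr_weight[..., None]
+
+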
+ @pytest.mark.parametrize("B, T, H, D", [(4, 2048, 12, 128), (4, 2088, 12, 128)])
+ def test_op(B, T, H, D, dtype=torch.float32):
+     # T = 2088 is deliberately not a multiple of BLOCK_T, exercising the masks.
+     torch.manual_seed(24)
+     x = torch.randn(B, T, H, D, device="cuda", dtype=dtype, requires_grad=True)
+     dout = torch.randn(B, T, H, D, device="cuda", dtype=dtype)
+     curr_weight = torch.rand(B, T, H, device="cuda", requires_grad=True)
+
+     # Reference: the predecessor of the first timestep is zero.
+     prev_weight = 1.0 - curr_weight
+     x_prev = torch.roll(x, shifts=1, dims=1)
+     x_prev[:, 0, :, :] = 0.0
+     ref_out = (x_prev * prev_weight[..., None] + x * curr_weight[..., None]).to(dtype)
+
+     ref_out.backward(dout)
+     ref_dx, x.grad = x.grad.clone(), None
+     ref_dcurr_weight, curr_weight.grad = curr_weight.grad.clone(), None
+
+     prev_weight = 1.0 - curr_weight
+     tri_out = token_shift(x, prev_weight, curr_weight)
+
+     tri_out.backward(dout)
+     tri_dx, x.grad = x.grad.clone(), None
+     tri_dcurr_weight, curr_weight.grad = curr_weight.grad.clone(), None
+
+     assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
+     assert torch.allclose(ref_dx, tri_dx, atol=1e-2, rtol=0), (ref_dx - tri_dx).abs().max()
+     assert torch.allclose(ref_dcurr_weight, tri_dcurr_weight, atol=1e-2, rtol=0), (ref_dcurr_weight - tri_dcurr_weight).abs().max()
+
+ if __name__ == "__main__":
+     # Quick smoke test; requires a CUDA device for the Triton kernels.
+     torch.manual_seed(0)
+     B, T, H, D = 4, 2088, 12, 128
+     x = torch.randn(B, T, H, D, device="cuda")
+     curr_weight = torch.rand(B, T, H, device="cuda")
+     prev_weight = 1.0 - curr_weight
+     result = token_shift(x, prev_weight, curr_weight)
+     print(result[0, :, 0, 0])