Lanni-ni committed on
Commit eff90fa · verified · 1 Parent(s): 10ab981

add remote code + model files

Files changed (48)
  1. .ipynb_checkpoints/modeling_sliding_window-checkpoint.py +194 -0
  2. __init__.py +1 -0
  3. __pycache__/__init__.cpython-310.pyc +0 -0
  4. __pycache__/configuration_sliding_window.cpython-310.pyc +0 -0
  5. __pycache__/modeling_sliding_window.cpython-310.pyc +0 -0
  6. configuration_sliding_window.py +70 -0
  7. modeling_sliding_window.py +194 -0
  8. ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py +1138 -0
  9. ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py +72 -0
  10. ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py +179 -0
  11. ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py +88 -0
  12. ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py +117 -0
  13. ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py +171 -0
  14. ops/__init__.py +3 -0
  15. ops/__pycache__/__init__.cpython-310.pyc +0 -0
  16. ops/__pycache__/direction_sensitive_geometric.cpython-310.pyc +0 -0
  17. ops/__pycache__/forgetting_attention.cpython-310.pyc +0 -0
  18. ops/__pycache__/forgetting_attention_std.cpython-310.pyc +0 -0
  19. ops/__pycache__/framework_mock.cpython-310.pyc +0 -0
  20. ops/__pycache__/geometric_attention_final.cpython-310.pyc +0 -0
  21. ops/__pycache__/geometric_attention_std.cpython-310.pyc +0 -0
  22. ops/__pycache__/layer_with_visualization.cpython-310.pyc +0 -0
  23. ops/__pycache__/multi_head_attention.cpython-310.pyc +0 -0
  24. ops/__pycache__/multi_head_relative_pos_attention.cpython-310.pyc +0 -0
  25. ops/__pycache__/sliding_window_attention_std.cpython-310.pyc +0 -0
  26. ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc +0 -0
  27. ops/__pycache__/vanilla_attention_std.cpython-310.pyc +0 -0
  28. ops/direction_sensitive_geometric.py +115 -0
  29. ops/direction_sensitive_geometric.py.bak +115 -0
  30. ops/forgetting_attention.py +1138 -0
  31. ops/forgetting_attention_std.py +72 -0
  32. ops/framework_mock.py +25 -0
  33. ops/geometric_attention/__init__.py +1 -0
  34. ops/geometric_attention/__pycache__/__init__.cpython-310.pyc +0 -0
  35. ops/geometric_attention/__pycache__/cuda_interface.cpython-310.pyc +0 -0
  36. ops/geometric_attention/cuda_interface.cu +177 -0
  37. ops/geometric_attention/cuda_interface.py +93 -0
  38. ops/geometric_attention/cuda_interface.py.bak +94 -0
  39. ops/geometric_attention_final.py +109 -0
  40. ops/geometric_attention_std.py +179 -0
  41. ops/layer_with_visualization.py +43 -0
  42. ops/multi_head_attention.py +149 -0
  43. ops/multi_head_relative_pos_attention.py +185 -0
  44. ops/multi_head_relative_pos_attention.py.bak +185 -0
  45. ops/sliding_window_attention_std.py +88 -0
  46. ops/stickbreaking_attention_std.py +46 -0
  47. ops/transformer.py +165 -0
  48. ops/vanilla_attention_std.py +171 -0
.ipynb_checkpoints/modeling_sliding_window-checkpoint.py ADDED
@@ -0,0 +1,194 @@
(Jupyter checkpoint duplicate; its contents are identical to modeling_sliding_window.py below, so they are not repeated here.)
__init__.py ADDED
@@ -0,0 +1 @@
+ # for HF remote code
__pycache__/__init__.cpython-310.pyc ADDED
Binary file (359 Bytes)
__pycache__/configuration_sliding_window.cpython-310.pyc ADDED
Binary file (1.65 kB)
__pycache__/modeling_sliding_window.cpython-310.pyc ADDED
Binary file (6.7 kB)
configuration_sliding_window.py ADDED
@@ -0,0 +1,70 @@
+ from transformers import PretrainedConfig
+
+
+ class SlidingWindowConfig(PretrainedConfig):
+     model_type = "sliding_window"
+
+     def __init__(
+         self,
+         vocab_size=50304,
+         hidden_size=768,
+         intermediate_size=None,
+         hidden_ratio=4,
+         num_hidden_layers=12,
+         num_heads=12,
+         num_kv_heads=None,
+         hidden_act="swish",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         attention_bias=False,
+         fuse_norm=True,
+         fuse_cross_entropy=True,
+         use_rope=False,
+         # Sliding window specific
+         window_size=2,  # default: 2-gram window
+         qk_norm=False,
+         qk_norm_share_param_across_head=False,
+         use_k_shift=False,
+         use_v_shift=False,
+         elementwise_affine=True,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size or hidden_ratio * hidden_size
+         self.hidden_ratio = hidden_ratio
+         self.num_hidden_layers = num_hidden_layers
+         self.num_heads = num_heads
+         self.num_kv_heads = num_kv_heads
+         self.hidden_act = hidden_act
+         self.max_position_embeddings = max_position_embeddings
+         self.initializer_range = initializer_range
+         self.norm_eps = norm_eps
+         self.use_cache = use_cache
+         self.tie_word_embeddings = tie_word_embeddings
+         self.attention_bias = attention_bias
+         self.fuse_norm = fuse_norm
+         self.fuse_cross_entropy = fuse_cross_entropy
+         self.use_rope = use_rope
+
+         # Sliding window
+         self.window_size = window_size
+
+         self.qk_norm = qk_norm
+         self.qk_norm_share_param_across_head = qk_norm_share_param_across_head
+         self.use_k_shift = use_k_shift
+         self.use_v_shift = use_v_shift
+         self.elementwise_affine = elementwise_affine
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
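
A minimal usage sketch for the config above (illustrative, not part of the commit; it assumes a local checkout so the module is importable by filename). Note how intermediate_size is derived from hidden_ratio when left unset:

    from configuration_sliding_window import SlidingWindowConfig

    config = SlidingWindowConfig(hidden_size=768, num_heads=12, window_size=2)
    assert config.intermediate_size == 4 * 768   # hidden_ratio * hidden_size
    assert config.model_type == "sliding_window"
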
modeling_sliding_window.py ADDED
@@ -0,0 +1,194 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+ from .configuration_sliding_window import SlidingWindowConfig
+ from forgetting_transformer.ops.sliding_window_attention_std import sliding_window_attention_std
+
+
+ class SlidingWindowAttention(nn.Module):
+     def __init__(self, config: SlidingWindowConfig, layer_idx: int):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.num_kv_heads = config.num_kv_heads or self.num_heads
+         self.window_size = config.window_size
+
+         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+         self.k_proj = nn.Linear(self.hidden_size, self.num_kv_heads * self.head_dim, bias=config.attention_bias)
+         self.v_proj = nn.Linear(self.hidden_size, self.num_kv_heads * self.head_dim, bias=config.attention_bias)
+         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
+
+     def forward(self, hidden_states, attention_mask=None, **kwargs):
+         B, T, H = hidden_states.shape
+
+         # Project
+         q = self.q_proj(hidden_states)
+         k = self.k_proj(hidden_states)
+         v = self.v_proj(hidden_states)
+
+         # Reshape
+         q = q.view(B, T, self.num_heads, self.head_dim)
+         k = k.view(B, T, self.num_kv_heads, self.head_dim)
+         v = v.view(B, T, self.num_kv_heads, self.head_dim)
+
+         # Sliding window attention
+         attn_output = sliding_window_attention_std(
+             q, k, v,
+             head_first=False,
+             window_size=self.window_size,
+         )
+
+         # Output projection
+         attn_output = attn_output.reshape(B, T, self.hidden_size)
+         output = self.o_proj(attn_output)
+
+         return output, None
+
+
+ class SlidingWindowMLP(nn.Module):
+     def __init__(self, config: SlidingWindowConfig):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
+         self.act_fn = nn.SiLU()
+
+     def forward(self, x):
+         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+ class SlidingWindowDecoderLayer(nn.Module):
+     def __init__(self, config: SlidingWindowConfig, layer_idx: int):
+         super().__init__()
+         self.hidden_size = config.hidden_size
+
+         self.attn = SlidingWindowAttention(config, layer_idx)
+         self.mlp = SlidingWindowMLP(config)
+
+         self.input_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps, elementwise_affine=config.elementwise_affine)
+         self.post_attention_layernorm = nn.LayerNorm(self.hidden_size, eps=config.norm_eps, elementwise_affine=config.elementwise_affine)
+
+     def forward(self, hidden_states, attention_mask=None, **kwargs):
+         # Attention
+         residual = hidden_states
+         hidden_states = self.input_layernorm(hidden_states)
+         hidden_states, _ = self.attn(hidden_states, attention_mask)
+         hidden_states = residual + hidden_states
+
+         # MLP
+         residual = hidden_states
+         hidden_states = self.post_attention_layernorm(hidden_states)
+         hidden_states = self.mlp(hidden_states)
+         hidden_states = residual + hidden_states
+
+         return hidden_states, None
+
+
+ class SlidingWindowModel(PreTrainedModel):
+     config_class = SlidingWindowConfig
+     _no_split_modules = ["SlidingWindowDecoderLayer"]  # ← key fix 1: don't split a decoder layer across devices
+
+     def __init__(self, config: SlidingWindowConfig):
+         super().__init__(config)
+         self.padding_idx = config.pad_token_id
+         self.vocab_size = config.vocab_size
+
+         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+         self.layers = nn.ModuleList([
+             SlidingWindowDecoderLayer(config, layer_idx)
+             for layer_idx in range(config.num_hidden_layers)
+         ])
+         self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, elementwise_affine=config.elementwise_affine)
+
+         self.gradient_checkpointing = False
+         self.post_init()
+
+     def forward(self, input_ids, attention_mask=None, **kwargs):
+         hidden_states = self.embed_tokens(input_ids)
+
+         for decoder_layer in self.layers:
+             hidden_states, _ = decoder_layer(hidden_states, attention_mask)
+
+         hidden_states = self.norm(hidden_states)
+         return hidden_states
+
+
+ class SlidingWindowForCausalLM(PreTrainedModel):
+     config_class = SlidingWindowConfig
+     _tied_weights_keys = ["lm_head.weight"]
+     _no_split_modules = ["SlidingWindowDecoderLayer"]  # ← key fix 2: same, for the LM wrapper
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = SlidingWindowModel(config)
+         self.vocab_size = config.vocab_size
+         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+         if config.tie_word_embeddings:
+             self.lm_head.weight = self.model.embed_tokens.weight
+
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.model.embed_tokens
+
+     def set_input_embeddings(self, value):
+         self.model.embed_tokens = value
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def set_decoder(self, decoder):
+         self.model = decoder
+
+     def get_decoder(self):
+         return self.model
+
+     def forward(
+         self,
+         input_ids=None,
+         attention_mask=None,
+         labels=None,
+         **kwargs
+     ):
+         hidden_states = self.model(input_ids, attention_mask)
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+
+             # Return per-token loss with shape [B, T-1]
+             loss_fct = nn.CrossEntropyLoss(reduction='none')
+             loss = loss_fct(
+                 shift_logits.view(-1, self.config.vocab_size),
+                 shift_labels.view(-1)
+             )
+             # Reshape to [B, T-1]
+             B, T = shift_logits.size(0), shift_logits.size(1)
+             loss = loss.view(B, T)
+
+             # Pad the last position so the shape is [B, T] instead of [B, T-1]
+             loss = F.pad(loss, (0, 1), value=0.0)
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+         )
+
+     def prepare_inputs_for_generation(self, input_ids, **kwargs):
+         return {"input_ids": input_ids}
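
A usage sketch for these remote-code files (illustrative; the repo id below is a placeholder). The non-standard loss contract matters: forward returns a per-token loss of shape [B, T] with the last position zero-padded, not the scalar mean most HF models return, so callers reduce it themselves:

    import torch
    from transformers import AutoConfig, AutoModelForCausalLM

    repo = "Lanni-ni/<repo-id>"  # placeholder repo id
    config = AutoConfig.from_pretrained(repo, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)

    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    out = model(input_ids=input_ids, labels=input_ids)
    print(out.loss.shape)                 # torch.Size([2, 16]); per-token, not a scalar
    mean_loss = out.loss[:, :-1].mean()   # drop the zero-padded last position
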
ops/.ipynb_checkpoints/forgetting_attention-checkpoint.py ADDED
@@ -0,0 +1,1138 @@
+ """
+ Implementation of Forgetting Attention.
+
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
+
+ The original license info from FlagAttention:
+
+ Copyright 2023 BAAI
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ """
+ import pytest
+ import math
+ import torch
+ import triton
+ import triton.language as tl
+ from einops import rearrange
+ from typing import Optional
+
+
+ __all__ = ["forgetting_attention"]
+
+
+ # File flash.py
+ def maybe_contiguous(x):
+     # only when the innermost dimension is contiguous can LDGSTS be used,
+     # so inner-dimension contiguity is enforced.
+     return x.contiguous() if x.stride(-1) != 1 else x
+
+ def rounded_multiple(a, b):
+     return (a + b - 1) // b * b
+
+ # --------------------------- public API ---------------------------
+ class ForgettingAttention(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
+         assert causal, "Only causal attention is supported"
+         Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
+         assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
+         assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
+
+         B, H, M, D = q.shape
+         if seq_start is not None:
+             has_seq_start = True
+             assert seq_start.shape == (B,)
+         else:
+             has_seq_start = False
+             seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
+         N = k.shape[2]
+         assert log_fgate.shape == (B, H, N)
+         log_fgate = log_fgate.float()
+         if has_seq_start:
+             log_fgate = log_fgate.clone()
+             # We absolutely don't want masked values to affect the result. If we
+             # didn't zero them out here, they could, by affecting the numerical
+             # precision of the cumsum.
+             mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
+             mask_index = torch.broadcast_to(mask_index, log_fgate.size())
+             log_fgate[mask_index] = 0.0
+
+         log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
+
+         Hk, Hv = k.shape[1], v.shape[1]
+         assert Hk == Hv, "num of heads in k and v should be equal"
+         assert H == Hk, "grouped query attention has not been tested; comment this out if you know what you are doing."
+         assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
+         num_groups = H // Hk
+
+         P_SEQ = N - M
+         larger_m = M > N
+         assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
+
+         if sm_scale is None:
+             sm_scale = 1. / math.sqrt(D)
+
+         # contiguity
+         q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
+
+         # to work around https://github.com/openai/triton/issues/2441
+         device = torch.cuda.device_of(q)
+
+         with torch.cuda.device(device):
+             config = get_fwd_config(B, H, M, N, D, causal)
+             BLOCK_M, BLOCK_N, num_stages, num_warps = config
+
+             divisible_m = M % BLOCK_M == 0
+             divisible_n = N % BLOCK_N == 0
+             # consider using a 3d grid to avoid div & rem
+             grid = (triton.cdiv(M, BLOCK_M), H, B)
+             o = torch.empty_like(q)
+             L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
+             _fwd_kernel[grid](
+                 q, k, v, log_lambda, seq_start, sm_scale,
+                 L, o,
+                 q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+                 k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+                 v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+                 log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
+                 o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+                 B, H, M, N, P_SEQ, num_groups,
+                 BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
+                 IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
+                 DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
+                 num_warps=num_warps, num_stages=num_stages,
+             )
+
+         # autograd context maintenance
+         ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
+         ctx.sm_scale = sm_scale
+         ctx.causal = causal
+         ctx.has_seq_start = has_seq_start
+
+         has_extra_return = return_log_normalizer
+         if has_extra_return:
+             outs = (
+                 o,
+                 L if return_log_normalizer else None,
+             )
+             return outs
+         return o
+
+     @staticmethod
+     def backward(ctx, do, *ignored):
+         q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
+         sm_scale = ctx.sm_scale
+         causal = ctx.causal
+         has_seq_start = ctx.has_seq_start
+
+         B, H, M, D = q.shape
+         N = k.shape[2]
+         Hk = k.shape[1]
+         num_groups = H // Hk
+         P_SEQ = N - M
+         larger_m = M > N
+
+         if sm_scale is None:
+             sm_scale = 1. / math.sqrt(D)
+
+         # to work around https://github.com/openai/triton/issues/2441
+         device = torch.cuda.device_of(q)
+         with torch.cuda.device(device):
+             config = get_bwd_config(B, H, M, N, D, causal)
+             BLOCK_M, BLOCK_N, num_stages, num_warps = config
+
+             divisible_m = M % BLOCK_M == 0
+             divisible_n = N % BLOCK_N == 0
+
+             delta = torch.empty_like(L)
+             grid = (triton.cdiv(M, BLOCK_M), H, B)
+             _bwd_preprocess[grid](
+                 o, do,
+                 delta,
+                 o.stride(0), o.stride(1), o.stride(2), o.stride(3),
+                 do.stride(0), do.stride(1), do.stride(2), do.stride(3),
+                 delta.stride(0), delta.stride(1), delta.stride(2),
+                 M,
+                 BLOCK_M=BLOCK_M, D_HEAD=D,
+                 DIVISIBLE_M=divisible_m,
+             )
+
+             # NOTE that dk & dv always have the same number of heads as q, instead of k & v.
+             BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
+             divisible_m = M % BLOCK_M == 0
+             divisible_n = N % BLOCK_N == 0
+
+             dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
+             dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
+             dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
+             grid = (triton.cdiv(N, BLOCK_N), H, B)
+             _bwd_kv_kernel[grid](
+                 q, k, v, log_lambda, seq_start, sm_scale, do,
+                 dk, dv, dlog_lambda,
+                 L, delta,
+                 q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+                 k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+                 v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+                 log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
+                 do.stride(0), do.stride(1), do.stride(2), do.stride(3),
+                 dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
+                 dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
+                 dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
+                 B, H, M, N, P_SEQ,
+                 num_groups,
+                 BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
+                 DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
+                 num_stages=num_stages, num_warps=num_warps,
+             )
+
+             BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
+             divisible_m = M % BLOCK_M == 0
+             divisible_n = N % BLOCK_N == 0
+             dq = torch.zeros_like(q)
+             grid = (triton.cdiv(M, BLOCK_M), H, B)
+             _bwd_q_kernel[grid](
+                 q, k, v, log_lambda, seq_start, sm_scale, do,
+                 dq, dlog_lambda,
+                 L, delta,
+                 q.stride(0), q.stride(1), q.stride(2), q.stride(3),
+                 k.stride(0), k.stride(1), k.stride(2), k.stride(3),
+                 v.stride(0), v.stride(1), v.stride(2), v.stride(3),
+                 log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
+                 do.stride(0), do.stride(1), do.stride(2), do.stride(3),
+                 dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
+                 dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
+                 B, H, M, N, P_SEQ,
+                 num_groups,
+                 BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
+                 CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
+                 DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
+                 num_stages=num_stages, num_warps=num_warps,
+             )
+         dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
+         dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
+         dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
+         dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
+         dlog_fgate = dlog_fgate.float()
+         return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
+
+
+ def forgetting_attention(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     log_fgate: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+ ):
+     """
+     A FlashAttention-based implementation of Forgetting Attention.
+
+     Note:
+         - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
+           q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
+           in float32 (which would be slow).
+         - We only support seqlen_q <= seqlen_k.
+         - We only support causal attention.
+         - The head dimension must be one of {16, 32, 64, 128}.
+
+     Arguments:
+         - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
+         - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
+         - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
+         - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
+           This should be the **log** of the forget gates. This is typically the
+           output of torch.nn.functional.logsigmoid.
+         - head_first: if True, the num_heads and seqlen_* axes of all FloatTensor
+           inputs and outputs are ordered as (num_heads, seqlen_*) instead of
+           (seqlen_*, num_heads).
+         - seq_start: if not None, should be a LongTensor with shape (batch_size,)
+           and values in [0, seqlen_k). For each batch index batch_id, no attention
+           will be allocated to tokens before the token index seq_start[batch_id].
+           This is useful for left-padded inputs.
+         - sm_scale: the scaling of attention scores before applying softmax. If
+           None, it defaults to (1.0 / math.sqrt(head_dim)).
+
+     Returns:
+         out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
+     """
+     if not head_first:
+         q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
+         log_fgate = rearrange(log_fgate, "b t h -> b h t")
+     out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+     return out
+
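
For reference, a dense O(T^2) sketch of what the kernels below compute (an illustrative reference, not code from this file; assumes seqlen_q == seqlen_k, no seq_start, and head_first=False):

    import math
    import torch

    def forgetting_attention_ref(q, k, v, log_fgate, sm_scale=None):
        # q, k, v: (B, T, H, D); log_fgate: (B, T, H), log of the forget gates
        B, T, H, D = q.shape
        if sm_scale is None:
            sm_scale = 1.0 / math.sqrt(D)
        log_lambda = log_fgate.float().cumsum(dim=1)              # (B, T, H)
        s = torch.einsum("bihd,bjhd->bhij", q.float(), k.float()) * sm_scale
        lam = log_lambda.transpose(1, 2)                          # (B, H, T)
        s = s + lam[:, :, :, None] - lam[:, :, None, :]           # + Lambda_i - Lambda_j
        causal = torch.ones(T, T, dtype=torch.bool, device=q.device).tril()
        s = s.masked_fill(~causal, float("-inf"))
        p = s.softmax(dim=-1)
        return torch.einsum("bhij,bjhd->bihd", p, v.float()).to(q.dtype)
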
+
+ # --------------------------- Forward ---------------------------
+ # NOTE: this function can be overwritten at runtime to use your custom config
+ def get_fwd_config(B, H, M, N, D, causal):
+     assert causal
+     if torch.cuda.get_device_capability() == (8, 0):
+         if D <= 64:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
+         else:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
+     elif torch.cuda.get_device_capability() == (9, 0):
+         # H100
+         if D <= 64:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
+         else:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
+     elif torch.cuda.get_device_capability() == (8, 6):
+         if not causal:
+             if D <= 64:
+                 BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
+             else:
+                 BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
+         else:  # causal
+             if D <= 64:
+                 BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
+             else:
+                 BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
+     elif torch.cuda.get_device_capability() == (8, 9):
+         # L40S
+         if D <= 64:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
+         else:
+             BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
+     else:
+         BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
+     return (BLOCK_M, BLOCK_N, num_stages, num_warps)
+
+
+ @triton.jit
+ def _fwd_kernel(
+     Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
+     L, O,
+     stride_qz, stride_qh, stride_qm, stride_qk,
+     stride_kz, stride_kh, stride_kn, stride_kk,
+     stride_vz, stride_vh, stride_vn, stride_vk,
+     stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
+     stride_oz, stride_oh, stride_om, stride_ok,
+     Z, H, M, N, P_SEQ,
+     num_groups,
+     BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
+     IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
+     DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
+ ):
+     input_dtype = Q.dtype.element_ty
+     # -- grid id --
+     start_m = tl.program_id(0)
+     off_h = tl.program_id(1)
+     off_z = tl.program_id(2)
+
+     # scale sm_scale by log_2(e) and use
+     # 2^x instead of exp in the loop because CSE and LICM
+     # don't work as expected with `exp` in the loop
+     log2e: tl.constexpr = 1.4426950408889634
+     loge2: tl.constexpr = 0.6931471805599453
+     qk_scale = sm_scale * log2e
+
+     # offset pointers for (batch, head)
+     off_hk = off_h // num_groups
+     Q += off_z * stride_qz + off_h * stride_qh
+     K += off_z * stride_kz + off_hk * stride_kh
+     V += off_z * stride_vz + off_hk * stride_vh
+     LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
+     O += off_z * stride_oz + off_h * stride_oh
+     L += (off_z * H + off_h) * M  # L's shape is (B, H, M)
+
+     offs_m_base = tl.arange(0, BLOCK_M)
+     offs_m = start_m * BLOCK_M + offs_m_base
+     offs_n_base = tl.arange(0, BLOCK_N)
+     offs_k = tl.arange(0, BLOCK_DMODEL)
+
+     # initialize pointers to value-like data
+     q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk)  # (BLOCK_M, BLOCK_DMODEL)
+     log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
+     o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok)  # (BLOCK_M, BLOCK_DMODEL)
+     l_ptrs = L + offs_m
+
+     # initialize m and l, fp32 for accumulators
+     m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
+     l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+     acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+
+     # load q
+     if DIVISIBLE_M:
+         q = tl.load(q_ptrs, cache_modifier=".cg")
+         log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
+     else:
+         mask_m = offs_m < M
+         q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
+         log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
+
+     # Dot-I trick: placing q in registers saves shared memory
+     # if BLOCK_DMODEL < 128:
+     #     I = tl.where(offs_k[:, None] == offs_k,
+     #                  tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
+     #                  tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
+     #     q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
+     # else:
+     #     I = tl.where(offs_m_base[:, None] == offs_m_base,
+     #                  tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
+     #                  tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
+     #     q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
+
+     # NOTE: Loop-Bound-For-N
+     # The indices in the m-dimension that this block may access lie in
+     # `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`. By the rule of causal masking,
+     # the max index in the n-dimension that this block may access is
+     # `P_SEQ + (start_m + 1) * BLOCK_M`. However, the upper bound of the index in the
+     # n-dimension should never exceed the sequence length of k/v (`P_SEQ + N_CTX`);
+     # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`. In that case there
+     # would be an illegal memory access when loading k & v tiles if mask_n is not
+     # applied when loading (only when `DIVISIBLE_N` is true).
+     # See also https://github.com/FlagOpen/FlagAttention/pull/8
+     if IS_CAUSAL:
+         hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
+         if LARGER_M:
+             hi = tl.maximum(0, hi)
+     else:
+         hi = N
+
+     offs_n_init = offs_n_base
+     if HAS_SEQ_START:
+         SEQ_START += off_z
+         seq_start = tl.load(SEQ_START)
+         lo = tl.minimum(seq_start, hi)
+         lo = (lo // BLOCK_N) * BLOCK_N
+         offs_n_init += lo
+     else:
+         lo = 0
+         seq_start = 0
+
+     # loop over k, v and update accumulators
+     k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn)  # (BLOCK_DMODEL, BLOCK_N)
+     v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk)  # (BLOCK_N, BLOCK_DMODEL)
+     log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)  # (BLOCK_N,)
+     for start_n in range(lo, hi, BLOCK_N):
+         start_n = tl.multiple_of(start_n, BLOCK_N)
+         offs_n = start_n + offs_n_base
+
+         # -- load k, v --
+         if DIVISIBLE_N:
+             k = tl.load(k_ptrs, cache_modifier=".cg")
+             v = tl.load(v_ptrs, cache_modifier=".cg")
+             log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
+         else:
+             mask_n = offs_n < N
+             k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
+             v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
+             log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
+
+         # -- compute qk --
+         s = tl.dot(q, k, input_precision="ieee") * qk_scale
+         decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
+         s += decay_bias * log2e
+
+         if not DIVISIBLE_N:
+             s = tl.where(mask_n[None, :], s, float("-inf"))
+         if IS_CAUSAL:
+             causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
+             s = tl.where(causal_mask, s, float("-inf"))
+         if HAS_SEQ_START:
+             s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
+
+         # -- compute scaling constant --
+         m_i_new = tl.maximum(m_i, tl.max(s, 1))
+         alpha = tl.math.exp2((m_i - m_i_new))
+         p = tl.math.exp2(s - m_i_new[:, None])
+
+         # -- compute partial sum of exponentials --
+         p_sum = tl.sum(p, 1)
+
+         # -- scale and update acc: acc *= alpha[:, None] --
+         acc *= alpha[:, None]
+         acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
+
+         # -- update m_i and l_i --
+         l_i = l_i * alpha + p_sum
+         m_i = m_i_new
+         # update pointers
+         k_ptrs += BLOCK_N * stride_kn
+         v_ptrs += BLOCK_N * stride_vn
+         log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
+
+     # write back l & o
+     if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
+         is_empty_line = (offs_m + P_SEQ) < seq_start
+         acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
+         l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
+     else:
+         acc = acc * (1.0 / l_i[:, None])
+         l = m_i * loge2 + tl.log(l_i)  # log(normalizer)
+
+     if DIVISIBLE_M:
+         tl.store(l_ptrs, l, cache_modifier=".cg")
+         tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
+     else:
+         tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
+         tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
+
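
For orientation, a summary of the math the forward kernel implements (my notation, not from the file): with $\Lambda_t = \sum_{s \le t} \log f_s$ the cumulative log forget gate,

    $$ s_{ij} = \mathrm{sm\_scale}\; q_i^\top k_j + \Lambda_i - \Lambda_j \quad (j \le i), \qquad
       o_i = \sum_{j \le i} \operatorname{softmax}_j(s_{ij})\, v_j. $$

Scores are pre-multiplied by $\log_2 e$ so the online-softmax loop can use tl.math.exp2 instead of exp, and the row-wise log-normalizer $L_i = m_i \ln 2 + \log l_i$ is stored for reuse in the backward pass.
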
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute (Out * Dout).sum() for vector interpretation
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_N, BLOCK_DMODEL)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N, BLOCK_DMODEL)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the N block doesn't contain seq_start, no need to loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk amd dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_M, BLOCK_N)
727
+
728
+ # load q1, k1, q2, k2, v, do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd)
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_M, BLOCK_N)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_M, BLOCK_N)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N, BLOCK_DMODEL,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N, BLOCK_DMODEL,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v, log_lambda_in on chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since softmax in backward is pointwise, the normalizer has been saved in fwd.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(do, v^T)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds to zero out invalid positions
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
992
+
993
+
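Both `_bwd_kv_kernel` and `_bwd_q_kernel` above lean on the standard softmax-backward identity ds = p * (dp - delta[:, None]), where delta is the row-wise sum of do * o produced by `_bwd_preprocess`. A minimal editorial sketch (not part of the commit) checking the identity against autograd:

import torch

torch.manual_seed(0)
s = torch.randn(8, 8, dtype=torch.float64, requires_grad=True)  # attention logits
v = torch.randn(8, 4, dtype=torch.float64)
p = torch.softmax(s, dim=-1)
o = p @ v
do = torch.randn_like(o)
(ds_ref,) = torch.autograd.grad(o, s, grad_outputs=do)

dp = do @ v.T                   # the kernels compute this (transposed) as dpT
delta = (do * o).sum(dim=-1)    # row-wise delta, as in _bwd_preprocess
ds = p * (dp - delta[:, None])  # the identity used by both kernels
assert torch.allclose(ds, ds_ref)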
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
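For reference, the FLOPs bookkeeping in bench_flash_attention works out as follows (editorial arithmetic mirroring the code, not part of the commit):

# two matmuls (QK^T and PV) at 2*B*H*N*N*D FLOPs each; causal halves the work;
# backward is counted as 2x forward plus 0.5x for recomputation.
B, H, N, D = 4, 32, 2**14, 128
fwd_flops = 2 * (2.0 * B * H * N * N * D) * 0.5
bwd_flops = fwd_flops * 2.5
print(f"fwd ~ {fwd_flops:.3e} FLOPs, bwd ~ {bwd_flops:.3e} FLOPs")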
ops/.ipynb_checkpoints/forgetting_attention_std-checkpoint.py ADDED
@@ -0,0 +1,72 @@
1
+ """
2
+ Forgetting Attention - standard softmax version
3
+ Append this function at the end of forgetting_attention.py
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from einops import rearrange
10
+ from typing import Optional
11
+
12
+
13
+ def forgetting_attention_std(
14
+ q: torch.Tensor,
15
+ k: torch.Tensor,
16
+ v: torch.Tensor,
17
+ log_fgate: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ ) -> torch.Tensor:
23
+ """标准 Softmax 版本的 Forgetting Attention"""
24
+
25
+ if not head_first:
26
+ q = rearrange(q, "b t h d -> b h t d")
27
+ k = rearrange(k, "b t h d -> b h t d")
28
+ v = rearrange(v, "b t h d -> b h t d")
29
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
30
+
31
+ B, H, T_q, D = q.shape
32
+ T_k = k.shape[2]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # compute QK scores
38
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
39
+
40
+ # handle seq_start
41
+ log_fgate_masked = log_fgate.float()
42
+ if seq_start is not None:
43
+ log_fgate_masked = log_fgate_masked.clone()
44
+ mask_idx = (torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]).expand_as(log_fgate_masked)
45
+ log_fgate_masked[mask_idx] = 0.0
46
+
47
+ # compute the cumulative decay
48
+ log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
49
+ decay_bias = log_lambda[:, :, :T_q, None] - log_lambda[:, :, None, :]
50
+ scores = scores + decay_bias
51
+
52
+ # Causal mask
53
+ P_SEQ = T_k - T_q
54
+ causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
55
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
56
+
57
+ # seq_start mask
58
+ if seq_start is not None:
59
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
60
+ scores = scores.masked_fill(seq_mask, float('-inf'))
61
+
62
+ # Softmax
63
+ attn = F.softmax(scores, dim=-1)
64
+ attn = torch.nan_to_num(attn, 0.0)
65
+
66
+ # compute the output
67
+ out = torch.matmul(attn.to(v.dtype), v)
68
+
69
+ if not head_first:
70
+ out = rearrange(out, "b h t d -> b t h d")
71
+
72
+ return out
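A short, hedged usage sketch of forgetting_attention_std (the tensor sizes are illustrative; the logsigmoid gating mirrors how log_fgate is produced for the Triton kernel):

import torch
import torch.nn.functional as F

B, T, H, D = 2, 128, 4, 64
q, k, v = (torch.randn(B, T, H, D) for _ in range(3))
log_fgate = F.logsigmoid(torch.randn(B, T, H))      # log forget gates, one per (batch, step, head)
out = forgetting_attention_std(q, k, v, log_fgate)  # (B, T, H, D)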
ops/.ipynb_checkpoints/geometric_attention_std-checkpoint.py ADDED
@@ -0,0 +1,179 @@
1
+ """
2
+ Geometric Attention - standard softmax version
3
+ Based on the paper "The Neural Data Router" (Csordás et al., 2022)
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def geometric_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ normalize: bool = True,
23
+ ) -> torch.Tensor:
24
+ """
25
+ Standard-softmax version of Geometric Attention
26
+
27
+ Args:
28
+ q: Query tensor [B, T, H, D] or [B, H, T, D] if head_first
29
+ k: Key tensor [B, T, H, D] or [B, H, T, D] if head_first
30
+ v: Value tensor [B, T, H, D] or [B, H, T, D] if head_first
31
+ head_first: whether the head dimension comes first
32
+ seq_start: sequence start positions [B]
33
+ sm_scale: scaling factor, defaults to 1/sqrt(D)
34
+ normalize: whether to normalize the attention weights
35
+
36
+ Returns:
37
+ output: [B, T, H, D] or [B, H, T, D] if head_first
38
+ """
39
+
40
+ # Rearrange to head_first format
41
+ if not head_first:
42
+ q = rearrange(q, "b t h d -> b h t d")
43
+ k = rearrange(k, "b t h d -> b h t d")
44
+ v = rearrange(v, "b t h d -> b h t d")
45
+
46
+ B, H, T_q, D = q.shape
47
+ T_k = k.shape[2]
48
+
49
+ if sm_scale is None:
50
+ sm_scale = 1.0 / math.sqrt(D)
51
+
52
+ # Step 1: compute content-based logits
53
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
54
+ # logits: [B, H, T_q, T_k]
55
+
56
+ # Step 2: mask the diagonal (a position may not attend to itself)
57
+ if T_q == T_k:
58
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
59
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
60
+
61
+ # Step 3: apply the seq_start mask
62
+ if seq_start is not None:
63
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
64
+ logits = logits.masked_fill(seq_mask, float('-inf'))
65
+
66
+ # Step 4: causal mask (if needed)
67
+ # Note: the geometric attention paper has no causal mask; uncomment below if your task needs one
68
+ # P_SEQ = T_k - T_q
69
+ # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
70
+ # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
71
+
72
+ # Step 5: geometric weighting (the core algorithm)
73
+ attn_weights = geometric_weighting(logits, normalize=normalize)
74
+
75
+ # Step 6: apply attention to the values
76
+ out = torch.matmul(attn_weights.to(v.dtype), v)
77
+
78
+ if not head_first:
79
+ out = rearrange(out, "b h t d -> b t h d")
80
+
81
+ return out
82
+
83
+
84
+ def geometric_weighting(
85
+ logits: torch.Tensor,
86
+ normalize: bool = True,
87
+ ) -> torch.Tensor:
88
+ """
89
+ Compute geometric attention weights
90
+
91
+ Implements Equation 7 from the paper:
92
+ A[i,j] = P[i,j] * ∏(1 - P[i,k]) for k closer to i than j
93
+
94
+ Args:
95
+ logits: [B, H, T_q, T_k] attention logits
96
+ normalize: whether to normalize
97
+
98
+ Returns:
99
+ weights: [B, H, T_q, T_k] attention weights
100
+ """
101
+ B, H, T_q, T_k = logits.shape
102
+
103
+ # Step 1: Sigmoid to get matching probabilities
104
+ P = torch.sigmoid(logits) # [B, H, T_q, T_k]
105
+
106
+ # Step 2: work in log-space (numerically stable)
107
+ log_P = torch.log(P + 1e-10)
108
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
109
+
110
+ # Step 3: simplified version - use cumsum to realize the geometric distribution
111
+ # an efficient approximation that avoids explicit loops
112
+
113
+ # for each position i, accumulate log(1-P) over all positions to its left
114
+ log_decay_left = log_one_minus_P.cumsum(dim=-1)
115
+
116
+ # compute the weights (simplified)
117
+ # the full version selects the interval by distance; this is an efficient approximation
118
+ weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
119
+
120
+ # special-case the first position (it has nothing to its left)
121
+ # avoid in-place ops
122
+ weights_first = P[:, :, :, :1] # take the first column
123
+ weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
124
+
125
+ # Step 4: normalization (optional)
126
+ if normalize:
127
+ weights = F.normalize(weights, p=1, dim=-1)
128
+
129
+ # handle NaN (when every position is -inf)
130
+ weights = torch.nan_to_num(weights, 0.0)
131
+
132
+ return weights
133
+
134
+
135
+ def geometric_weighting_full(
136
+ logits: torch.Tensor,
137
+ normalize: bool = True,
138
+ ) -> torch.Tensor:
139
+ """
140
+ Full geometric weighting (slower but more accurate)
141
+
142
+ Use only when maximum accuracy is required; prefer the simplified version above for training
143
+ """
144
+ B, H, T_q, T_k = logits.shape
145
+ device = logits.device
146
+
147
+ P = torch.sigmoid(logits)
148
+ log_P = torch.log(P + 1e-10)
149
+ log_one_minus_P = torch.log(1.0 - P + 1e-10)
150
+
151
+ # initialize weights
152
+ weights = torch.zeros_like(P)
153
+
154
+ # compute the geometric weight for each (i, j)
155
+ for i in range(T_q):
156
+ for j in range(T_k):
157
+ # find every position k that is closer to i than j is
158
+ if i < j:
159
+ # looking right: closer positions are [i+1, ..., j-1]
160
+ closer_positions = range(i + 1, j)
161
+ elif i > j:
162
+ # looking left: closer positions are [j+1, ..., i-1]
163
+ closer_positions = range(j + 1, i)
164
+ else:
165
+ # i == j (diagonal) was already masked out above
166
+ continue
167
+
168
+ # compute ∏(1 - P[i,k]) in log-space
169
+ log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
170
+
171
+ # weights[i,j] = P[i,j] * ∏(1 - P[i,k])
172
+ weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
173
+
174
+ if normalize:
175
+ weights = F.normalize(weights, p=1, dim=-1)
176
+
177
+ weights = torch.nan_to_num(weights, 0.0)
178
+
179
+ return weights
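Because geometric_weighting is only an approximation of geometric_weighting_full, a quick editorial way to gauge the gap on a tiny input (the two are not expected to match exactly):

import torch

logits = torch.randn(1, 2, 8, 8)
w_fast = geometric_weighting(logits, normalize=True)
w_full = geometric_weighting_full(logits, normalize=True)
print((w_fast - w_full).abs().max())  # approximation error on this input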
ops/.ipynb_checkpoints/sliding_window_attention_std-checkpoint.py ADDED
@@ -0,0 +1,88 @@
1
+ """
2
+ Sliding Window / Hard Attention
3
+ Based on "Context Limitations Make Neural Language Models More Human-Like"
4
+ (Kuribayashi et al., 2022)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from einops import rearrange
11
+ from typing import Optional
12
+
13
+
14
+ def sliding_window_attention_std(
15
+ q: torch.Tensor,
16
+ k: torch.Tensor,
17
+ v: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ window_size: int = 2, # default: 2-gram (the current token plus the previous one)
23
+ ) -> torch.Tensor:
24
+ """
25
+ Sliding Window Attention
26
+
27
+ Hard truncation: each query may only attend to the most recent window_size tokens
28
+ """
29
+
30
+ if not head_first:
31
+ q = rearrange(q, "b t h d -> b h t d")
32
+ k = rearrange(k, "b t h d -> b h t d")
33
+ v = rearrange(v, "b t h d -> b h t d")
34
+
35
+ B, H, T_q, D = q.shape
36
+ T_k = k.shape[2]
37
+
38
+ if sm_scale is None:
39
+ sm_scale = 1.0 / math.sqrt(D)
40
+
41
+ # Compute logits
42
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
43
+
44
+ # Create sliding window mask
45
+ mask = create_sliding_window_mask(T_q, T_k, window_size, device=q.device)
46
+ logits = logits.masked_fill(~mask, float('-inf'))
47
+
48
+ # Seq start mask
49
+ if seq_start is not None:
50
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
51
+ logits = logits.masked_fill(seq_mask, float('-inf'))
52
+
53
+ # Standard softmax
54
+ weights = F.softmax(logits, dim=-1)
55
+
56
+ # Apply to values
57
+ out = torch.matmul(weights, v)
58
+
59
+ if not head_first:
60
+ out = rearrange(out, "b h t d -> b t h d")
61
+
62
+ return out
63
+
64
+
65
+ def create_sliding_window_mask(
66
+ T_q: int,
67
+ T_k: int,
68
+ window_size: int,
69
+ device: torch.device
70
+ ) -> torch.Tensor:
71
+ """
72
+ Create the sliding window mask
73
+
74
+ window_size=1: attend only to the current token
75
+ window_size=2: the current token plus the previous one (2-gram)
76
+ """
77
+ # base causal mask
78
+ mask = torch.tril(torch.ones(T_q, T_k, dtype=torch.bool, device=device))
79
+
80
+ # apply the window constraint
81
+ if window_size > 0 and window_size < T_k:
82
+ for i in range(T_q):
83
+ # keep only the range [i-window_size+1, i]
84
+ start = max(0, i - window_size + 1)
85
+ if start > 0:
86
+ mask[i, :start] = False
87
+
88
+ return mask[None, None, :, :] # [1, 1, T_q, T_k]
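The per-row Python loop in create_sliding_window_mask is fine for short sequences; for long ones, an equivalent vectorized construction (editorial sketch, same semantics for 1 <= window_size < T_k) is:

import torch

def create_sliding_window_mask_vectorized(T_q: int, T_k: int, window_size: int,
                                          device: torch.device) -> torch.Tensor:
    # key j is visible from query i iff i - window_size < j <= i
    q_idx = torch.arange(T_q, device=device)[:, None]
    k_idx = torch.arange(T_k, device=device)[None, :]
    mask = (k_idx <= q_idx) & (k_idx > q_idx - window_size)
    return mask[None, None, :, :]  # [1, 1, T_q, T_k]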
ops/.ipynb_checkpoints/stickbreaking_attention_std-checkpoint.py ADDED
@@ -0,0 +1,117 @@
1
+ """
2
+ Stick-breaking Attention - ICLR 2025
3
+ Based on the paper "Scaling Stick-Breaking Attention" (Tan et al., 2025)
4
+ Simplified PyTorch implementation (no Triton)
5
+ """
6
+
7
+ import math
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from einops import rearrange
12
+ from typing import Optional
13
+
14
+
15
+ def stickbreaking_attention_std(
16
+ q: torch.Tensor,
17
+ k: torch.Tensor,
18
+ v: torch.Tensor,
19
+ *,
20
+ head_first: bool = False,
21
+ seq_start: Optional[torch.Tensor] = None,
22
+ sm_scale: Optional[float] = None,
23
+ normalize: bool = True,
24
+ attend_current: bool = False,
25
+ ) -> torch.Tensor:
26
+ """
27
+ Stick-breaking attention
28
+
29
+ Based on ICLR 2025 paper, simplified PyTorch implementation
30
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} softplus(z_{k,j}))
31
+
32
+ Args:
33
+ q: query [B, T, H, D] or [B, H, T, D] if head_first
34
+ k: key [B, T, H, D] or [B, H, T, D] if head_first
35
+ v: value [B, T, H, D] or [B, H, T, D] if head_first
36
+ attend_current: whether to attend to current position
37
+ normalize: whether to normalize attention weights
38
+ """
39
+
40
+ if not head_first:
41
+ q = rearrange(q, "b t h d -> b h t d")
42
+ k = rearrange(k, "b t h d -> b h t d")
43
+ v = rearrange(v, "b t h d -> b h t d")
44
+
45
+ B, H, T_q, D = q.shape
46
+ T_k = k.shape[2]
47
+
48
+ if sm_scale is None:
49
+ sm_scale = 1.0 / math.sqrt(D)
50
+
51
+ # Compute logits: QK^T / sqrt(d)
52
+ logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
53
+ # [B, H, T_q, T_k]
54
+
55
+ # Causal mask (optional: mask diagonal if not attend_current)
56
+ if T_q == T_k and not attend_current:
57
+ diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
58
+ logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
59
+
60
+ # Seq start mask
61
+ if seq_start is not None:
62
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]
63
+ logits = logits.masked_fill(seq_mask, float('-inf'))
64
+
65
+ # Stick-breaking weighting
66
+ attn_weights = stickbreaking_weighting(logits, normalize=normalize)
67
+
68
+ # Apply attention to values
69
+ out = torch.matmul(attn_weights.to(v.dtype), v)
70
+
71
+ if not head_first:
72
+ out = rearrange(out, "b h t d -> b t h d")
73
+
74
+ return out
75
+
76
+
77
+ def stickbreaking_weighting(
78
+ logits: torch.Tensor,
79
+ normalize: bool = True,
80
+ ) -> torch.Tensor:
81
+ """
82
+ Compute stick-breaking attention weights
83
+
84
+ From paper Equation 4:
85
+ A_{i,j} = exp(z_{i,j} - ∑_{k=i}^{j-1} log(1 + exp(z_{k,j})))
86
+
87
+ Where log(1 + exp(x)) is softplus(x)
88
+ """
89
+ B, H, T_q, T_k = logits.shape
90
+ device = logits.device
91
+
92
+ # Softplus: log(1 + exp(x))
93
+ # Numerically stable version from paper (Equation 5)
94
+ def softplus_stable(x):
95
+ # softplus(x) = log(1 + exp(x))
96
+ # When x > 15, exp(x) is huge, just return x
97
+ return torch.where(
98
+ x > 15.0,
99
+ x,
100
+ torch.log1p(torch.exp(torch.clamp(x, max=15.0)))
101
+ )
102
+
103
+ # Compute softplus for all logits
104
+ logits_sp = softplus_stable(logits) # [B, H, T_q, T_k]
105
+
106
+ # For each query position, compute cumulative sum
107
+ # We need to accumulate from left to right (position i to j-1)
108
+ log_weights = torch.zeros_like(logits)
109
+
110
+ for i in range(T_q):
111
+ # For query i, we compute attention to all keys j
112
+ z_i = logits[:, :, i, :] # [B, H, T_k]
113
+ z_sp_i = logits_sp[:, :, i, :] # [B, H, T_k]
114
+
115
+ # Cumulative sum of softplus
116
+ # csum[j] = ∑_{k=0}^{j} softplus(z_{i,k})
117
+ csum = z_sp_i.cumsum(dim=-1)
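The checkpoint above is cut off inside stickbreaking_weighting. For orientation only, here is a hedged editorial sketch of one way to finish it with a reversed cumulative sum; it follows the quoted Eq. 4 via log sigmoid(z) = z - softplus(z), but the boundary conventions (whether the key itself enters the sum) may differ from the final ops/stickbreaking_attention_std.py:

import torch
import torch.nn.functional as F

def stickbreaking_weighting_sketch(logits: torch.Tensor, normalize: bool = True) -> torch.Tensor:
    # logits: [B, H, T_q, T_k]; invalid positions are already -inf
    masked = torch.isinf(logits) & (logits < 0)
    sp = F.softplus(logits).masked_fill(masked, 0.0)   # softplus(-inf) is 0 anyway
    # reversed cumulative sum over keys, excluding the current key:
    # rev_cum[..., j] = sum_{k > j} softplus(z[..., k])
    rev_cum = sp.flip(-1).cumsum(-1).flip(-1) - sp
    log_w = F.logsigmoid(logits) - rev_cum             # log sigma(z) = z - softplus(z)
    w = torch.exp(log_w).masked_fill(masked, 0.0)
    if normalize:
        w = w / w.sum(-1, keepdim=True).clamp_min(1e-10)
    return w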
ops/.ipynb_checkpoints/vanilla_attention_std-checkpoint.py ADDED
@@ -0,0 +1,171 @@
1
+ """
2
+ Standard softmax attention for the vanilla Transformer
3
+ A drop-in replacement for the flash_attn implementation
4
+ """
5
+ import math
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from einops import rearrange
9
+ from typing import Optional, Tuple
10
+
11
+ def vanilla_attention_std(
12
+ q: torch.Tensor,
13
+ k: torch.Tensor,
14
+ v: torch.Tensor,
15
+ causal: bool = True,
16
+ window_size: Optional[Tuple[int, int]] = None,
17
+ sm_scale: Optional[float] = None,
18
+ ) -> torch.Tensor:
19
+ """
20
+ Standard softmax attention, compatible with the flash_attn_func input format
21
+
22
+ Args:
23
+ q, k, v: [batch, seq_len, num_heads, head_dim]
24
+ causal: whether to apply a causal mask
25
+ window_size: sliding window size (left, right); (-1, -1) means unbounded
26
+ sm_scale: softmax scaling factor
27
+
28
+ Returns:
29
+ output: [batch, seq_len, num_heads, head_dim]
30
+ """
31
+ B, T_q, H, D = q.shape
32
+ T_k = k.shape[1]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # 转换为 [B, H, T, D] 格式进行计算
38
+ q = rearrange(q, 'b t h d -> b h t d')
39
+ k = rearrange(k, 'b t h d -> b h t d')
40
+ v = rearrange(v, 'b t h d -> b h t d')
41
+
42
+ # 计算 attention scores
43
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
44
+
45
+ # Causal mask
46
+ if causal:
47
+ P_SEQ = T_k - T_q # handles the KV-cache case
48
+ causal_mask = torch.triu(
49
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
50
+ diagonal=P_SEQ + 1
51
+ )
52
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
53
+
54
+ # Window mask (sliding window attention)
55
+ if window_size is not None and window_size != (-1, -1):
56
+ left_window, right_window = window_size
57
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
58
+ for i in range(T_q):
59
+ # compute the valid window range for each query position
60
+ start = max(0, i - left_window)
61
+ end = min(T_k, i + right_window + 1)
62
+ window_mask[i, start:end] = False
63
+ scores = scores.masked_fill(window_mask[None, None, :, :], float('-inf'))
64
+
65
+ # Softmax
66
+ attn_weights = F.softmax(scores, dim=-1)
67
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
68
+
69
+ # Apply attention to values
70
+ output = torch.matmul(attn_weights.to(v.dtype), v)
71
+
72
+ # back to [B, T, H, D] layout
73
+ output = rearrange(output, 'b h t d -> b t h d')
74
+
75
+ return output
76
+
77
+
78
+ def vanilla_attention_varlen_std(
79
+ q: torch.Tensor,
80
+ k: torch.Tensor,
81
+ v: torch.Tensor,
82
+ cu_seqlens_q: torch.Tensor,
83
+ cu_seqlens_k: torch.Tensor,
84
+ max_seqlen_q: int,
85
+ max_seqlen_k: int,
86
+ causal: bool = True,
87
+ window_size: Optional[Tuple[int, int]] = None,
88
+ sm_scale: Optional[float] = None,
89
+ ) -> torch.Tensor:
90
+ """
91
+ Standard softmax attention for variable-length sequences, compatible with flash_attn_varlen_func
92
+
93
+ Args:
94
+ q: [total_q_tokens, num_heads, head_dim]
95
+ k: [total_k_tokens, num_kv_heads, head_dim]
96
+ v: [total_k_tokens, num_kv_heads, head_dim]
97
+ cu_seqlens_q: cumulative sequence lengths [batch_size + 1]
98
+ cu_seqlens_k: cumulative sequence lengths [batch_size + 1]
99
+ max_seqlen_q: maximum query sequence length
100
+ max_seqlen_k: maximum key/value sequence length
101
+
102
+ Returns:
103
+ output: [total_q_tokens, num_heads, head_dim]
104
+ """
105
+ batch_size = cu_seqlens_q.shape[0] - 1
106
+ H = q.shape[1]
107
+ D = q.shape[2]
108
+
109
+ if sm_scale is None:
110
+ sm_scale = 1.0 / math.sqrt(D)
111
+
112
+ outputs = []
113
+
114
+ # process batch by batch
115
+ for b in range(batch_size):
116
+ q_start, q_end = cu_seqlens_q[b].item(), cu_seqlens_q[b+1].item()
117
+ k_start, k_end = cu_seqlens_k[b].item(), cu_seqlens_k[b+1].item()
118
+
119
+ if q_start == q_end: # empty sequence
120
+ continue
121
+
122
+ # slice out q, k, v for this batch element
123
+ q_b = q[q_start:q_end] # [T_q, H, D]
124
+ k_b = k[k_start:k_end] # [T_k, H, D]
125
+ v_b = v[k_start:k_end] # [T_k, H, D]
126
+
127
+ T_q = q_b.shape[0]
128
+ T_k = k_b.shape[0]
129
+
130
+ # switch to [H, T, D] layout
131
+ q_b = rearrange(q_b, 't h d -> h t d')
132
+ k_b = rearrange(k_b, 't h d -> h t d')
133
+ v_b = rearrange(v_b, 't h d -> h t d')
134
+
135
+ # compute attention scores
136
+ scores = torch.matmul(q_b.float(), k_b.float().transpose(-2, -1)) * sm_scale
137
+
138
+ # Causal mask
139
+ if causal:
140
+ P_SEQ = T_k - T_q
141
+ causal_mask = torch.triu(
142
+ torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
143
+ diagonal=P_SEQ + 1
144
+ )
145
+ scores = scores.masked_fill(causal_mask[None, :, :], float('-inf'))
146
+
147
+ # Window mask
148
+ if window_size is not None and window_size != (-1, -1):
149
+ left_window, right_window = window_size
150
+ window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
151
+ for i in range(T_q):
152
+ start = max(0, i - left_window)
153
+ end = min(T_k, i + right_window + 1)
154
+ window_mask[i, start:end] = False
155
+ scores = scores.masked_fill(window_mask[None, :, :], float('-inf'))
156
+
157
+ # Softmax
158
+ attn_weights = F.softmax(scores, dim=-1)
159
+ attn_weights = torch.nan_to_num(attn_weights, 0.0)
160
+
161
+ # Apply attention
162
+ output_b = torch.matmul(attn_weights.to(v_b.dtype), v_b)
163
+
164
+ # back to [T, H, D] layout
165
+ output_b = rearrange(output_b, 'h t d -> t h d')
166
+ outputs.append(output_b)
167
+
168
+ # concatenate outputs from all batch elements
169
+ output = torch.cat(outputs, dim=0)
170
+
171
+ return output
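Since vanilla_attention_std is meant as a drop-in replacement for flash_attn_func, an editorial sanity check against PyTorch's built-in SDPA (assumes PyTorch >= 2.0; layouts differ, hence the rearranges):

import torch
import torch.nn.functional as F
from einops import rearrange

B, T, H, D = 2, 64, 4, 32
q, k, v = (torch.randn(B, T, H, D) for _ in range(3))
out = vanilla_attention_std(q, k, v, causal=True)
ref = F.scaled_dot_product_attention(
    rearrange(q, "b t h d -> b h t d"),
    rearrange(k, "b t h d -> b h t d"),
    rearrange(v, "b t h d -> b h t d"),
    is_causal=True,
)
assert torch.allclose(out, rearrange(ref, "b h t d -> b t h d"), atol=1e-5)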
ops/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+
2
+ # Framework mock for ndr compatibility
3
+ from . import framework_mock
ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes).
 
ops/__pycache__/direction_sensitive_geometric.cpython-310.pyc ADDED
Binary file (5.28 kB).
 
ops/__pycache__/forgetting_attention.cpython-310.pyc ADDED
Binary file (25.1 kB).
 
ops/__pycache__/forgetting_attention_std.cpython-310.pyc ADDED
Binary file (1.84 kB).
 
ops/__pycache__/framework_mock.cpython-310.pyc ADDED
Binary file (1.01 kB).
 
ops/__pycache__/geometric_attention_final.cpython-310.pyc ADDED
Binary file (2.16 kB).
 
ops/__pycache__/geometric_attention_std.cpython-310.pyc ADDED
Binary file (3.89 kB).
 
ops/__pycache__/layer_with_visualization.cpython-310.pyc ADDED
Binary file (2.17 kB).
 
ops/__pycache__/multi_head_attention.cpython-310.pyc ADDED
Binary file (6.92 kB).
 
ops/__pycache__/multi_head_relative_pos_attention.cpython-310.pyc ADDED
Binary file (8.08 kB).
 
ops/__pycache__/sliding_window_attention_std.cpython-310.pyc ADDED
Binary file (2.07 kB).
 
ops/__pycache__/stickbreaking_attention_std.cpython-310.pyc ADDED
Binary file (1.14 kB).
 
ops/__pycache__/vanilla_attention_std.cpython-310.pyc ADDED
Binary file (3.95 kB).
 
ops/direction_sensitive_geometric.py ADDED
@@ -0,0 +1,115 @@
1
+ import torch
2
+ from forgetting_transformer.ops.multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
3
+ from typing import Optional
4
+ from forgetting_transformer.ops.geometric_attention import geometric_attention_activation
5
+ import math
6
+ from forgetting_transformer.ops.multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
7
+
8
+
9
+ class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
10
+ def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
11
+ global_content_bias: bool = True, input_size: Optional[int] = None,
12
+ output_size: Optional[int] = None, normalize_score: bool = True):
13
+ super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
14
+
15
+ self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
16
+ self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
17
+ self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
18
+
19
+ self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
20
+ if global_content_bias else None
21
+
22
+ self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
23
+ self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
24
+ self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
25
+ self.normalize_score = normalize_score
26
+
27
+ self.input_size = state_size if input_size is None else input_size
28
+
29
+ print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
30
+
31
+ super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
32
+ self.reset_parameters()
33
+
34
+ def get_attention_scores(self, mask: Optional[torch.Tensor],
35
+ q_content: torch.Tensor, k_content: torch.Tensor,
36
+ q_pos: torch.Tensor,
37
+ pos_offset: int) -> torch.Tensor:
38
+
39
+ # content-content addressing
40
+ logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
41
+
42
+ # directionality. Do scaling here, less flops.
43
+ prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
44
+ fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
45
+
46
+ logits = logits * self.scale + fpos + self.s_bias
47
+
48
+ logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
49
+
50
+ logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
51
+
52
+ return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
53
+
54
+ def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
55
+ # data [batch * n_heads, len, c]
56
+ # bias [n_heads, c]
57
+ return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
58
+ if bias is not None else data
59
+
60
+ def _attention(self, mask: Optional[torch.Tensor],
61
+ q_content: torch.Tensor, k_content: torch.Tensor,
62
+ q_pos: torch.Tensor,
63
+ v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
64
+
65
+ scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
66
+
67
+ # Scores shape: [n_batch * n_heads, n_out, n_in]
68
+ return self._attention_read(mask, scores, v)
69
+
70
+ def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
71
+ pos_offset: int = 0, need_weights: bool = False):
72
+ # curr_state: [batch_size, out_len, c]
73
+ # attend_to: [batch_size, in_len, c]
74
+ batch_size, in_len = attend_to.shape[0:2]
75
+ out_len = curr_state.shape[1]
76
+
77
+ k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
78
+ q, = self.transform_data(curr_state, self.data_to_q, 1)
79
+ q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
80
+
81
+ q_content = self.add_head_specific_bias(q, self.global_content_bias)
82
+
83
+ data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
84
+ pos_offset, need_weights=need_weights)
85
+
86
+ if need_weights:
87
+ return data, scores
88
+ else:
89
+ return data
90
+
91
+ def reset_parameters(self):
92
+ torch.nn.init.xavier_uniform_(self.data_to_q.weight)
93
+ torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
94
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
95
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
96
+
97
+ if self.global_content_bias is not None:
98
+ self.global_content_bias.data.fill_(0)
99
+
100
+
101
+ class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
102
+ def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
103
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
104
+ a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
105
+
106
+ return torch.nn.init._no_grad_uniform_(tensor, -a, a)
107
+
108
+ def reset_parameters(self):
109
+ self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
110
+ self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
111
+ self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
112
+ self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
113
+
114
+ if self.global_content_bias is not None:
115
+ self.global_content_bias.data.fill_(0)
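The fpos term in get_attention_scores splits the learned per-head direction preferences into a strictly-upper (future) and strictly-lower (past) triangle, leaving the diagonal untouched (it is masked to -inf afterwards). A tiny editorial illustration of just that masking step, with constant stand-ins for the scaled q_pos values:

import torch

L, pos_offset = 5, 0
prefer_back = torch.full((L, L), -1.0)
prefer_front = torch.full((L, L), +1.0)
fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
print(fpos)  # +1 above the diagonal, -1 below, 0 on it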
ops/direction_sensitive_geometric.py.bak ADDED
@@ -0,0 +1,115 @@
1
+ import torch
2
+ from .multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
3
+ from typing import Optional
4
+ from .geometric_attention import geometric_attention_activation
5
+ import math
6
+ from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
7
+
8
+
9
+ class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
10
+ def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
11
+ global_content_bias: bool = True, input_size: Optional[int] = None,
12
+ output_size: Optional[int] = None, normalize_score: bool = True):
13
+ super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
14
+
15
+ self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
16
+ self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
17
+ self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
18
+
19
+ self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
20
+ if global_content_bias else None
21
+
22
+ self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
23
+ self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
24
+ self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
25
+ self.normalize_score = normalize_score
26
+
27
+ self.input_size = state_size if input_size is None else input_size
28
+
29
+ print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
30
+
31
+ super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
32
+ self.reset_parameters()
33
+
34
+ def get_attention_scores(self, mask: Optional[torch.Tensor],
35
+ q_content: torch.Tensor, k_content: torch.Tensor,
36
+ q_pos: torch.Tensor,
37
+ pos_offset: int) -> torch.Tensor:
38
+
39
+ # content-content addressing
40
+ logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
41
+
42
+ # directionality. Do scaling here, less flops.
43
+ prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
44
+ fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
45
+
46
+ logits = logits * self.scale + fpos + self.s_bias
47
+
48
+ logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
49
+
50
+ logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
51
+
52
+ return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
53
+
54
+ def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
55
+ # data [batch * n_heads, len, c]
56
+ # bias [n_heads, c]
57
+ return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
58
+ if bias is not None else data
59
+
60
+ def _attention(self, mask: Optional[torch.Tensor],
61
+ q_content: torch.Tensor, k_content: torch.Tensor,
62
+ q_pos: torch.Tensor,
63
+ v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
64
+
65
+ scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
66
+
67
+ # Scores shape: [n_batch * n_heads, n_out, n_in]
68
+ return self._attention_read(mask, scores, v)
69
+
70
+ def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
71
+ pos_offset: int = 0, need_weights: bool = False):
72
+ # curr_state: [batch_size, out_len, c]
73
+ # attend_to: [batch_size, in_len, c]
74
+ batch_size, in_len = attend_to.shape[0:2]
75
+ out_len = curr_state.shape[1]
76
+
77
+ k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
78
+ q, = self.transform_data(curr_state, self.data_to_q, 1)
79
+ q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
80
+
81
+ q_content = self.add_head_specific_bias(q, self.global_content_bias)
82
+
83
+ data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
84
+ pos_offset, need_weights=need_weights)
85
+
86
+ if need_weights:
87
+ return data, scores
88
+ else:
89
+ return data
90
+
91
+ def reset_parameters(self):
92
+ torch.nn.init.xavier_uniform_(self.data_to_q.weight)
93
+ torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
94
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
95
+ torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
96
+
97
+ if self.global_content_bias is not None:
98
+ self.global_content_bias.data.fill_(0)
99
+
100
+
101
+ class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
102
+ def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
103
+ std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
104
+ a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
105
+
106
+ return torch.nn.init._no_grad_uniform_(tensor, -a, a)
107
+
108
+ def reset_parameters(self):
109
+ self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
110
+ self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
111
+ self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
112
+ self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
113
+
114
+ if self.global_content_bias is not None:
115
+ self.global_content_bias.data.fill_(0)
ops/forgetting_attention.py ADDED
@@ -0,0 +1,1138 @@
1
+ """
2
+ Implementation of Forgetting Attention.
3
+
4
+ Our code is adapted from https://github.com/FlagOpen/FlagAttention/blob/ee91638dec6da8c00c4113d179f469e0ffcd5852/src/flag_attn/flash.py. The code is modified to implement Forgetting Attention.
5
+
6
+ The original license info from FlagAttention:
7
+
8
+ Copyright 2023 BAAI
9
+
10
+ Licensed under the Apache License, Version 2.0 (the "License");
11
+ you may not use this file except in compliance with the License.
12
+ You may obtain a copy of the License at
13
+
14
+ http://www.apache.org/licenses/LICENSE-2.0
15
+
16
+ Unless required by applicable law or agreed to in writing, software
17
+ distributed under the License is distributed on an "AS IS" BASIS,
18
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ See the License for the specific language governing permissions and
20
+ limitations under the License.
21
+ """
22
+ import pytest
23
+ import math
24
+ import torch
25
+ import triton
26
+ import triton.language as tl
27
+ from einops import rearrange
28
+ from typing import Optional
29
+
30
+
31
+ __all__ = ["forgetting_attention"]
32
+
33
+
34
+ # File flash.py
35
+ def maybe_contiguous(x):
36
+ # only when the inner most dimension is contiguous can LDGSTS be used
37
+ # so inner-dimension contiguity is enforced.
38
+ return x.contiguous() if x.stride(-1) != 1 else x
39
+
40
+ def rounded_multiple(a, b):
41
+ return (a + b - 1) // b * b
42
+
43
+ # --------------------------- public API ---------------------------
44
+ class ForgettingAttention(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, q, k, v, log_fgate, seq_start, causal, sm_scale, return_log_normalizer):
47
+ assert causal, "Only causal attention is supported"
48
+ Dq, Dk, Dv = q.shape[-1], k.shape[-1], v.shape[-1]
49
+ assert Dq == Dk == Dv, "feature size of q, k, v should be equal"
50
+ assert Dk in {16, 32, 64, 128}, "We only support head dims in {16, 32, 64, 128}"
51
+
52
+ B, H, M, D = q.shape
53
+ if seq_start is not None:
54
+ has_seq_start = True
55
+ assert seq_start.shape == (B,)
56
+ else:
57
+ has_seq_start = False
58
+ seq_start = torch.zeros((B,), device=q.device, dtype=torch.long)
59
+ N = k.shape[2]
60
+ assert log_fgate.shape == (B, H, N)
61
+ log_fgate = log_fgate.float()
62
+ if has_seq_start:
63
+ log_fgate = log_fgate.clone()
64
+ # We absolutely don't want masked value to affect result. If we
65
+ # don't do this then it could via affecting numerical precision of
66
+ # cumsum
67
+ mask_index = (torch.arange(N, device=q.device)[None, None, :] < seq_start[:, None, None])
68
+ mask_index = torch.broadcast_to(mask_index, log_fgate.size())
69
+ log_fgate[mask_index] = 0.0
70
+
71
+ log_lambda = torch.cumsum(log_fgate, dim=-1, dtype=log_fgate.dtype).float()
72
+
73
+ Hk, Hv = k.shape[1], v.shape[1]
74
+ assert Hk == Hv, "num of heads in k and v should be equal"
75
+ assert H == Hk, "grouped query attention has not been tested. You can remove this assert if you know what you are doing."
76
+ assert H % Hk == 0, "number of heads in q must be a multiple of that in k & v"
77
+ num_groups = H // Hk
78
+
79
+ P_SEQ = N - M
80
+ larger_m = M > N
81
+ assert (not larger_m), "The key/value tensors must be at least as long as the query tensor"
82
+
83
+ if sm_scale is None:
84
+ sm_scale = 1. / math.sqrt(D)
85
+
86
+ # contiguity
87
+ q, k, v = maybe_contiguous(q), maybe_contiguous(k), maybe_contiguous(v)
88
+
89
+ # to work around https://github.com/openai/triton/issues/2441
90
+ device = torch.cuda.device_of(q)
91
+
92
+ with torch.cuda.device(device):
93
+
94
+ config = get_fwd_config(B, H, M, N, D, causal)
95
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
96
+
97
+ divisible_m = M % BLOCK_M == 0
98
+ divisible_n = N % BLOCK_N == 0
99
+ # consider using 3d grid to avoid div & rem
100
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
101
+ o = torch.empty_like(q)
102
+ L = torch.empty((B, H, M), device=q.device, dtype=torch.float32)
103
+ _fwd_kernel[grid](
104
+ q, k, v, log_lambda, seq_start, sm_scale,
105
+ L, o,
106
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
107
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
108
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
109
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
110
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
111
+ B, H, M, N, P_SEQ, num_groups,
112
+ BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=D,
113
+ IS_CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
114
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
115
+ num_warps=num_warps, num_stages=num_stages,
116
+ )
117
+
118
+ # autograd context maintenance
119
+ ctx.save_for_backward(q, k, v, o, L, log_lambda, seq_start)
120
+ ctx.sm_scale = sm_scale
121
+ ctx.causal = causal
122
+ ctx.has_seq_start = has_seq_start
123
+
124
+ has_extra_return = return_log_normalizer
125
+ if has_extra_return:
126
+ outs = (
127
+ o,
128
+ L if return_log_normalizer else None,
129
+ )
130
+ return outs
131
+ return o
132
+
133
+ @staticmethod
134
+ def backward(ctx, do, *ignored):
135
+ q, k, v, o, L, log_lambda, seq_start = ctx.saved_tensors
136
+ sm_scale = ctx.sm_scale
137
+ causal = ctx.causal
138
+ has_seq_start = ctx.has_seq_start
139
+
140
+ B, H, M, D = q.shape
141
+ N = k.shape[2]
142
+ Hk = k.shape[1]
143
+ num_groups = H // Hk
144
+ P_SEQ = N - M
145
+ larger_m = M > N
146
+
147
+ if sm_scale is None:
148
+ sm_scale = 1. / math.sqrt(D)
149
+
150
+ # to work around https://github.com/openai/triton/issues/2441
151
+ device = torch.cuda.device_of(q)
152
+ with torch.cuda.device(device):
153
+ config = get_bwd_config(B, H, M, N, D, causal)
154
+ BLOCK_M, BLOCK_N, num_stages, num_warps = config
155
+
156
+ divisible_m = M % BLOCK_M == 0
157
+ divisible_n = N % BLOCK_N == 0
158
+
159
+ delta = torch.empty_like(L)
160
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
161
+ _bwd_preprocess[grid](
162
+ o, do,
163
+ delta,
164
+ o.stride(0), o.stride(1), o.stride(2), o.stride(3),
165
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
166
+ delta.stride(0), delta.stride(1), delta.stride(2),
167
+ M,
168
+ BLOCK_M=BLOCK_M, D_HEAD=D,
169
+ DIVISIBLE_M=divisible_m,
170
+ )
171
+
172
+ # NOTE that dk & dv always have the same number of heads as q, instead of k & v.
173
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_kv_config(B, H, M, N, D, causal)
174
+ divisible_m = M % BLOCK_M == 0
175
+ divisible_n = N % BLOCK_N == 0
176
+
177
+ dk = torch.empty((B, H, N, D), dtype=k.dtype, device=q.device)
178
+ dv = torch.empty((B, H, N, D), dtype=v.dtype, device=q.device)
179
+ dlog_lambda = torch.empty((B, H, N), dtype=log_lambda.dtype, device=q.device)
180
+ grid = (triton.cdiv(N, BLOCK_N), H, B)
181
+ _bwd_kv_kernel[grid](
182
+ q, k, v, log_lambda, seq_start, sm_scale, do,
183
+ dk, dv, dlog_lambda,
184
+ L, delta,
185
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
186
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
187
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
188
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
189
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
190
+ dk.stride(0), dk.stride(1), dk.stride(2), dk.stride(3),
191
+ dv.stride(0), dv.stride(1), dv.stride(2), dv.stride(3),
192
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
193
+ B, H, M, N, P_SEQ,
194
+ num_groups,
195
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N, CAUSAL=causal,
196
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n, HAS_SEQ_START=has_seq_start,
197
+ num_stages=num_stages, num_warps=num_warps,
198
+ )
199
+
200
+ BLOCK_M, BLOCK_N, num_stages, num_warps = get_bwd_q_config(B, H, M, N, D, causal)
201
+ divisible_m = M % BLOCK_M == 0
202
+ divisible_n = N % BLOCK_N == 0
203
+ dq = torch.zeros_like(q)
204
+ grid = (triton.cdiv(M, BLOCK_M), H, B)
205
+ _bwd_q_kernel[grid](
206
+ q, k, v, log_lambda, seq_start, sm_scale, do,
207
+ dq, dlog_lambda,
208
+ L, delta,
209
+ q.stride(0), q.stride(1), q.stride(2), q.stride(3),
210
+ k.stride(0), k.stride(1), k.stride(2), k.stride(3),
211
+ v.stride(0), v.stride(1), v.stride(2), v.stride(3),
212
+ log_lambda.stride(0), log_lambda.stride(1), log_lambda.stride(2),
213
+ do.stride(0), do.stride(1), do.stride(2), do.stride(3),
214
+ dq.stride(0), dq.stride(1), dq.stride(2), dq.stride(3),
215
+ dlog_lambda.stride(0), dlog_lambda.stride(1), dlog_lambda.stride(2),
216
+ B, H, M, N, P_SEQ,
217
+ num_groups,
218
+ BLOCK_M=BLOCK_M, BLOCK_DMODEL=D, BLOCK_N=BLOCK_N,
219
+ CAUSAL=causal, LARGER_M=larger_m, HAS_SEQ_START=has_seq_start,
220
+ DIVISIBLE_M=divisible_m, DIVISIBLE_N=divisible_n,
221
+ num_stages=num_stages, num_warps = num_warps,
222
+ )
223
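+ # GQA reduction (comment added for clarity): dk/dv were accumulated with one
+ # slice per query head; sum over the num_groups query heads that share each
+ # of the Hk key/value heads.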
+ dk = dk.reshape((B, Hk, num_groups, N, D)).sum(2)
224
+ dv = dv.reshape((B, Hk, num_groups, N, D)).sum(2)
225
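+ # log_lambda = cumsum(log_fgate) along the sequence, so by the chain rule
+ # dlog_fgate[i] = sum_{j >= i} dlog_lambda[j], i.e. a reverse cumulative sum,
+ # computed here as dlog_lambda + (total - forward cumsum).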
+ dcumsum = torch.cumsum(dlog_lambda, dim=-1, dtype=log_lambda.dtype)
226
+ dlog_fgate = dlog_lambda + dcumsum[..., -1:] - dcumsum
227
+ dlog_fgate = dlog_fgate.float()
228
+ return dq, dk, dv, dlog_fgate, None, None, None, None, None, None, None
229
+
230
+
231
+ def forgetting_attention(
232
+ q: torch.Tensor,
233
+ k: torch.Tensor,
234
+ v: torch.Tensor,
235
+ log_fgate: torch.Tensor,
236
+ *,
237
+ head_first: bool = False,
238
+ seq_start: Optional[torch.Tensor] = None,
239
+ sm_scale: Optional[float] = None,
240
+ ):
241
+ """
242
+ A FlashAttention-based implementation of Forgetting Attention.
243
+
244
+ Note:
245
+ - We recommend bfloat16/float16 for q, k, v and float32 for log_fgate. float32 for
246
+ q, k, v is also supported, but the kernel will not use tensor cores if q, k, v are
247
+ in float32 (which would be slow).
248
+ - We only support seqlen_q <= seqlen_k
249
+ - We only support causal attention
250
+ - Head dimension must be in one of {16, 32, 64, 128}
251
+
252
+ Arguments:
253
+ - q: (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
254
+ - k: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
255
+ - v: (batch_size, seqlen_k, num_heads, head_dim) unless head_first=True.
256
+ - log_fgate: (batch_size, seqlen_k, num_heads) unless head_first=True.
257
+ This should be the **log** of the forget gates. This is typically the
258
+ output of torch.nn.functional.logsigmoid.
259
+ - head_first: if True, the num_heads and seqlen_* axes of all
260
+ FloatTensor inputs and outputs are ordered as (num_heads, seq_len_*)
261
+ instead of (seq_len_*, num_heads)
262
+ - seq_start: If not None, should be LongTensor with shape (batch_size,)
263
+ and range in [0, seq_len_k). For each batch index batch_id, no attention
264
+ will be allocated to tokens before the token index seq_start[batch_id].
265
+ This is useful for left-padded inputs.
266
+ - sm_scale: The scaling of attention scores before applying softmax. If
267
+ None, it defaults to (1.0 / math.sqrt(head_dim))
268
+
269
+ Returns:
270
+ out (torch.Tensor): (batch_size, seqlen_q, num_heads, head_dim) unless head_first=True.
271
+ """
272
+ if not head_first:
273
+ q, k, v = [rearrange(item, "b t h d -> b h t d") for item in (q, k, v)]
274
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
275
+ out = ForgettingAttention.apply(q, k, v, log_fgate, seq_start, True, sm_scale, False)
276
+ if not head_first:
277
+ out = rearrange(out, "b h t d -> b t h d")
278
+ return out
279
+
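+ # Usage sketch (added for illustration; assumes a CUDA device with Triton
+ # installed, and shapes as described in the docstring above):
+ #
+ #   B, T, H, D = 2, 1024, 8, 64
+ #   q = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ #   k = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ #   v = torch.randn(B, T, H, D, device="cuda", dtype=torch.bfloat16)
+ #   fgate_logit = torch.randn(B, T, H, device="cuda", dtype=torch.float32)
+ #   log_fgate = torch.nn.functional.logsigmoid(fgate_logit)
+ #   out = forgetting_attention(q, k, v, log_fgate)  # (B, T, H, D)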
280
+
281
+ # --------------------------- Forward ---------------------------
282
+ # NOTE: this function can be overwritten at runtime to use your custom config
283
+ def get_fwd_config(B, H, M, N, D, causal):
284
+ assert causal
285
+ if torch.cuda.get_device_capability() == (8, 0):
286
+ if D <= 64:
287
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 32, 3, 4
288
+ else:
289
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
290
+ elif torch.cuda.get_device_capability() == (9, 0):
291
+ # H100
292
+ if D <= 64:
293
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 8
294
+ else:
295
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
296
+ elif torch.cuda.get_device_capability() == (8, 6):
297
+ if not causal:
298
+ if D <= 64:
299
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
300
+ else:
301
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
302
+ else: # causal
303
+ if D <= 64:
304
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 3, 4
305
+ else:
306
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
307
+ elif torch.cuda.get_device_capability() == (8, 9):
308
+ # L40S
309
+ if D <= 64:
310
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 2, 4
311
+ else:
312
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 2, 4
313
+ else:
314
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
315
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
316
+
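+ # Override sketch (illustrative): as the NOTE above says, this function can
+ # be monkey-patched at runtime with a custom tile configuration, e.g.
+ #
+ #   import ops.forgetting_attention as fa   # hypothetical import path
+ #   fa.get_fwd_config = lambda B, H, M, N, D, causal: (64, 64, 2, 4)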
317
+
318
+ @triton.jit
319
+ def _fwd_kernel(
320
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale,
321
+ L, O,
322
+ stride_qz, stride_qh, stride_qm, stride_qk,
323
+ stride_kz, stride_kh, stride_kn, stride_kk,
324
+ stride_vz, stride_vh, stride_vn, stride_vk,
325
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
326
+ stride_oz, stride_oh, stride_om, stride_ok,
327
+ Z, H, M, N, P_SEQ,
328
+ num_groups,
329
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
330
+ IS_CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
331
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
332
+ ):
333
+ input_dtype = Q.dtype.element_ty
334
+ # -- grid id --
335
+ start_m = tl.program_id(0)
336
+ off_h = tl.program_id(1)
337
+ off_z = tl.program_id(2)
338
+
339
+ # scale sm_scale by log_2(e) and use
340
+ # 2^x instead of exp in the loop because CSE and LICM
341
+ # don't work as expected with `exp` in the loop
342
+ log2e: tl.constexpr = 1.4426950408889634
343
+ loge2: tl.constexpr = 0.6931471805599453
344
+ qk_scale = sm_scale * log2e
345
+
346
+ # offset pointers for (batch, head)
347
+ off_hk = off_h // num_groups
348
+ Q += off_z * stride_qz + off_h * stride_qh
349
+ K += off_z * stride_kz + off_hk * stride_kh
350
+ V += off_z * stride_vz + off_hk * stride_vh
351
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
352
+ O += off_z * stride_oz + off_h * stride_oh
353
+ L += (off_z * H + off_h) * M # l's shape is (B, H, M)
354
+
355
+ offs_m_base = tl.arange(0, BLOCK_M)
356
+ offs_m = start_m * BLOCK_M + offs_m_base
357
+ offs_n_base = tl.arange(0, BLOCK_N)
358
+ offs_k = tl.arange(0, BLOCK_DMODEL)
359
+
360
+
361
+ # initialize pointers to value-like data
362
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
363
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
364
+ o_ptrs = O + (offs_m[:, None] * stride_om + offs_k[None, :] * stride_ok) # (BLOCK_M, BLOCK_DMODEL)
365
+ l_ptrs = L + offs_m
366
+
367
+ # initialize pointer to m and l, fp32 for accumulators
368
+ m_i = tl.full([BLOCK_M], value=-float("inf"), dtype=tl.float32)
369
+ l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
370
+ acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
371
+
372
+ # load q
373
+ if DIVISIBLE_M:
374
+ q = tl.load(q_ptrs, cache_modifier=".cg")
375
+ log_lambda_out = tl.load(log_lambda_out_ptrs, cache_modifier=".cg")
376
+ else:
377
+ mask_m = offs_m < M
378
+ q = tl.load(q_ptrs, mask=mask_m[:, None], cache_modifier=".cg")
379
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m, cache_modifier=".cg")
380
+
381
+ # Dot-I trick: placing q in registers saves shared memory
382
+ # if BLOCK_DMODEL < 128:
383
+ # I = tl.where(offs_k[:, None] == offs_k,
384
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 1.0, dtype=input_dtype),
385
+ # tl.full((BLOCK_DMODEL, BLOCK_DMODEL), 0.0, dtype=input_dtype))
386
+ # q = tl.dot(q, I, input_precision="ieee").to(input_dtype)
387
+ # else:
388
+ # I = tl.where(offs_m_base[:, None] == offs_m_base,
389
+ # tl.full((BLOCK_M, BLOCK_M), 1.0, dtype=input_dtype),
390
+ # tl.full((BLOCK_M, BLOCK_M), 0.0, dtype=input_dtype))
391
+ # q = tl.dot(I, q, input_precision="ieee").to(input_dtype)
392
+
393
+ # NOTE: Loop-Bound-For-N
395
+ # The indices in the m-dimension that this block may access are in `[start_m * BLOCK_M, (start_m + 1) * BLOCK_M)`.
396
+ # By the rule of causal masking, the max index in the n-dimension that this block may access
397
+ # is `P_SEQ + (start_m + 1) * BLOCK_M`.
398
+ # However, the upper bound of the n-index should never exceed the sequence length of k/v (`P_SEQ + N_CTX`), and
399
+ # `P_SEQ + (start_m + 1) * BLOCK_M` may be larger than `N`.
400
+ # In that case, there would be an illegal memory access when loading the k & v tiles
401
+ # if mask_n is not applied for loading (i.e. when `DIVISIBLE_N` is true).
401
+ # See also https://github.com/FlagOpen/FlagAttention/pull/8
402
+ if IS_CAUSAL:
403
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
404
+ if LARGER_M:
405
+ hi = tl.maximum(0, hi)
406
+ else:
407
+ hi = N
408
+
409
+ offs_n_init = offs_n_base
410
+ if HAS_SEQ_START:
411
+ SEQ_START += off_z
412
+ seq_start = tl.load(SEQ_START)
413
+ lo = tl.minimum(seq_start, hi)
414
+ lo = (lo // BLOCK_N) * BLOCK_N
415
+ offs_n_init += lo
416
+ else:
417
+ lo = 0
418
+ seq_start = 0
419
+
420
+ # loop over k, v and update accumulators
421
+ k_ptrs = K + (offs_k[:, None] * stride_kk + offs_n_init[None, :] * stride_kn) # (BLOCK_DMODEL, BLOCK_N)
422
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
423
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n) # (BLOCK_N,)
424
+ for start_n in range(lo, hi, BLOCK_N):
425
+ start_n = tl.multiple_of(start_n, BLOCK_N)
426
+ offs_n = start_n + offs_n_base
427
+
428
+ # -- load k, v --
429
+ if DIVISIBLE_N:
430
+ k = tl.load(k_ptrs, cache_modifier=".cg")
431
+ v = tl.load(v_ptrs, cache_modifier=".cg")
432
+ log_lambda_in = tl.load(log_lambda_in_ptrs, cache_modifier=".cg")
433
+ else:
434
+ mask_n = offs_n < N
435
+ k = tl.load(k_ptrs, mask=mask_n[None, :], cache_modifier=".cg")
436
+ v = tl.load(v_ptrs, mask=mask_n[:, None], cache_modifier=".cg")
437
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n, cache_modifier=".cg")
438
+
439
+ # -- compute qk ---
440
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
441
+ s = tl.dot(q, k, input_precision="ieee") * qk_scale
442
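+ # forgetting-attention score in log space: s_ij = (q_i . k_j) * sm_scale
+ # + (Lambda_i - Lambda_j) with Lambda = cumsum(log_fgate); the bias is
+ # multiplied by log2(e) because the kernel uses exp2 instead of exp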
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
443
+ s += decay_bias * log2e
444
+
445
+ if not DIVISIBLE_N:
446
+ s = tl.where(mask_n[None, :], s, float("-inf"))
447
+ if IS_CAUSAL:
448
+ causal_mask = (P_SEQ + offs_m[:, None]) >= offs_n[None, :]
449
+ s = tl.where(causal_mask, s, float("-inf"))
450
+ if HAS_SEQ_START:
451
+ s = tl.where(offs_n[None, :] >= seq_start, s, float("-inf"))
452
+
453
+
454
+ # -- compute scaling constant ---
455
+ m_i_new = tl.maximum(m_i, tl.max(s, 1))
456
+ alpha = tl.math.exp2((m_i - m_i_new))
457
+ p = tl.math.exp2(s - m_i_new[:, None])
458
+
459
+ # -- compute partial sumexpn before applying dropout
460
+ p_sum = tl.sum(p, 1)
461
+
462
+
463
+ # -- scale and update acc: acc *= alpha[:, None]--
464
+ acc *= alpha[:, None]
465
+ acc += tl.dot(p.to(input_dtype), v, input_precision="ieee")
466
+
467
+ # -- update m_i and l_i --
468
+ l_i = l_i * alpha + p_sum
469
+ m_i = m_i_new
470
+ # update pointers
471
+ k_ptrs += BLOCK_N * stride_kn
472
+ v_ptrs += BLOCK_N * stride_vn
473
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
474
+
475
+ # write back l & o
476
+ if IS_CAUSAL and (LARGER_M or HAS_SEQ_START):
477
+ is_empty_line = (offs_m + P_SEQ) < seq_start
478
+ acc = tl.where(is_empty_line[:, None], 0.0, acc * (1.0 / l_i[:, None]))
479
+ l = tl.where(is_empty_line, float("-inf"), m_i * loge2 + tl.log(l_i))
480
+ else:
481
+ acc = acc * (1.0 / l_i[:, None])
482
+ l = m_i * loge2 + tl.log(l_i) # log(normalizer)
483
+
484
+
485
+ if DIVISIBLE_M:
486
+ tl.store(l_ptrs, l, cache_modifier=".cg")
487
+ tl.store(o_ptrs, acc.to(input_dtype), cache_modifier=".cg")
488
+ else:
489
+ tl.store(l_ptrs, l, mask=mask_m, cache_modifier=".cg")
490
+ tl.store(o_ptrs, acc.to(input_dtype), mask=mask_m[:, None], cache_modifier=".cg")
491
+
492
+
493
+ # --------------------------- Backward ---------------------------
494
+ # NOTE: this function can be overwritten at runtime to use your custom config
495
+ def get_bwd_config(B, H, M, N, D, causal):
496
+ if torch.cuda.get_device_capability() == (9, 0):
497
+ if not causal:
498
+ BLOCK_M = 128 if D <= 64 else 64
499
+ BLOCK_N = 64
500
+ num_stages = 2
501
+ num_warps = 4
502
+ else:
503
+ BLOCK_M = 64
504
+ BLOCK_N = 64
505
+ num_stages = 3 if D <= 64 else 2
506
+ num_warps = 4
507
+ elif torch.cuda.get_device_capability() == (8, 0):
508
+ if not causal:
509
+ BLOCK_M = 128 if D <= 64 else 64
510
+ BLOCK_N = 64
511
+ num_stages = 2
512
+ num_warps = 4
513
+ else:
514
+ BLOCK_M = 64
515
+ BLOCK_N = 64
516
+ num_stages = 3 if D <= 64 else 2
517
+ num_warps = 4
518
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
519
+ if not causal:
520
+ if D <= 64:
521
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
522
+ else:
523
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 8
524
+ else:
525
+ if D <= 64:
526
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
527
+ else:
528
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
529
+ else:
530
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 1, 4
531
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
532
+
533
+ def get_bwd_kv_config(B, H, M, N, D, causal):
534
+ assert causal
535
+ if torch.cuda.get_device_capability() == (8, 0): # A100
536
+ if D <= 64:
537
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 4, 4
538
+ else:
539
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 4, 8
540
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
541
+ if D <= 64:
542
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
543
+ else:
544
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
545
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
546
+ if D <= 64:
547
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 128, 4, 8
548
+ else:
549
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 128, 2, 8
550
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
551
+ if D <= 64:
552
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
553
+ else:
554
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
555
+ else:
556
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
557
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
558
+
559
+ def get_bwd_q_config(B, H, M, N, D, causal):
560
+ assert causal
561
+ if torch.cuda.get_device_capability() == (8, 0): # A100
562
+ if D <= 64:
563
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 3, 4
564
+ else:
565
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 64, 4, 8
566
+ elif torch.cuda.get_device_capability() == (8, 6): # tune for RTX-3090, device_capability(8, 6)
567
+ if D <= 64:
568
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
569
+ else:
570
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 32, 32, 2, 4
571
+ elif torch.cuda.get_device_capability() == (8, 9): # L40S
572
+ if D <= 64:
573
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 4, 4
574
+ else:
575
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 32, 3, 4
576
+ elif torch.cuda.get_device_capability() == (9, 0): # H100
577
+ if D <= 64:
578
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 4, 8
579
+ else:
580
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 128, 128, 2, 8
581
+ else:
582
+ BLOCK_M, BLOCK_N, num_stages, num_warps = 64, 64, 2, 4
583
+ return (BLOCK_M, BLOCK_N, num_stages, num_warps)
584
+
585
+
586
+ @triton.jit
587
+ def _bwd_preprocess(
588
+ Out, DO,
589
+ Delta,
590
+ stride_oz, stride_oh, stride_om, stride_ok,
591
+ stride_doz, stride_doh, stride_dom, stride_dok,
592
+ stride_dz, stride_dh, stride_dm,
593
+ M,
594
+ BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
595
+ DIVISIBLE_M: tl.constexpr,
596
+ ):
597
+ off_h = tl.program_id(1)
598
+ off_z = tl.program_id(2)
599
+ Out += off_z * stride_oz + off_h * stride_oh
600
+ DO += off_z * stride_doz + off_h * stride_doh
601
+ Delta += off_z * stride_dz + off_h * stride_dh
602
+
603
+ # compute delta = rowsum(O * dO), the row-wise term used in the softmax backward (ds = p * (dp - delta))
604
+ off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
605
+ off_n = tl.arange(0, D_HEAD)
606
+
607
+ # load
608
+ o_ptrs = Out + off_m[:, None] * stride_om + off_n[None, :] * stride_ok
609
+ do_ptrs = DO + off_m[:, None] * stride_dom + off_n[None, :] * stride_dok
610
+
611
+ if DIVISIBLE_M:
612
+ o = tl.load(o_ptrs).to(tl.float32)
613
+ do = tl.load(do_ptrs).to(tl.float32)
614
+ else:
615
+ mask_m = off_m < M
616
+ o = tl.load(o_ptrs, mask=mask_m[:, None]).to(tl.float32)
617
+ do = tl.load(do_ptrs, mask=mask_m[:, None]).to(tl.float32)
618
+
619
+ # compute
620
+ delta = tl.sum(o * do, axis=1)
621
+
622
+ # write-back
623
+ d_ptrs = Delta + off_m * stride_dm
624
+ if DIVISIBLE_M:
625
+ tl.store(d_ptrs, delta)
626
+ else:
627
+ tl.store(d_ptrs, delta, mask=mask_m)
628
+
629
+
630
+ @triton.jit
631
+ def _bwd_kv_kernel(
632
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
633
+ DK, DV, DLOG_LAMBDA,
634
+ L,
635
+ D,
636
+ stride_qz, stride_qh, stride_qm, stride_qk,
637
+ stride_kz, stride_kh, stride_kn, stride_kk,
638
+ stride_vz, stride_vh, stride_vn, stride_vk,
639
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
640
+ stride_doz, stride_doh, stride_dom, stride_dok,
641
+ stride_dkz, stride_dkh, stride_dkn, stride_dkk,
642
+ stride_dvz, stride_dvh, stride_dvn, stride_dvk,
643
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
644
+ Z, H, M, N, P_SEQ,
645
+ num_groups,
646
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
647
+ CAUSAL: tl.constexpr,
648
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr, HAS_SEQ_START: tl.constexpr,
649
+ ):
650
+ input_dtype = Q.dtype.element_ty
651
+ # -- grid id --
652
+ start_n = tl.program_id(0)
653
+ off_h = tl.program_id(1)
654
+ off_z = tl.program_id(2)
655
+ log2e: tl.constexpr = 1.4426950408889634
656
+ qk_scale = sm_scale * log2e
657
+
658
+ # offset pointers for (batch, head)
659
+ off_hk = off_h // num_groups
660
+ Q += off_z * stride_qz + off_h * stride_qh
661
+ K += off_z * stride_kz + off_hk * stride_kh
662
+ V += off_z * stride_vz + off_hk * stride_vh
663
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
664
+ DO += off_z * stride_doz + off_h * stride_doh
665
+
666
+ # offset pointers for batch/head
667
+ DK += off_z * stride_dkz + off_h * stride_dkh
668
+ DV += off_z * stride_dvz + off_h * stride_dvh
669
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
670
+
671
+ # offset pointers for batch/head
672
+ D += (off_z * H + off_h) * M
673
+ L += (off_z * H + off_h) * M
674
+
675
+ if CAUSAL:
676
+ lo = tl.maximum(start_n * BLOCK_N - P_SEQ, 0)
677
+ lo = (lo // BLOCK_M) * BLOCK_M
678
+ else:
679
+ lo = 0
680
+
681
+ offs_m_init = lo + tl.arange(0, BLOCK_M)
682
+ offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N)
683
+ offs_m_base = tl.arange(0, BLOCK_M)
684
+ offs_k = tl.arange(0, BLOCK_DMODEL)
685
+
686
+ # initialize pointers to value-like data
687
+ q_ptrs = Q + (offs_m_init[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
688
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m_init) * stride_log_lambda_n # (BLOCK_M,)
689
+ k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
690
+ v_ptrs = V + (offs_n[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
691
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n * stride_log_lambda_n) # (BLOCK_N,)
692
+ do_ptrs = DO + (offs_m_init[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
693
+
694
+ dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) # (BLOCK_N, BLOCK_DMODEL)
695
+ dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) # (BLOCK_N, BLOCK_DMODEL)
696
+ dlog_lambda_in_ptrs = DLOG_LAMBDA + (offs_n * stride_dlog_lambda_n) # (BLOCK_N,)
697
+
698
+ # k and v stay in SRAM throughout
699
+ if DIVISIBLE_N:
700
+ v = tl.load(v_ptrs)
701
+ k = tl.load(k_ptrs)
702
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
703
+ else:
704
+ mask_n = offs_n < N
705
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
706
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
707
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
708
+
709
+ # If the whole N block lies before seq_start, its gradients are zero, so skip the loop
710
+ if HAS_SEQ_START:
711
+ SEQ_START += off_z
712
+ seq_start = tl.load(SEQ_START)
713
+ hi = tl.where(start_n * BLOCK_N + BLOCK_N >= seq_start - 1, M, lo)
714
+ else:
715
+ hi = M
716
+
717
+ # initialize dk and dv
718
+ dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
719
+ dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32)
720
+ dlog_lambda_in = tl.zeros([BLOCK_N], dtype=tl.float32)
721
+
722
+ # loop over a col
723
+ for start_m in range(lo, hi, BLOCK_M):
724
+ start_m = tl.multiple_of(start_m, BLOCK_M)
725
+ offs_m = start_m + offs_m_base
726
+ causal_mask = (P_SEQ + offs_m[None, :]) >= (offs_n[:, None]) # (BLOCK_N, BLOCK_M)
727
+
728
+ # load q, log_lambda_out, do on-chip
729
+ if DIVISIBLE_M:
730
+ q = tl.load(q_ptrs)
731
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
732
+ else:
733
+ mask_m = offs_m < M
734
+ valid_mask = mask_m[None, :] # & mask_n
735
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
736
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
737
+ # recompute p = softmax(qk * sm_scale, dim=-1)
738
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
739
+ sT = tl.dot(k, tl.trans(q), input_precision="ieee") * qk_scale
740
+ decay_bias = log_lambda_out[None, :] - log_lambda_in[:, None]
741
+ sT += decay_bias * log2e
742
+ # NOTE: since the softmax in the backward pass is pointwise, the normalizer has been saved in fwd.
743
+ # So masking on s is not needed.
744
+ # s = tl.where(valid_mask, s , float("-inf"))
745
+ # if CAUSAL:
746
+ # s = tl.where(causal_mask, s, float("-inf"))
747
+
748
+ # -- recompute p ---
749
+ if DIVISIBLE_M:
750
+ l = tl.load(L + offs_m)
751
+ else:
752
+ l = tl.load(L + offs_m, mask=mask_m)
753
+ pT = tl.math.exp2(sT - l[None, :] * log2e) # (BLOCK_N, BLOCK_M)
754
+
755
+ if not DIVISIBLE_M:
756
+ pT = tl.where(valid_mask, pT, 0.0)
757
+ if CAUSAL:
758
+ pT = tl.where(causal_mask, pT, 0.0)
759
+
760
+ # compute dv = dot(p, do)
761
+ if DIVISIBLE_M:
762
+ do = tl.load(do_ptrs)
763
+ else:
764
+ do = tl.load(do_ptrs, mask=mask_m[:, None]) # (BLOCK_M, BLOCK_DMODEL)
765
+
766
+
767
+ dv += tl.dot(pT.to(input_dtype), do, input_precision="ieee") # (BLOCK_N, BLOCK_DMODEL) # still correct
768
+
769
+ # compute dp = dot(v, do)
770
+ if DIVISIBLE_M:
771
+ delta = tl.load(D + offs_m)
772
+ else:
773
+ delta = tl.load(D + offs_m, mask=mask_m)
774
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
775
+ dpT = tl.dot(v, tl.trans(do), input_precision="ieee")
776
+
777
+
778
+ # compute ds = p * (dp - delta[:, None])
779
+ dsT = pT * (dpT - delta[None, :]) # (BLOCK_N, BLOCK_M)
780
+
781
+ if not DIVISIBLE_M:
782
+ dsT = tl.where(valid_mask, dsT, 0.0)
783
+ if CAUSAL:
784
+ dsT = tl.where(causal_mask, dsT, 0.0)
785
+
786
+ # compute dk = dot(ds.T, q) masking
787
+ dk += tl.dot(dsT.to(input_dtype), q, input_precision="ieee")
788
+ dlog_lambda_in += -tl.sum(dsT, axis=1)
789
+
790
+ # increment pointers
791
+ q_ptrs += BLOCK_M * stride_qm
792
+ log_lambda_out_ptrs += BLOCK_M * stride_log_lambda_n
793
+ do_ptrs += BLOCK_M * stride_dom
794
+
795
+ dk *= sm_scale
796
+ if HAS_SEQ_START:
797
+ # Mask out
798
+ seq_mask = (offs_n >= seq_start)
799
+ dk = tl.where(seq_mask[:, None], dk, 0.0)
800
+ dv = tl.where(seq_mask[:, None], dv, 0.0)
801
+ dlog_lambda_in = tl.where(seq_mask, dlog_lambda_in, 0.0)
802
+ if DIVISIBLE_N:
803
+ tl.store(dk_ptrs, dk.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL)
804
+ tl.store(dv_ptrs, dv.to(input_dtype)) # (BLOCK_N, BLOCK_DMODEL,)
805
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32)) # (BLOCK_N,)
806
+ else:
807
+ tl.store(dk_ptrs, dk.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
808
+ tl.store(dv_ptrs, dv.to(input_dtype), mask=mask_n[:, None]) # (BLOCK_N, BLOCK_DMODEL)
809
+ tl.store(dlog_lambda_in_ptrs, dlog_lambda_in.to(tl.float32), mask=mask_n) # (BLOCK_N,)
810
+
811
+
812
+ @triton.jit
813
+ def _bwd_q_kernel(
814
+ Q, K, V, LOG_LAMBDA, SEQ_START, sm_scale, DO,
815
+ DQ, DLOG_LAMBDA,
816
+ L,
817
+ D,
818
+ stride_qz, stride_qh, stride_qm, stride_qk,
819
+ stride_kz, stride_kh, stride_kn, stride_kk,
820
+ stride_vz, stride_vh, stride_vn, stride_vk,
821
+ stride_log_lambda_z, stride_log_lambda_h, stride_log_lambda_n,
822
+ stride_doz, stride_doh, stride_dom, stride_dok,
823
+ stride_dqz, stride_dqh, stride_dqm, stride_dqk,
824
+ stride_dlog_lambda_z, stride_dlog_lambda_h, stride_dlog_lambda_n,
825
+ Z, H, M, N, P_SEQ,
826
+ num_groups,
827
+ BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr,
828
+ CAUSAL: tl.constexpr, LARGER_M: tl.constexpr, HAS_SEQ_START: tl.constexpr,
829
+ DIVISIBLE_M: tl.constexpr, DIVISIBLE_N: tl.constexpr,
830
+ ):
831
+ input_dtype = Q.dtype.element_ty
832
+ # -- grid id --
833
+ start_m = tl.program_id(0)
834
+ off_h = tl.program_id(1)
835
+ off_z = tl.program_id(2)
836
+
837
+ # scale sm_scale by log_2(e) and use
838
+ # 2^x instead of exp in the loop because CSE and LICM
839
+ # don't work as expected with `exp` in the loop
840
+ log2e: tl.constexpr = 1.4426950408889634
841
+ qk_scale = sm_scale * log2e
842
+
843
+ # offset pointers for (batch, head)
844
+ off_hk = off_h // num_groups
845
+ Q += off_z * stride_qz + off_h * stride_qh
846
+ K += off_z * stride_kz + off_hk * stride_kh
847
+ V += off_z * stride_vz + off_hk * stride_vh
848
+ LOG_LAMBDA += off_z * stride_log_lambda_z + off_h * stride_log_lambda_h
849
+ DO += off_z * stride_doz + off_h * stride_doh
850
+ D += (off_z * H + off_h) * M
851
+ L += (off_z * H + off_h) * M
852
+
853
+ # offset pointers for batch/head
854
+ DQ += off_z * stride_dqz + off_h * stride_dqh
855
+ DLOG_LAMBDA += off_z * stride_dlog_lambda_z + off_h * stride_dlog_lambda_h
856
+
857
+ offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
858
+ offs_k = tl.arange(0, BLOCK_DMODEL)
859
+
860
+ # initialize pointers to value-like data
861
+ q_ptrs = Q + (offs_m[:, None] * stride_qm + offs_k[None, :] * stride_qk) # (BLOCK_M, BLOCK_DMODEL)
862
+ log_lambda_out_ptrs = LOG_LAMBDA + (P_SEQ + offs_m) * stride_log_lambda_n
863
+
864
+ dq_ptrs = DQ + (offs_m[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) # (BLOCK_M, BLOCK_DMODEL)
865
+ dlog_lambda_out_ptrs = DLOG_LAMBDA + (P_SEQ + offs_m) * stride_dlog_lambda_n
866
+ do_ptrs = DO + (offs_m[:, None] * stride_dom + offs_k[None, :] * stride_dok) # (BLOCK_M, BLOCK_DMODEL)
867
+
868
+ # pointer to row-wise quantities in value-like data
869
+ d_ptrs = D + offs_m
870
+ l_ptrs = L + offs_m
871
+
872
+ # load q: it will stay in SRAM throughout
873
+ if DIVISIBLE_M:
874
+ q = tl.load(q_ptrs)
875
+ do = tl.load(do_ptrs)
876
+ delta = tl.load(d_ptrs)
877
+ l = tl.load(l_ptrs)
878
+ log_lambda_out = tl.load(log_lambda_out_ptrs)
879
+ else:
880
+ mask_m = offs_m < M
881
+ q = tl.load(q_ptrs, mask=mask_m[:, None])
882
+ do = tl.load(do_ptrs, mask=mask_m[:, None])
883
+ delta = tl.load(d_ptrs, mask=mask_m)
884
+ l = tl.load(l_ptrs, mask=mask_m)
885
+ log_lambda_out = tl.load(log_lambda_out_ptrs, mask=mask_m)
886
+
887
+ # initialize dq
888
+ dq = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
889
+ dlog_lambda_out = tl.zeros([BLOCK_M], dtype=tl.float32)
890
+
891
+ # loop over k, v and update accumulator
892
+ # see note "Loop-Bound-For-N"
893
+ if CAUSAL:
894
+ hi = tl.minimum(N, P_SEQ + (start_m + 1) * BLOCK_M)
895
+ if LARGER_M:
896
+ hi = tl.maximum(0, hi)
897
+ else:
898
+ hi = N
899
+
900
+ offs_n_base = tl.arange(0, BLOCK_N)
901
+ offs_n_init = offs_n_base
902
+ if HAS_SEQ_START:
903
+ SEQ_START += off_z
904
+ seq_start = tl.load(SEQ_START)
905
+ lo = tl.minimum(seq_start, hi)
906
+ lo = (lo // BLOCK_N) * BLOCK_N
907
+ offs_n_init += lo
908
+ else:
909
+ lo = 0
910
+ k_ptrs = K + (offs_n_init[:, None] * stride_kn + offs_k[None, :] * stride_kk) # (BLOCK_N, BLOCK_DMODEL)
911
+ v_ptrs = V + (offs_n_init[:, None] * stride_vn + offs_k[None, :] * stride_vk) # (BLOCK_N, BLOCK_DMODEL)
912
+ log_lambda_in_ptrs = LOG_LAMBDA + (offs_n_init * stride_log_lambda_n)
913
+
914
+ # loop over a row
915
+ for start_n in range(lo, hi, BLOCK_N):
916
+ offs_n = start_n + offs_n_base
917
+
918
+ # load k, v, log_lambda_in on chip
919
+ if DIVISIBLE_N:
920
+ v = tl.load(v_ptrs)
921
+ k = tl.load(k_ptrs)
922
+ log_lambda_in = tl.load(log_lambda_in_ptrs)
923
+ else:
924
+ mask_n = offs_n < N
925
+ v = tl.load(v_ptrs, mask=mask_n[:, None])
926
+ k = tl.load(k_ptrs, mask=mask_n[:, None])
927
+ log_lambda_in = tl.load(log_lambda_in_ptrs, mask=mask_n)
928
+
929
+
930
+ # recompute p = softmax(qk * sm_scale, dim=-1)
931
+ if not DIVISIBLE_N:
932
+ valid_mask = mask_n[None, :] # & mask_m[:, None]
933
+ if CAUSAL:
934
+ causal_mask = (P_SEQ + offs_m[:, None]) >= (offs_n[None, :]) # (BLOCK_M, BLOCK_N)
935
+ # s = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
936
+ s = tl.dot(q, tl.trans(k), input_precision="ieee") * qk_scale
937
+ decay_bias = log_lambda_out[:, None] - log_lambda_in[None, :]
938
+ s += decay_bias * log2e
939
+
940
+ # NOTE: since the softmax in the backward pass is pointwise, the normalizer has been saved in fwd.
941
+ # So masking on s is not needed.
942
+ # if CAUSAL:
943
+ # s = tl.where(causal_mask & valid_mask, s, float("-inf"))
944
+ # else:
945
+ # s = tl.where(valid_mask, s, float("-inf"))
946
+ p = tl.math.exp2(s - l[:, None] * log2e) # (BLOCK_M, BLOCK_N)
947
+
948
+ # compute dp = dot(v, do)
949
+ # dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
950
+ dp = tl.dot(do.to(input_dtype), tl.trans(v), input_precision="ieee")
951
+
952
+
953
+ # no need to mask dp
954
+ # if CAUSAL:
955
+ # dp = tl.where(causal_mask & valid_mask, dp, 0.0)
956
+ # else:
957
+ # dp = tl.where(valid_mask, dp, 0.0)
958
+
959
+ # compute ds = p * (dp - delta[:, None])
960
+ # move scale out to dq at last
961
+ ds = p * (dp - delta[:, None]) # (BLOCK_M, BLOCK_N)
962
+
963
+ # mask ds so that invalid positions contribute no gradient
964
+ if not DIVISIBLE_N:
965
+ ds = tl.where(valid_mask, ds, 0.0)
966
+ if CAUSAL:
967
+ ds = tl.where(causal_mask, ds, 0.0)
968
+ if HAS_SEQ_START:
969
+ ds = tl.where(offs_n[None, :] >= seq_start, ds, 0.0)
970
+
971
+ dq += tl.dot(ds.to(input_dtype), k, input_precision="ieee")
972
+ dlog_lambda_out += tl.sum(ds, axis=1)
973
+
974
+ # increment pointers
975
+ k_ptrs += BLOCK_N * stride_kn
976
+ v_ptrs += BLOCK_N * stride_vn
977
+ log_lambda_in_ptrs += BLOCK_N * stride_log_lambda_n
978
+
979
+ dq *= sm_scale
980
+ if DIVISIBLE_M:
981
+ tmp = tl.load(dlog_lambda_out_ptrs)
982
+ else:
983
+ tmp = tl.load(dlog_lambda_out_ptrs, mask=mask_m)
984
+ dlog_lambda_out += tmp
985
+ if DIVISIBLE_M:
986
+ tl.store(dq_ptrs, dq.to(input_dtype))
987
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out)
988
+ else:
989
+ tl.store(dq_ptrs, dq.to(input_dtype), mask=mask_m[:, None])
990
+ tl.store(dlog_lambda_out_ptrs, dlog_lambda_out, mask=mask_m)
991
+
992
+
993
+
994
+ @pytest.mark.parametrize("Z, H, M, N, HEAD_DIM", [(4, 2, 1020, 2098, 64), (4, 2, 1024, 2048, 64)])
995
+ @pytest.mark.parametrize("causal", [True])
996
+ def test_op(Z, H, M, N, HEAD_DIM, causal, dtype=torch.bfloat16):
997
+ torch.manual_seed(24)
998
+ q = (torch.empty((Z, H, M, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
999
+ k = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1000
+ v = (torch.empty((Z, H, N, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
1001
+ fgate_logit = torch.empty((Z, H, N), dtype=torch.float32, device="cuda").uniform_(5, 10)
1002
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1003
+ seq_start = torch.randint(low=0, high=N, size=(Z,), dtype=torch.long, device="cuda")
1004
+ # seq_start = torch.randint(low=0, high=10, size=(Z,), dtype=torch.long, device="cuda")
1005
+ # seq_start = torch.full(fill_value=0, size=(Z,), dtype=torch.long, device="cuda")
1006
+ sm_scale = 0.5
1007
+ dout = torch.randn_like(q)
1008
+ # reference implementation
1009
+ P_SEQ = N - M
1010
+ mask = torch.tril(torch.ones((M, N), device="cuda"), diagonal=P_SEQ)
1011
+ p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
1012
+ p = p.float()
1013
+
1014
+ log_lambda = torch.cumsum(log_fgate, dim=-1)
1015
+ decay_bias = log_lambda[..., -M:, None] - log_lambda[..., None, :]
1016
+ p = p + decay_bias
1017
+ if causal:
1018
+ p[:, :, mask == 0] = float("-inf")
1019
+
1020
+ attention_mask = torch.arange(N, device="cuda") < seq_start[:, None, None, None]
1021
+ p = torch.where(attention_mask, float("-inf"), p)
1022
+ p = torch.softmax(p.float(), dim=-1).to(dtype)
1023
+ p = p.clone()
1024
+ p[torch.isnan(p)] = 0.0
1025
+ # p = torch.exp(p)
1026
+ ref_out = torch.matmul(p, v)
1027
+ ref_out.backward(dout)
1028
+ ref_dv, v.grad = v.grad.clone(), None
1029
+ ref_dk, k.grad = k.grad.clone(), None
1030
+ ref_dq, q.grad = q.grad.clone(), None
1031
+ ref_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1032
+ # triton implementation
1033
+ tri_out = forgetting_attention(q, k, v, log_fgate, head_first=True, seq_start=seq_start, sm_scale=sm_scale)
1034
+ tri_out = tri_out.to(dtype)
1035
+
1036
+ tri_out.backward(dout)
1037
+ tri_dv, v.grad = v.grad.clone(), None
1038
+ tri_dk, k.grad = k.grad.clone(), None
1039
+ tri_dq, q.grad = q.grad.clone(), None
1040
+ tri_dlog_fgate, log_fgate.grad = log_fgate.grad.clone(), None
1041
+ # compare
1042
+ # assert torch.allclose(tri_log_normalizer[~torch.isnan(tri_log_normalizer)], ref_log_normalizer[~torch.isnan(ref_log_normalizer)], atol=1e-2, rtol=0)
1043
+ assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0), (ref_out - tri_out).abs().max()
1044
+ rtol = 0
1045
+ # Relative tolerance workaround for known hardware limitation of MI200 GPU.
1046
+ # For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
1047
+ # if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
1048
+ # rtol = 1e-2
1049
+ assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol), (ref_dv - tri_dv).abs().max()
1050
+ assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol), (ref_dk - tri_dk).abs().max()
1051
+ assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol), (ref_dq - tri_dq).abs().max()
1052
+ assert torch.allclose(ref_dlog_fgate, tri_dlog_fgate, atol=1e-2, rtol=rtol), (ref_dlog_fgate - tri_dlog_fgate).abs().max()
1053
+
1054
+ try:
1055
+ from flash_attn.flash_attn_interface import \
1056
+ flash_attn_qkvpacked_func as flash_attn_func
1057
+ HAS_FLASH = True
1058
+ except BaseException:
1059
+ HAS_FLASH = False
1060
+
1061
+ TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
1062
+ BATCH, N_HEADS, HEAD_DIM = 4, 32, 128
1063
+ # vary seq length for fixed head and batch=4
1064
+ configs = []
1065
+ for mode in ["fwd", "bwd"]:
1066
+ # for mode in ["bwd"]:
1067
+ # for causal in [True, False]:
1068
+ for causal in [True]:
1069
+ if mode == "bwd" and not causal:
1070
+ continue
1071
+ configs.append(
1072
+ triton.testing.Benchmark(
1073
+ x_names=["N_CTX"],
1074
+ # x_vals=[2**i for i in range(10, 15)],
1075
+ x_vals=[2**i for i in range(14, 15)],
1076
+ line_arg="provider",
1077
+ # line_vals=["triton-fp16", "flag"] + (["flash"] if HAS_FLASH else []),
1078
+ # line_names=["Triton [FP16]", "Flag"] + (["Flash-2"] if HAS_FLASH else []),
1079
+ line_vals=["flag"] + (["flash"] if HAS_FLASH else []),
1080
+ line_names=["Flag"] + (["Flash-2"] if HAS_FLASH else []),
1081
+ styles=[("red", "-"), ("blue", "-"), ("green", "-")],
1082
+ ylabel="ms",
1083
+ plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
1084
+ args={
1085
+ "H": N_HEADS,
1086
+ "BATCH": BATCH,
1087
+ "HEAD_DIM": HEAD_DIM,
1088
+ "mode": mode,
1089
+ "causal": causal,
1090
+ },
1091
+ ))
1092
+
1093
+
1094
+ @triton.testing.perf_report(configs)
1095
+ def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
1096
+ assert mode in ["fwd", "bwd"]
1097
+ warmup = 25
1098
+ rep = 100
1099
+ dtype = torch.bfloat16
1100
+ if "flag" in provider:
1101
+ q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1102
+ k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1103
+ v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1104
+ fgate_logit = torch.empty((BATCH, H, N_CTX), dtype=torch.float32, device="cuda").uniform_(5, 10)
1105
+ log_fgate = torch.nn.functional.logsigmoid(fgate_logit).requires_grad_()
1106
+ # if mode == "fwd" and "fp8" in provider:
1107
+ # q = q.to(torch.float8_e5m2)
1108
+ # k = k.to(torch.float8_e5m2)
1109
+ # v = v.permute(0, 1, 3, 2).contiguous()
1110
+ # v = v.permute(0, 1, 3, 2)
1111
+ # v = v.to(torch.float8_e5m2)
1112
+ sm_scale = 1.3
1113
+ fn = lambda: forgetting_attention(q, k, v, log_fgate, head_first=True, sm_scale=sm_scale)
1114
+ if mode == "bwd":
1115
+ o = fn()
1116
+ do = torch.randn_like(o)
1117
+ fn = lambda: o.backward(do, retain_graph=True)
1118
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1119
+ if provider == "flash":
1120
+ qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
1121
+ fn = lambda: flash_attn_func(qkv, causal=causal)
1122
+ if mode == "bwd":
1123
+ o = fn()
1124
+ do = torch.randn_like(o)
1125
+ fn = lambda: o.backward(do, retain_graph=True)
1126
+ ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
1127
+ flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
1128
+ total_flops = 2 * flops_per_matmul
1129
+ if causal:
1130
+ total_flops *= 0.5
1131
+ if mode == "bwd":
1132
+ total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
1133
+ return total_flops / ms * 1e-9
1134
+
1135
+
1136
+ if __name__ == "__main__":
1137
+ # only works on post-Ampere GPUs right now
1138
+ bench_flash_attention.run(save_path=".", print_data=True)
ops/forgetting_attention_std.py ADDED
@@ -0,0 +1,72 @@
1
+ """
2
+ Forgetting Attention - standard softmax version
3
+ Append this function at the end of forgetting_attention.py
4
+ """
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from einops import rearrange
10
+ from typing import Optional
11
+
12
+
13
+ def forgetting_attention_std(
14
+ q: torch.Tensor,
15
+ k: torch.Tensor,
16
+ v: torch.Tensor,
17
+ log_fgate: torch.Tensor,
18
+ *,
19
+ head_first: bool = False,
20
+ seq_start: Optional[torch.Tensor] = None,
21
+ sm_scale: Optional[float] = None,
22
+ ) -> torch.Tensor:
23
+ """标准 Softmax 版本的 Forgetting Attention"""
24
+
25
+ if not head_first:
26
+ q = rearrange(q, "b t h d -> b h t d")
27
+ k = rearrange(k, "b t h d -> b h t d")
28
+ v = rearrange(v, "b t h d -> b h t d")
29
+ log_fgate = rearrange(log_fgate, "b t h -> b h t")
30
+
31
+ B, H, T_q, D = q.shape
32
+ T_k = k.shape[2]
33
+
34
+ if sm_scale is None:
35
+ sm_scale = 1.0 / math.sqrt(D)
36
+
37
+ # Compute QK scores
38
+ scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
39
+
40
+ # Handle seq_start
41
+ log_fgate_masked = log_fgate.float()
42
+ if seq_start is not None:
43
+ mask_idx = torch.arange(T_k, device=q.device)[None, None, :] < seq_start[:, None, None]  # (B, 1, T_k)
44
+ # masked_fill broadcasts over the head dimension (boolean indexing would not)
45
+ log_fgate_masked = log_fgate_masked.masked_fill(mask_idx, 0.0)
46
+
47
+ # Compute the cumulative decay
48
+ log_lambda = torch.cumsum(log_fgate_masked, dim=-1)
49
+ decay_bias = log_lambda[:, :, -T_q:, None] - log_lambda[:, :, None, :]  # queries align with the last T_q key positions
50
+ scores = scores + decay_bias
51
+
52
+ # Causal mask
53
+ P_SEQ = T_k - T_q
54
+ causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
55
+ scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
56
+
57
+ # seq_start mask
58
+ if seq_start is not None:
59
+ seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[:, None, None, None]  # (B, 1, 1, T_k)
60
+ scores = scores.masked_fill(seq_mask, float('-inf'))
61
+
62
+ # Softmax
63
+ attn = F.softmax(scores, dim=-1)
64
+ attn = torch.nan_to_num(attn, 0.0)
65
+
66
+ # Compute the output
67
+ out = torch.matmul(attn.to(v.dtype), v)
68
+
69
+ if not head_first:
70
+ out = rearrange(out, "b h t d -> b t h d")
71
+
72
+ return out
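+ # Sanity-check sketch (illustrative): this reference implementation should
+ # agree with the Triton kernel in forgetting_attention.py up to numerical
+ # tolerance, e.g.
+ #
+ #   out_std = forgetting_attention_std(q, k, v, log_fgate)
+ #   out_tri = forgetting_attention(q, k, v, log_fgate)
+ #   assert torch.allclose(out_std.float(), out_tri.float(), atol=1e-2)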
ops/framework_mock.py ADDED
@@ -0,0 +1,25 @@
1
+ """
2
+ Mock framework module for NDR geometric attention
3
+ Only the necessary parts are kept
4
+ """
5
+ import torch
6
+ from typing import Optional, Any
7
+
8
+ class visualize:
9
+ """Mock visualize class"""
10
+ @staticmethod
11
+ def attention(*args, **kwargs):
12
+ """Dummy attention visualization"""
13
+ pass
14
+
15
+ @staticmethod
16
+ def plot(*args, **kwargs):
17
+ """Dummy plot"""
18
+ pass
19
+
20
+ # Mock other functionality that may be needed
21
+ def get_logger(name: str):
22
+ """Mock logger"""
23
+ import logging
24
+ return logging.getLogger(name)
25
+
ops/geometric_attention/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .cuda_interface import geometric_attention_activation
ops/geometric_attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes).
ops/geometric_attention/__pycache__/cuda_interface.cpython-310.pyc ADDED
Binary file (3.45 kB).
ops/geometric_attention/cuda_interface.cu ADDED
@@ -0,0 +1,177 @@
1
+ #include <torch/extension.h>
2
+
3
+ __global__ void k_cuda_log_sigmoid_forward(int N, float * t, float *out_sigm, float *out_one_minus_sigm){
4
+ int i = threadIdx.x + blockIdx.x * blockDim.x;
5
+ if (i<N){
6
+ float x = t[i];
7
+ float c = - log(exp(-abs(x)) + 1);
8
+ out_sigm[i] = min(x, 0.0f) + c;
9
+ out_one_minus_sigm[i] = -max(x, 0.0f) + c;
10
+ }
11
+ }
12
+
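+ // Note (added for clarity): the forward kernel above uses the numerically
+ // stable identities
+ //   log(sigmoid(x))     = min(x, 0) - log(1 + exp(-|x|))
+ //   log(1 - sigmoid(x)) = -max(x, 0) - log(1 + exp(-|x|))
+ // so exp() never overflows for large |x|. The backward kernel below applies
+ // the matching derivatives d/dx log(sigmoid(x)) = sigmoid(-x) and
+ // d/dx log(1 - sigmoid(x)) = -sigmoid(x).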
13
+ __global__ void k_cuda_log_sigmoid_backward(int N, float *t, float *grad_sigm, float *grad_one_minus_sigm, float *grad_out){
14
+ int i = threadIdx.x + blockIdx.x * blockDim.x;
15
+ if (i<N){
16
+ float x = t[i];
17
+ float ne = exp(-abs(x));
18
+ float coeff = 1.0 / (ne + 1.0) * ne;
19
+
20
+ float r_one_minus = (x > 0) ? (coeff - 1) : (-coeff);
21
+ float r = (x < 0) ? (coeff - 1) : (-coeff);
22
+ grad_out[i] = - grad_sigm[i] * r + grad_one_minus_sigm[i] * r_one_minus;
23
+ }
24
+ }
25
+
26
+ std::vector<torch::Tensor> cuda_log_sigmoid_forward(torch::Tensor input){
27
+ auto o1 = torch::empty_like(input);
28
+ auto o2 = torch::empty_like(input);
29
+ auto inf = input.flatten();
30
+
31
+ const int N = inf.size(0);
32
+
33
+ const int threads = 256;
34
+ const int blocks = (N + threads - 1) / threads;
35
+
36
+ k_cuda_log_sigmoid_forward<<<blocks, threads>>>(N,
37
+ input.data_ptr<float>(),
38
+ o1.data_ptr<float>(),
39
+ o2.data_ptr<float>());
40
+
41
+ return {o1, o2};
42
+ }
43
+
44
+ std::vector<torch::Tensor> cuda_log_sigmoid_backward(torch::Tensor input, torch::Tensor grad_sigm, torch::Tensor grad_one_minus_sigm){
45
+ auto output = torch::empty_like(input);
46
+ auto N = input.flatten().size(0);
47
+
48
+ const int threads = 256;
49
+ const int blocks = (N + threads - 1) / threads;
50
+
51
+ k_cuda_log_sigmoid_backward<<<blocks, threads>>>(N,
52
+ input.data_ptr<float>(),
53
+ grad_sigm.data_ptr<float>(),
54
+ grad_one_minus_sigm.data_ptr<float>(),
55
+ output.data_ptr<float>());
56
+
57
+ return {output};
58
+ }
59
+
60
+
61
+ typedef torch::PackedTensorAccessor32<float, 3, torch::RestrictPtrTraits> float_accessor;
62
+
63
+ __global__ void k_cuda_window_sum_forward(float_accessor csum, float_accessor out, int offset){
64
+ const int in_p = threadIdx.z + blockIdx.z * blockDim.z;
65
+ const int out_p_mem = threadIdx.y + blockIdx.y * blockDim.y;
66
+ const int batch = threadIdx.x + blockIdx.x * blockDim.x;
67
+
68
+ const int out_p = out_p_mem + offset;
69
+
70
+ if (batch < out.size(0) & out_p_mem < out.size(1) & in_p < out.size(2)){
71
+ float res;
72
+ if (in_p == out_p){
73
+ res = 0;
74
+ } else {
75
+ const int offset = abs(out_p - in_p);
76
+ int p_i = out_p + offset - int(in_p > out_p);
77
+ const int n_i = out_p - offset;
78
+
79
+ p_i = min(p_i, out.size(2) - 1);
80
+
81
+ float d_n = (n_i >= 0) ? (csum[batch][out_p_mem][n_i]) : 0.0;
82
+ res = (csum[batch][out_p_mem][p_i]) - d_n;
83
+ }
84
+
85
+ out[batch][out_p_mem][in_p] = res;
86
+ }
87
+
88
+ }
89
+
90
+ __global__ void k_cuda_window_sum_backward(float_accessor grad_in, float_accessor grad_out, int offset){
91
+ const int in_p = threadIdx.z + blockIdx.z * blockDim.z;
92
+ const int out_p_mem = threadIdx.y + blockIdx.y * blockDim.y;
93
+ const int batch = threadIdx.x + blockIdx.x * blockDim.x;
94
+
95
+ const int out_p = out_p_mem + offset;
96
+
97
+ if (batch < grad_out.size(0) & out_p_mem < grad_out.size(1) & in_p < grad_out.size(2)){
98
+ const int other = 2 * out_p - in_p;
99
+
100
+ float res;
101
+ if (in_p == grad_out.size(2) - 1){
102
+ res = 0;
103
+ for (int i = 0; i < other + int(in_p != out_p); ++i){
104
+ res += grad_in[batch][out_p_mem][i];
105
+ }
106
+ } else if (in_p == out_p){
107
+ res = grad_in[batch][out_p_mem][min(in_p + 1, grad_out.size(2) - 1)];
108
+ } else if (in_p < out_p){
109
+ res = -grad_in[batch][out_p_mem][in_p];
110
+ if (other < grad_in.size(2))
111
+ res -= grad_in[batch][out_p_mem][other];
112
+ } else {
113
+ res = grad_in[batch][out_p_mem][in_p + 1];
114
+ if (other >= 0)
115
+ res += grad_in[batch][out_p_mem][other];
116
+ }
117
+
118
+ grad_out[batch][out_p_mem][in_p] = res;
119
+ }
120
+ }
121
+
122
+ dim3 get_grid_size(torch::Tensor target, dim3 block_dim){
123
+ return dim3(
124
+ (target.size(0) + block_dim.x - 1) / block_dim.x,
125
+ (target.size(1) + block_dim.y - 1) / block_dim.y,
126
+ (target.size(2) + block_dim.z - 1) / block_dim.z
127
+ );
128
+ }
129
+
130
+ torch::Tensor cuda_window_sum_forward(torch::Tensor input, int offset){
131
+ auto out = torch::empty_like(input);
132
+
133
+ dim3 block_size(2, 2, 32);
134
+ k_cuda_window_sum_forward<<<get_grid_size(input, block_size), block_size>>>(
135
+ input.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
136
+ out.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
137
+ offset
138
+ );
139
+
140
+ return out;
141
+ }
142
+
143
+ torch::Tensor cuda_window_sum_backward(torch::Tensor grad_in, int offset){
144
+ auto out = torch::empty_like(grad_in);
145
+
146
+ dim3 block_size(2, 2, 32);
147
+ k_cuda_window_sum_backward<<<get_grid_size(grad_in, block_size), block_size>>>(
148
+ grad_in.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
149
+ out.packed_accessor32<float, 3, torch::RestrictPtrTraits>(),
150
+ offset
151
+ );
152
+
153
+ return out;
154
+ }
155
+
156
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
157
+ m.def(
158
+ "cuda_log_sigmoid_forward",
159
+ &cuda_log_sigmoid_forward,
160
+ "Log sigmoid, forward pass"
161
+ );
162
+ m.def(
163
+ "cuda_log_sigmoid_backward",
164
+ &cuda_log_sigmoid_backward,
165
+ "Log sigmoid, backward pass"
166
+ );
167
+ m.def(
168
+ "cuda_window_sum_forward",
169
+ &cuda_window_sum_forward,
170
+ "Window sum, forward pass"
171
+ );
172
+ m.def(
173
+ "cuda_window_sum_backward",
174
+ &cuda_window_sum_backward,
175
+ "Window sum, backward pass"
176
+ );
177
+ }
ops/geometric_attention/cuda_interface.py ADDED
@@ -0,0 +1,93 @@
1
+ import os
2
+ import torch
3
+ import multiprocessing
4
+ from typing import Tuple, Optional
5
+ import torch.nn.functional as F
6
+ import filelock # use filelock in place of framework.utils.LockFile
7
+
8
+ # Just-in-time compilation of the CUDA extension:
9
+ # https://pytorch.org/tutorials/advanced/cpp_extension
10
+
11
+ dirname = os.path.dirname(__file__)
12
+ filename = os.path.join(dirname, 'cuda_interface.cu')
13
+ outdir = "./cache/geometric_attention"
14
+ os.makedirs(outdir, exist_ok=True)
15
+
16
+ cuda_log_sigmoid_backward = None
17
+ cuda_log_sigmoid_forward = None
18
+ cuda_window_sum_forward = None
19
+ cuda_window_sum_backward = None
20
+
21
+ def load_extension():
22
+ global cuda_log_sigmoid_forward, cuda_log_sigmoid_backward
23
+ global cuda_window_sum_forward, cuda_window_sum_backward
24
+ if cuda_log_sigmoid_forward is not None:
25
+ return
26
+
27
+ # Use filelock in place of framework.utils.LockFile
28
+ lock = filelock.FileLock(outdir + "/lock.lock")
29
+ with lock:
30
+ from torch.utils.cpp_extension import load
31
+
32
+ os.environ["MAX_JOBS"] = str(multiprocessing.cpu_count())
33
+ ext = load(
34
+ extra_cuda_cflags=['--ftemplate-depth=1024'],
35
+ name="geometric_attention_cuda_interface",
36
+ sources=[filename], verbose=True)
37
+
38
+ cuda_log_sigmoid_forward = ext.cuda_log_sigmoid_forward
39
+ cuda_log_sigmoid_backward = ext.cuda_log_sigmoid_backward
40
+ cuda_window_sum_forward = ext.cuda_window_sum_forward
41
+ cuda_window_sum_backward = ext.cuda_window_sum_backward
42
+
43
+
44
+ class LogSigmoidFunction(torch.autograd.Function):
45
+ @staticmethod
46
+ def forward(ctx, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
47
+ x = x.detach().contiguous()
48
+ ctx.save_for_backward(x)
49
+ a, b = cuda_log_sigmoid_forward(x)
50
+ return a, b
51
+
52
+ @staticmethod
53
+ def backward(ctx, grad_in_sigm: torch.Tensor, grad_in_one_minus: torch.Tensor) -> torch.Tensor:
54
+ xf, = ctx.saved_tensors
55
+ ga = grad_in_sigm.contiguous()
56
+ gb = grad_in_one_minus.contiguous()
57
+ return cuda_log_sigmoid_backward(xf, ga, gb)[0]
58
+
59
+
60
+ class WindowSumFunction(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward(ctx, csum: torch.Tensor, offset: int) -> torch.Tensor:
63
+ ctx.saved_offset = offset
64
+ c2 = csum.detach().contiguous().flatten(end_dim=-3)
65
+ res = cuda_window_sum_forward(c2, offset)
66
+ return res.view_as(csum)
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
70
+ offset = ctx.saved_offset
71
+ go = grad_output.contiguous().flatten(end_dim=-3)
72
+ res = cuda_window_sum_backward(go, offset)
73
+ return res.view_as(grad_output), None
74
+
75
+
76
+ def window_sum(x: torch.Tensor, offset: int) -> torch.Tensor:
77
+ load_extension()
78
+ return WindowSumFunction.apply(x, offset)
79
+
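+ # window_sum(csum, offset) takes a cumulative sum along the last dim and, for
+ # each (query o, key i) pair, returns the sum of the underlying values over
+ # the positions strictly closer to o than i, expanding symmetrically around o
+ # (with a directional tie-break); see k_cuda_window_sum_forward in
+ # cuda_interface.cu.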
80
+
81
+ def log_sigmoid(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
82
+ load_extension()
83
+ return LogSigmoidFunction.apply(x)
84
+
85
+
86
+ def geometric_attention_activation(logits: torch.Tensor, mask: Optional[torch.Tensor] = None, pos_offset: int = 0,
87
+ normalize: bool = True) -> torch.Tensor:
88
+ p, one_minus_p = log_sigmoid(logits)
89
+ not_previous = window_sum(one_minus_p.cumsum(-1), pos_offset)
90
+
91
+ probs = (not_previous + p).exp()
92
+
93
+ return F.normalize(probs, 1, -1) if normalize else probs
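+ # In closed form (added for clarity): with s_j = sigmoid(logits_j), this
+ # computes probs_j = s_j * prod_{j' strictly closer to the query than j} (1 - s_{j'}),
+ # a "first match wins" distribution expanding outward from the query,
+ # evaluated in log space for stability and optionally L1-normalized.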
ops/geometric_attention/cuda_interface.py.bak ADDED
@@ -0,0 +1,94 @@
1
+ import os
2
+ import torch
3
+ import multiprocessing
4
+ from framework.utils import LockFile
5
+ from typing import Tuple, Optional
6
+ import torch.nn.functional as F
7
+
8
+ # Just in time import
9
+ # https://pytorch.org/tutorials/advanced/cpp_extens
10
+
11
+ dirname = os.path.dirname(__file__)
12
+ filename = os.path.join(dirname, 'cuda_interface.cu')
13
+ outdir = "./cache/geometric_attention"
14
+ os.makedirs(outdir, exist_ok=True)
15
+
16
+ cuda_log_sigmoid_backward = None
17
+ cuda_log_sigmoid_forward = None
18
+ cuda_window_sum_forward = None
19
+ cuda_window_sum_backward = None
20
+
21
+ def load_extension():
22
+ global cuda_log_sigmoid_forward, cuda_log_sigmoid_backward
23
+ global cuda_window_sum_forward, cuda_window_sum_backward
24
+ if cuda_log_sigmoid_forward is not None:
25
+ return
26
+
27
+ with LockFile(outdir + "/lock"):
28
+ from torch.utils.cpp_extension import load
29
+
30
+ os.environ["MAX_JOBS"] = str(multiprocessing.cpu_count())
31
+ ext = load(
32
+ extra_cuda_cflags=['--ftemplate-depth=1024'],
33
+ name="geometric_attention_cuda_interface",
34
+ sources=[filename], verbose=True)
35
+ #, build_directory=outdir)
36
+
37
+ cuda_log_sigmoid_forward = ext.cuda_log_sigmoid_forward
38
+ cuda_log_sigmoid_backward = ext.cuda_log_sigmoid_backward
39
+ cuda_window_sum_forward = ext.cuda_window_sum_forward
40
+ cuda_window_sum_backward = ext.cuda_window_sum_backward
41
+
42
+
43
+ class LogSigmoidFunction(torch.autograd.Function):
44
+ @staticmethod
45
+ def forward(ctx, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
46
+ x = x.detach().contiguous()
47
+ ctx.save_for_backward(x)
48
+ a, b = cuda_log_sigmoid_forward(x)
49
+ return a, b
50
+ # return res_a.view_as(x), res_b.view_as(x)
51
+
52
+ @staticmethod
53
+ def backward(ctx, grad_in_sigm: torch.Tensor, grad_in_one_minus: torch.tensor) -> torch.Tensor:
54
+ xf, = ctx.saved_tensors
55
+ ga = grad_in_sigm.contiguous()
56
+ gb = grad_in_one_minus.contiguous()
57
+ return cuda_log_sigmoid_backward(xf, ga, gb)[0]
58
+
59
+
60
+ class WindowSumFunction(torch.autograd.Function):
61
+ @staticmethod
62
+ def forward(ctx, csum: torch.Tensor, offset: int) -> torch.Tensor:
63
+ ctx.saved_offset = offset
64
+ c2 = csum.detach().contiguous().flatten(end_dim=-3)
65
+ res = cuda_window_sum_forward(c2, offset)
66
+ return res.view_as(csum)
67
+
68
+ @staticmethod
69
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
70
+ offset = ctx.saved_offset
71
+ go = grad_output.contiguous().flatten(end_dim=-3)
72
+ res = cuda_window_sum_backward(go, offset)
73
+ return res.view_as(grad_output), None
74
+
75
+
76
+ def window_sum(x: torch.Tensor, offset: int) -> torch.Tensor:
77
+ load_extension()
78
+ return WindowSumFunction.apply(x, offset)
79
+
80
+
81
+ def log_sigmoid(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
82
+ load_extension()
83
+ return LogSigmoidFunction.apply(x)
84
+
85
+
86
+ def geometric_attention_activation(logits: torch.Tensor, mask: Optional[torch.Tensor] = None, pos_offset: int = 0,
87
+ normalize: bool = True) -> torch.Tensor:
88
+ p, one_minus_p = log_sigmoid(logits)
89
+ not_previos = window_sum(one_minus_p.cumsum(-1), pos_offset)
90
+
91
+ probs = (not_previos + p).exp()
92
+
93
+ # return probs
94
+ return F.normalize(probs, 1, -1) if normalize else probs
ops/geometric_attention_final.py ADDED
@@ -0,0 +1,109 @@
1
+ """
2
+ Geometric Attention - CUDA-accelerated version (with FP16 support)
3
+ """
4
+
5
+ import math
6
+ import torch
7
+ from einops import rearrange
8
+ from typing import Optional
9
+
10
+ # Try to import the CUDA version
11
+ try:
12
+ from forgetting_transformer.ops.geometric_attention.cuda_interface import (
13
+ load_extension,
14
+ geometric_attention_activation,
15
+ )
16
+ load_extension()
17
+ HAS_CUDA = True
18
+ print("✅ Using CUDA geometric attention (with FP16 support)")
19
+ except Exception as e:
20
+ HAS_CUDA = False
21
+ print(f"⚠️ CUDA not available: {e}")
22
+
23
+
24
+ def geometric_attention_cuda(
25
+ q: torch.Tensor,
26
+ k: torch.Tensor,
27
+ v: torch.Tensor,
28
+ *,
29
+ head_first: bool = False,
30
+ seq_start: Optional[torch.Tensor] = None,
31
+ sm_scale: Optional[float] = None,
32
+ normalize: bool = True,
33
+ ) -> torch.Tensor:
34
+ if not HAS_CUDA:
35
+ raise RuntimeError("CUDA not available")
36
+
37
+ # ⭐ Save the original dtype
38
+ original_dtype = q.dtype
39
+ needs_cast = original_dtype in (torch.float16, torch.bfloat16)  # the CUDA kernel only handles float32
40
+
41
+ # ⭐ If half precision, cast to FP32
42
+ if needs_cast:
43
+ q = q.float()
44
+ k = k.float()
45
+ v = v.float()
46
+
47
+ # Rearrange
48
+ if not head_first:
49
+ q = rearrange(q, "b t h d -> b h t d")
50
+ k = rearrange(k, "b t h d -> b h t d")
51
+ v = rearrange(v, "b t h d -> b h t d")
52
+
53
+ B, H, T_q, D = q.shape
54
+
55
+ if sm_scale is None:
56
+ sm_scale = 1.0 / math.sqrt(D)
57
+
58
+ # Attention scores
59
+ logits = torch.matmul(q, k.transpose(-2, -1)) * sm_scale
60
+
61
+ # CUDA kernel (FP32)
62
+ attn_weights = geometric_attention_activation(
63
+ logits, mask=None, pos_offset=0, normalize=normalize
64
+ )
65
+
66
+ # Apply to values
67
+ output = torch.matmul(attn_weights, v)
68
+
69
+ # Rearrange back
70
+ if not head_first:
71
+ output = rearrange(output, "b h t d -> b t h d")
72
+
73
+ # ⭐ 转回原始dtype
74
+ if needs_cast:
75
+ output = output.to(original_dtype)
76
+
77
+ return output
78
+
79
+
80
+ def geometric_attention(
81
+ q: torch.Tensor,
82
+ k: torch.Tensor,
83
+ v: torch.Tensor,
84
+ *,
85
+ head_first: bool = False,
86
+ seq_start: Optional[torch.Tensor] = None,
87
+ sm_scale: Optional[float] = None,
88
+ normalize: bool = True,
89
+ ) -> torch.Tensor:
90
+ """自动选择CUDA或Python"""
91
+
92
+ if HAS_CUDA and q.is_cuda:
93
+ try:
94
+ return geometric_attention_cuda(
95
+ q, k, v, head_first=head_first,
96
+ seq_start=seq_start, sm_scale=sm_scale,
97
+ normalize=normalize
98
+ )
99
+ except Exception as e:
100
+ # 不打印太多警告,会刷屏
101
+ pass
102
+
103
+ # Fallback
104
+ from forgetting_transformer.ops.geometric_attention_std import geometric_attention_std
105
+ return geometric_attention_std(
106
+ q, k, v, head_first=head_first,
107
+ seq_start=seq_start, sm_scale=sm_scale,
108
+ normalize=normalize
109
+ )
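A quick smoke test of the dispatcher (a hypothetical shape check, not part of the commit, assuming the package is importable as `forgetting_transformer.ops`): the wrapper accepts `[B, T, H, D]` inputs by default and silently falls back to `geometric_attention_std` when the kernel is unavailable.

    import torch
    from forgetting_transformer.ops.geometric_attention_final import geometric_attention

    q = torch.randn(2, 16, 4, 32)   # [B, T, H, D]
    k = torch.randn(2, 16, 4, 32)
    v = torch.randn(2, 16, 4, 32)

    out = geometric_attention(q, k, v)  # CUDA kernel if available, else fallback
    assert out.shape == q.shape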
ops/geometric_attention_std.py ADDED
@@ -0,0 +1,179 @@
+ """
+ Geometric Attention - standard pure-PyTorch version
+ Based on "The Neural Data Router" (Csordás et al., 2022)
+ """
+
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def geometric_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Standard (non-CUDA) version of geometric attention.
+
+     Args:
+         q: Query tensor [B, T, H, D], or [B, H, T, D] if head_first
+         k: Key tensor [B, T, H, D], or [B, H, T, D] if head_first
+         v: Value tensor [B, T, H, D], or [B, H, T, D] if head_first
+         head_first: whether the head dimension comes before time
+         seq_start: start position of each sequence [B]
+         sm_scale: scaling factor, defaults to 1/sqrt(D)
+         normalize: whether to L1-normalize the attention weights
+
+     Returns:
+         output: [B, T, H, D], or [B, H, T, D] if head_first
+     """
+
+     # Rearrange to head-first format
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Step 1: content-based logits
+     logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+     # logits: [B, H, T_q, T_k]
+
+     # Step 2: mask the diagonal (a position may not attend to itself)
+     if T_q == T_k:
+         diag_mask = torch.eye(T_q, dtype=torch.bool, device=q.device)
+         logits = logits.masked_fill(diag_mask[None, None, :, :], float('-inf'))
+
+     # Step 3: handle the seq_start mask
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
+         logits = logits.masked_fill(seq_mask, float('-inf'))
+
+     # Step 4: causal mask (if needed)
+     # Note: geometric attention in the paper is not causal; uncomment if your task requires it.
+     # P_SEQ = T_k - T_q
+     # causal_mask = torch.triu(torch.ones((T_q, T_k), dtype=torch.bool, device=q.device), diagonal=P_SEQ + 1)
+     # logits = logits.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # Step 5: geometric weighting (the core algorithm)
+     attn_weights = geometric_weighting(logits, normalize=normalize)
+
+     # Step 6: apply attention to the values
+     out = torch.matmul(attn_weights.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
+
+
+ def geometric_weighting(
+     logits: torch.Tensor,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Compute geometric attention weights.
+
+     Implements Equation 7 of the paper:
+         A[i,j] = P[i,j] * prod(1 - P[i,k]) over all k closer to i than j
+
+     Args:
+         logits: [B, H, T_q, T_k] attention logits
+         normalize: whether to L1-normalize
+
+     Returns:
+         weights: [B, H, T_q, T_k] attention weights
+     """
+     B, H, T_q, T_k = logits.shape
+
+     # Step 1: sigmoid to get matching probabilities
+     P = torch.sigmoid(logits)  # [B, H, T_q, T_k]
+
+     # Step 2: work in log space (numerically stable)
+     log_P = torch.log(P + 1e-10)
+     log_one_minus_P = torch.log(1.0 - P + 1e-10)
+
+     # Step 3: simplified version - realize the geometric decay with a cumsum.
+     # This is an efficient approximation that avoids explicit loops.
+
+     # For each position, accumulate log(1 - P) over all positions to its left
+     log_decay_left = log_one_minus_P.cumsum(dim=-1)
+
+     # Compute the weights (simplified version).
+     # The full version selects the decay interval dynamically by distance;
+     # this is an efficient left-to-right approximation.
+     weights = torch.exp(log_P + log_decay_left.roll(1, dims=-1))
+
+     # Special-case the first position (it has nothing to its left),
+     # avoiding in-place operations.
+     weights_first = P[:, :, :, :1]  # take the first column
+     weights = torch.cat([weights_first, weights[:, :, :, 1:]], dim=-1)
+
+     # Step 4: normalize (optional)
+     if normalize:
+         weights = F.normalize(weights, p=1, dim=-1)
+
+     # Handle NaNs (when every position is -inf)
+     weights = torch.nan_to_num(weights, 0.0)
+
+     return weights
+
+
+ def geometric_weighting_full(
+     logits: torch.Tensor,
+     normalize: bool = True,
+ ) -> torch.Tensor:
+     """
+     Full geometric weighting (slower but exact).
+
+     Use only when maximum precision is required; for training, prefer the
+     simplified version above.
+     """
+     B, H, T_q, T_k = logits.shape
+     device = logits.device
+
+     P = torch.sigmoid(logits)
+     log_P = torch.log(P + 1e-10)
+     log_one_minus_P = torch.log(1.0 - P + 1e-10)
+
+     # Initialize the weights
+     weights = torch.zeros_like(P)
+
+     # Compute the geometric weight of every (i, j) pair
+     for i in range(T_q):
+         for j in range(T_k):
+             # Find all positions k that are closer to i than j is
+             if i < j:
+                 # Looking right: closer positions are [i+1, ..., j-1]
+                 closer_positions = range(i + 1, j)
+             elif i > j:
+                 # Looking left: closer positions are [j+1, ..., i-1]
+                 closer_positions = range(j + 1, i)
+             else:
+                 # i == j (the diagonal) was already masked out by the caller
+                 continue
+
+             # Compute prod(1 - P[i,k]) in log space
+             log_prod = sum(log_one_minus_P[:, :, i, k] for k in closer_positions) if closer_positions else 0.0
+
+             # weights[i,j] = P[i,j] * prod(1 - P[i,k])
+             weights[:, :, i, j] = torch.exp(log_P[:, :, i, j] + log_prod)
+
+     if normalize:
+         weights = F.normalize(weights, p=1, dim=-1)
+
+     weights = torch.nan_to_num(weights, 0.0)
+
+     return weights
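The simplified `geometric_weighting` above reduces to a left-to-right stick-breaking product: weights[..., j] = P[..., j] · Π_{k<j}(1 − P[..., k]). A small numerical check against that closed form (a sketch, using `normalize=False` so the raw products are compared; the import path assumes the package layout of this repo):

    import torch
    from forgetting_transformer.ops.geometric_attention_std import geometric_weighting

    logits = torch.randn(1, 1, 1, 5, dtype=torch.float64)
    w = geometric_weighting(logits, normalize=False)

    P = torch.sigmoid(logits)
    expected = torch.empty_like(P)
    for j in range(P.shape[-1]):
        # closed form: P_j times the product of (1 - P_k) for all k < j
        expected[..., j] = P[..., j] * torch.prod(1.0 - P[..., :j], dim=-1)

    assert torch.allclose(w, expected, atol=1e-6)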
ops/layer_with_visualization.py ADDED
@@ -0,0 +1,43 @@
+ import torch
+ import torch.nn
+ from typing import Dict, Any
+
+
+ class LayerWithVisualization(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.visualization_enabled = False
+
+     def prepare(self):
+         # Should be called before the training step
+         pass
+
+     def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
+         raise NotImplementedError()
+
+
+ class LayerVisualizer:
+     def __init__(self, module: torch.nn.Module, options: Dict[str, Any] = {}):
+         self.modules = []
+         self.options = options
+         self.curr_options = None
+         for n, m in module.named_modules():
+             if isinstance(m, LayerWithVisualization):
+                 self.modules.append((n, m))
+
+     def plot(self) -> Dict[str, Any]:
+         res = {}
+         for n, m in self.modules:
+             res.update({f"{n}/{k}": v for k, v in m.plot(self.curr_options).items()})
+             m.visualization_enabled = False
+
+         self.curr_options = None
+         return res
+
+     def prepare(self, options: Dict[str, Any] = {}):
+         self.curr_options = self.options.copy()
+         self.curr_options.update(options)
+
+         for _, m in self.modules:
+             m.prepare()
+             m.visualization_enabled = True
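Typical usage (a runnable sketch with a hypothetical `ProbeLayer`, not part of the commit): wrap any model containing `LayerWithVisualization` modules, call `prepare()` before the forward pass, and collect the plots afterwards.

    import torch
    from forgetting_transformer.ops.layer_with_visualization import (
        LayerWithVisualization, LayerVisualizer,
    )

    class ProbeLayer(LayerWithVisualization):
        """Toy layer that records its input while visualization is enabled."""
        def __init__(self):
            super().__init__()
            self.recorded = []

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.visualization_enabled:
                self.recorded.append(x.detach())
            return x * 2

        def plot(self, options):
            res = {"n_recorded": len(self.recorded)}
            self.recorded = []
            return res

    model = torch.nn.Sequential(ProbeLayer())
    viz = LayerVisualizer(model)
    viz.prepare()                    # turns visualization on for all probe layers
    model(torch.randn(2, 3))         # the layer records its input
    print(viz.plot())                # {'0/n_recorded': 1}; visualization is off again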
ops/multi_head_attention.py ADDED
@@ -0,0 +1,149 @@
+ import torch
+ import torch.nn
+ import torch.nn.functional as F
+ import math
+ from typing import Optional, Callable, List, Union, Tuple, Dict, Any
+ from dataclasses import dataclass
+ from forgetting_transformer.ops.layer_with_visualization import LayerWithVisualization
+ import forgetting_transformer.ops.framework_mock as framework
+
+
+ @dataclass
+ class AttentionMask:
+     src_length_mask: Optional[torch.Tensor]
+     position_mask: Optional[torch.Tensor]
+
+
+ class MultiHeadAttentionBase(LayerWithVisualization):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, projection_size: Optional[int] = None):
+         assert state_size % n_heads == 0
+         super().__init__()
+         self.attention_to_visualize = []
+
+         self.state_size = state_size
+         self.projection_size = projection_size or (state_size // n_heads)
+         self.n_heads = n_heads
+         self.scale = 1.0 / math.sqrt(self.projection_size)
+
+         self.dropout = torch.nn.Dropout(dropout)
+
+     @staticmethod
+     def apply_logit_masks(logits: torch.Tensor, mask: AttentionMask, val: float = float("-inf")) -> torch.Tensor:
+         if mask.position_mask is not None:
+             # [..., N_out, N_in], broadcasting works
+             logits = logits.masked_fill(mask.position_mask, val)
+
+         if mask.src_length_mask is not None:
+             # [B, ..., N_in], needs manual shaping
+             b, i = mask.src_length_mask.shape
+             pad_dims = logits.ndim - 2
+             logits = logits.masked_fill(mask.src_length_mask.view([b] + [1] * pad_dims + [i]), val)
+
+         return logits
+
+     def _masked_softmax(self, logits: torch.Tensor, mask: Optional[AttentionMask]) -> torch.Tensor:
+         if mask is None or (mask.src_length_mask is None and mask.position_mask is None):
+             return F.softmax(logits, -1)
+
+         # Input/output shape: [n_batch * n_heads, n_time_dest, n_time_src]
+         bb, n_time_dest, n_time_src = logits.shape
+
+         logits = logits.view(bb // self.n_heads, self.n_heads, n_time_dest, n_time_src)
+         logits = self.apply_logit_masks(logits, mask)
+
+         logits = F.softmax(logits, -1)
+         return logits.view(bb, n_time_dest, n_time_src)
+
+     def _attention_read(self, mask: Optional[AttentionMask], scores: torch.Tensor, v: torch.Tensor) -> \
+             Tuple[torch.Tensor, torch.Tensor]:
+         # scores: [n_batch * n_heads, n_out, n_in]
+         # v: [n_batch * n_heads, n_in, data_size]
+         # Output data shape: [n_batch * n_heads, n_time_dest, data_size]
+         # Output attention score shape: [n_batch, n_heads, n_time_dest, n_time_src]
+         s_reshape = scores.view(-1, self.n_heads, *scores.shape[1:])
+         # scores = self.dropout(scores)
+         if self.visualization_enabled:
+             self.attention_to_visualize.append(s_reshape[0])
+         return torch.bmm(scores, v), s_reshape
+
+     def transform_data(self, input: torch.Tensor, proj: Callable[[torch.Tensor], torch.Tensor],
+                        n_projs: int) -> List[torch.Tensor]:
+         # Input shape: [n_batch, n_steps, n_channels]
+         # Output: tuple of n_projs tensors of shape [n_batch * n_heads, n_steps, projection_size]
+         n_batch, n_steps, _ = input.shape
+         transformed = proj(input).view(n_batch, n_steps, self.n_heads, n_projs, -1). \
+             permute(0, 2, 1, 3, 4).contiguous().view(n_batch * self.n_heads, n_steps, n_projs, -1)
+         return transformed.unbind(dim=2)
+
+     def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
+         r = {}
+         marks = options.get("steplabel")
+         if options.get("mha.plot_head_details") and self.attention_to_visualize[0].shape[0] > 1:
+             for head in range(self.attention_to_visualize[0].shape[0]):
+                 r[f"head_{head}"] = framework.visualize.plot.AnimatedHeatmap(
+                     torch.stack([layer[head] for _, layer in enumerate(self.attention_to_visualize)], 0),
+                     ylabel="dest", xlabel="src", textval=False, x_marks=marks, y_marks=marks, ignore_wrong_marks=True)
+
+         r["attention_max"] = framework.visualize.plot.AnimatedHeatmap(
+             torch.stack([layer.max(0)[0] for _, layer in enumerate(self.attention_to_visualize)], 0),
+             ylabel="dest", xlabel="src", textval=False, x_marks=marks, y_marks=marks, ignore_wrong_marks=True)
+         self.attention_to_visualize = []
+         return r
+
+
+ class AttentionMergeMixin:
+     def __init__(self, out_size: Optional[int]) -> None:
+         self.multi_head_merge = torch.nn.Linear(self.n_heads * self.projection_size, out_size or self.state_size,
+                                                 bias=False)
+
+     def merged_attention(self, n_batch: int, n_out_steps: int, *args, need_weights: bool = False, **kwargs) -> \
+             Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+
+         data, scores = self._attention(*args, **kwargs)
+
+         data = data.view(n_batch, self.n_heads, n_out_steps, -1).permute(0, 2, 1, 3).contiguous(). \
+             view(n_batch, n_out_steps, -1)
+
+         return self.multi_head_merge(data), scores
+
+
+ class AbsPosAttentionBase(MultiHeadAttentionBase):
+     def get_attention_scores(self, mask: Optional[AttentionMask], q: torch.Tensor, k: torch.Tensor) -> torch.Tensor:
+         logits = torch.bmm(q, k.transpose(1, 2))
+         return self._masked_softmax(logits * self.scale, mask)
+
+     def _attention(self, mask: Optional[AttentionMask], q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> \
+             torch.Tensor:
+         # All inputs should have a shape of [n_batch, n_steps, data_size].
+         # Output shape: [n_batch * n_heads, n_time_dest, data_size]
+         scores = self.get_attention_scores(mask, q, k)
+         return self._attention_read(mask, scores, v)
+
+
+ class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, input_size: Optional[int] = None,
+                  out_size: Optional[int] = None):
+         # Cooperative init: skip AttentionMergeMixin and initialize the attention base first...
+         super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout)
+
+         self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
+         self.data_to_q = torch.nn.Linear(input_size or state_size, n_heads * self.projection_size, bias=False)
+
+         # ...then initialize the merge mixin, which needs n_heads and projection_size to exist.
+         super(MultiHeadAttention, self).__init__(out_size)
+         self.reset_parameters()
+
+     def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
+                 need_weights: bool = False):
+         # Input and output shape: [n_batch, n_steps, data_size]
+         k, v = self.transform_data(attend_to, self.data_to_kv, 2)
+         q, = self.transform_data(curr_state, self.data_to_q, 1)
+
+         data, scores = self.merged_attention(curr_state.shape[0], q.shape[1], mask, q, k, v)
+         if need_weights:
+             return data, scores
+         else:
+             return data
+
+     def reset_parameters(self):
+         torch.nn.init.xavier_uniform_(self.data_to_q.weight)
+         torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
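A minimal forward pass through `MultiHeadAttention` (a sketch; shapes follow the comments in the class):

    import torch
    from forgetting_transformer.ops.multi_head_attention import MultiHeadAttention

    mha = MultiHeadAttention(state_size=64, n_heads=4, dropout=0.0)

    x = torch.randn(2, 10, 64)       # [n_batch, n_steps, state_size]
    out = mha(x, x, None)            # self-attention, no masking
    assert out.shape == (2, 10, 64)

    out, scores = mha(x, x, None, need_weights=True)
    print(scores.shape)              # torch.Size([2, 4, 10, 10]): [batch, heads, dest, src]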
ops/multi_head_relative_pos_attention.py ADDED
@@ -0,0 +1,185 @@
+ import torch
+ import torch.nn
+ import torch.nn.functional as F
+ from typing import Optional, Dict, Any, Tuple
+ from forgetting_transformer.ops.multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
+ import forgetting_transformer.ops.framework_mock as framework
+ import math
+ from matplotlib import cm
+
+
+ def shift(posmat: torch.Tensor) -> torch.Tensor:
+     # Slice the matrix out diagonally: each successive row is shifted one
+     # position to the left relative to the previous one.
+     # Input shape: [n_batch, n_head, n_out, n_in * 2 - 1]
+     # Return: [n_batch, n_head, n_out, n_in]
+     p = F.pad(posmat, (0, 1, 0, 1)).flatten(-2)  # [n_batch, n_head, (n_out + 1) * n_in * 2]
+     p = p.narrow(-1, posmat.shape[-1] // 2, posmat.shape[-1] * posmat.shape[-2]).view_as(posmat)
+
+     return p.narrow(-1, 0, (posmat.shape[-1] + 1) // 2)
+
+
+ class RelativeAttentionBase(MultiHeadAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float, projection_size: Optional[int] = None):
+         super().__init__(state_size, n_heads, dropout=dropout, projection_size=projection_size)
+         self.scale = torch.nn.Parameter(torch.tensor([self.scale]))
+         self.s_bias = torch.nn.Parameter(torch.tensor([0.0]))
+         self.vis_pos_vs_content = []
+
+     def get_attention_scores(self, mask: Optional[torch.Tensor],
+                              q_content: torch.Tensor, k_content: torch.Tensor,
+                              q_pos: torch.Tensor, k_pos: torch.Tensor,
+                              pos_offset: int, ar_gate: Optional[torch.Tensor] = None) -> torch.Tensor:
+
+         # shape of q_content, q_pos, k_content: [n_batch * n_heads, n_steps, data_size]
+         # k_pos: [n_heads, n_in * 2 - 1, data_size]
+         # ar_gate: [n_batch * n_heads, n_out, 1]
+         # Output shape: [n_batch * n_heads, n_out, data_size]
+
+         n_batch = q_content.shape[0] // self.n_heads
+         n_out_steps = q_content.shape[1]
+
+         # content-content addressing
+         content = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
+
+         # content-pos addressing
+         pos = torch.matmul(q_pos.view(n_batch, self.n_heads, n_out_steps, -1), self.dropout(k_pos).transpose(-1, -2))  # [n_batch, n_head, n_out, n_in * 2 - 1]
+         fpos = shift(pos).flatten(0, 1)
+         if ar_gate is not None:
+             fpos = fpos * ar_gate + pos.flatten(0, 1)[..., fpos.shape[-1] - 1:] * (1 - ar_gate)
+
+         # return self._masked_softmax(fpos * self.scale, mask)
+         if self.visualization_enabled:
+             self.vis_pos_vs_content.append((content.view(n_batch, self.n_heads, *content.shape[1:])[0] * self.scale,
+                                             fpos.view(n_batch, self.n_heads, *fpos.shape[1:])[0] * self.scale))
+
+         return self._masked_softmax((content + fpos) * self.scale, mask)
+
+     def _attention(self, mask: Optional[torch.Tensor],
+                    q_content: torch.Tensor, k_content: torch.Tensor,
+                    q_pos: torch.Tensor, k_pos: torch.Tensor,
+                    v: torch.Tensor, pos_offset: int,
+                    ar_gate: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
+
+         scores = self.get_attention_scores(mask, q_content, k_content, q_pos, k_pos, pos_offset, ar_gate)
+
+         # Scores shape: [n_batch * n_heads, n_out, n_in]
+         return self._attention_read(mask, scores, v)
+
+     def _get_pos_subset(self, pos_encoding: torch.Tensor, length: int, offset: int) -> torch.Tensor:
+         l_slice = 2 * length - 1
+         assert pos_encoding.shape[0] > l_slice
+         return pos_encoding.narrow(0, pos_encoding.shape[0] // 2 - length + 1 - offset, 2 * length - 1)
+
+     def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
+         r = {}
+         marks = options.get("steplabel")
+         if options.get("mha.plot_head_details") and self.vis_pos_vs_content:
+             for head in range(self.vis_pos_vs_content[0][0].shape[0]):
+                 cont = torch.stack([layer[0][head] for _, layer in enumerate(self.vis_pos_vs_content)], 0)
+                 pos = torch.stack([layer[1][head] for _, layer in enumerate(self.vis_pos_vs_content)], 0)
+                 i = torch.stack([layer[head] for _, layer in enumerate(self.attention_to_visualize)], 0)
+                 content = torch.stack([cont, pos], -1).softmax(-1)[..., 0]
+
+                 color = cm.get_cmap("brg")(content.cpu().numpy())
+                 color[..., -1] = (i * 0.95 + 0.05).cpu().numpy()
+
+                 r[f"content_vs_pos_{head}"] = framework.visualize.plot.AnimatedHeatmap(color, ylabel="dest",
+                     xlabel="src", textval=False, x_marks=marks, y_marks=marks, cmap="brg", colorbar=True,
+                     colorbar_ticks=[0, 0.99], colorbar_labels=["pos", "con"], ignore_wrong_marks=True)
+
+             # r["attention_max"] = framework.visualize.plot.AnimatedHeatmap(
+             #     torch.stack([layer.max(0)[0] for _, layer in enumerate(self.attention_to_visualize)], 0),
+             #     ylabel="dest", xlabel="src", textval=False, x_marks=marks, y_marks=marks)
+         self.vis_pos_vs_content = []
+
+         r.update(super().plot(options))
+         return r
+
+
+ class FixedRelativeMultiheadAttentionBase(RelativeAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, input_size: Optional[int] = None,
+                  projection_size: Optional[int] = None):
+         super().__init__(state_size, n_heads, dropout, projection_size)
+
+         self.input_size = state_size if input_size is None else input_size
+
+         self.pos_to_pq = torch.nn.Linear(state_size, self.n_heads * self.projection_size, bias=False)
+         self.register_buffer("pos_encoding", self._create_buffer(1000))
+
+     def _create_buffer(self, max_len: int):
+         return framework.layers.sinusoidal_pos_embedding(self.state_size, 2 * max_len - 1, -max_len + 1,
+                                                          device=self.pos_to_pq.weight.device)
+
+     def get_pos(self, l: int, offset: int) -> torch.Tensor:
+         if self.pos_encoding.shape[0] < 2 * (l + offset) - 1:
+             self.pos_encoding = self._create_buffer(int(2 ** math.ceil(math.log2(2 * (l + offset) - 1))))
+
+         return self.pos_to_pq(self._get_pos_subset(self.pos_encoding, l, offset))
+
+
+ class FixedRelativeMultiheadAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
+                  global_content_bias: bool = True, input_size: Optional[int] = None, absolute_gate: bool = False,
+                  projection_size: Optional[int] = None, output_size: Optional[int] = None):
+         super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size, projection_size=projection_size)
+
+         self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
+         self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
+         self.data_to_absgate = torch.nn.Linear(self.input_size, n_heads) \
+             if absolute_gate else None
+
+         self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
+             if global_content_bias else None
+         self.global_pos_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
+             if global_pos_bias else None
+
+         super(FixedRelativeMultiheadAttention, self).__init__(output_size)
+         self.reset_parameters()
+
+     def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
+         # data: [batch * n_heads, len, c]
+         # bias: [n_heads, c]
+         return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
+             if bias is not None else data
+
+     def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
+                 pos_offset: int = 0, need_weights: bool = False):
+         # curr_state: [batch_size, out_len, c]
+         # attend_to: [batch_size, in_len, c]
+         batch_size, in_len = attend_to.shape[0:2]
+         out_len = curr_state.shape[1]
+
+         k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
+         q, = self.transform_data(curr_state, self.data_to_q, 1)
+
+         k_pos = self.get_pos(in_len, pos_offset).view(-1, self.n_heads, self.projection_size). \
+             transpose(0, 1)  # [n_heads, 2 * in_len - 1, projection_size]
+
+         q_content = self.add_head_specific_bias(q, self.global_content_bias)
+         q_pos = self.add_head_specific_bias(q, self.global_pos_bias)
+
+         absgate = torch.sigmoid(self.transform_data(curr_state, self.data_to_absgate, 1)[0]) \
+             if self.data_to_absgate is not None else None
+
+         data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, k_pos, v,
+                                              pos_offset, ar_gate=absgate, need_weights=need_weights)
+
+         if need_weights:
+             return data, scores
+         else:
+             return data
+
+     def reset_parameters(self):
+         # super().reset_parameters()
+         torch.nn.init.xavier_uniform_(self.data_to_q.weight)
+         torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
+         torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
+
+         if self.global_content_bias is not None:
+             self.global_content_bias.data.fill_(0)
+
+         if self.global_pos_bias is not None:
+             self.global_pos_bias.data.fill_(0)
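The diagonal `shift` is the standard relative-position trick: row i of the output reads the column of `posmat` that encodes relative offset j − i. A small check (a sketch) in which each column of `posmat` is filled with the offset it represents, so the output should be exactly j − i:

    import torch
    from forgetting_transformer.ops.multi_head_relative_pos_attention import shift

    n = 3
    offsets = torch.arange(-(n - 1), n, dtype=torch.float32)   # [-2, -1, 0, 1, 2]
    posmat = offsets.expand(1, 1, n, 2 * n - 1)                # [b, h, n_out, 2n-1]

    shifted = shift(posmat)[0, 0]                              # [n_out, n_in]
    # shifted[i, j] == j - i: each query row now indexes keys by relative position
    expected = torch.arange(n).view(1, n) - torch.arange(n).view(n, 1)
    assert torch.equal(shifted, expected.float())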
ops/multi_head_relative_pos_attention.py.bak ADDED
@@ -0,0 +1,185 @@
+ import torch
+ import torch.nn
+ import torch.nn.functional as F
+ from typing import Optional, Dict, Any, Tuple
+ from .multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
+ import framework
+ import math
+ from matplotlib import cm
+
+
+ def shift(posmat: torch.Tensor) -> torch.Tensor:
+     # Slice the matrix out diagonally: each successive row is shifted one
+     # position to the left relative to the previous one.
+     # Input shape: [n_batch, n_head, n_out, n_in * 2 - 1]
+     # Return: [n_batch, n_head, n_out, n_in]
+     p = F.pad(posmat, (0, 1, 0, 1)).flatten(-2)  # [n_batch, n_head, (n_out + 1) * n_in * 2]
+     p = p.narrow(-1, posmat.shape[-1] // 2, posmat.shape[-1] * posmat.shape[-2]).view_as(posmat)
+
+     return p.narrow(-1, 0, (posmat.shape[-1] + 1) // 2)
+
+
+ class RelativeAttentionBase(MultiHeadAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float, projection_size: Optional[int] = None):
+         super().__init__(state_size, n_heads, dropout=dropout, projection_size=projection_size)
+         self.scale = torch.nn.Parameter(torch.tensor([self.scale]))
+         self.s_bias = torch.nn.Parameter(torch.tensor([0.0]))
+         self.vis_pos_vs_content = []
+
+     def get_attention_scores(self, mask: Optional[torch.Tensor],
+                              q_content: torch.Tensor, k_content: torch.Tensor,
+                              q_pos: torch.Tensor, k_pos: torch.Tensor,
+                              pos_offset: int, ar_gate: Optional[torch.Tensor] = None) -> torch.Tensor:
+
+         # shape of q_content, q_pos, k_content: [n_batch * n_heads, n_steps, data_size]
+         # k_pos: [n_heads, n_in * 2 - 1, data_size]
+         # ar_gate: [n_batch * n_heads, n_out, 1]
+         # Output shape: [n_batch * n_heads, n_out, data_size]
+
+         n_batch = q_content.shape[0] // self.n_heads
+         n_out_steps = q_content.shape[1]
+
+         # content-content addressing
+         content = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
+
+         # content-pos addressing
+         pos = torch.matmul(q_pos.view(n_batch, self.n_heads, n_out_steps, -1), self.dropout(k_pos).transpose(-1, -2))  # [n_batch, n_head, n_out, n_in * 2 - 1]
+         fpos = shift(pos).flatten(0, 1)
+         if ar_gate is not None:
+             fpos = fpos * ar_gate + pos.flatten(0, 1)[..., fpos.shape[-1] - 1:] * (1 - ar_gate)
+
+         # return self._masked_softmax(fpos * self.scale, mask)
+         if self.visualization_enabled:
+             self.vis_pos_vs_content.append((content.view(n_batch, self.n_heads, *content.shape[1:])[0] * self.scale,
+                                             fpos.view(n_batch, self.n_heads, *fpos.shape[1:])[0] * self.scale))
+
+         return self._masked_softmax((content + fpos) * self.scale, mask)
+
+     def _attention(self, mask: Optional[torch.Tensor],
+                    q_content: torch.Tensor, k_content: torch.Tensor,
+                    q_pos: torch.Tensor, k_pos: torch.Tensor,
+                    v: torch.Tensor, pos_offset: int,
+                    ar_gate: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
+
+         scores = self.get_attention_scores(mask, q_content, k_content, q_pos, k_pos, pos_offset, ar_gate)
+
+         # Scores shape: [n_batch * n_heads, n_out, n_in]
+         return self._attention_read(mask, scores, v)
+
+     def _get_pos_subset(self, pos_encoding: torch.Tensor, length: int, offset: int) -> torch.Tensor:
+         l_slice = 2 * length - 1
+         assert pos_encoding.shape[0] > l_slice
+         return pos_encoding.narrow(0, pos_encoding.shape[0] // 2 - length + 1 - offset, 2 * length - 1)
+
+     def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:
+         r = {}
+         marks = options.get("steplabel")
+         if options.get("mha.plot_head_details") and self.vis_pos_vs_content:
+             for head in range(self.vis_pos_vs_content[0][0].shape[0]):
+                 cont = torch.stack([layer[0][head] for _, layer in enumerate(self.vis_pos_vs_content)], 0)
+                 pos = torch.stack([layer[1][head] for _, layer in enumerate(self.vis_pos_vs_content)], 0)
+                 i = torch.stack([layer[head] for _, layer in enumerate(self.attention_to_visualize)], 0)
+                 content = torch.stack([cont, pos], -1).softmax(-1)[..., 0]
+
+                 color = cm.get_cmap("brg")(content.cpu().numpy())
+                 color[..., -1] = (i * 0.95 + 0.05).cpu().numpy()
+
+                 r[f"content_vs_pos_{head}"] = framework.visualize.plot.AnimatedHeatmap(color, ylabel="dest",
+                     xlabel="src", textval=False, x_marks=marks, y_marks=marks, cmap="brg", colorbar=True,
+                     colorbar_ticks=[0, 0.99], colorbar_labels=["pos", "con"], ignore_wrong_marks=True)
+
+             # r["attention_max"] = framework.visualize.plot.AnimatedHeatmap(
+             #     torch.stack([layer.max(0)[0] for _, layer in enumerate(self.attention_to_visualize)], 0),
+             #     ylabel="dest", xlabel="src", textval=False, x_marks=marks, y_marks=marks)
+         self.vis_pos_vs_content = []
+
+         r.update(super().plot(options))
+         return r
+
+
+ class FixedRelativeMultiheadAttentionBase(RelativeAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, input_size: Optional[int] = None,
+                  projection_size: Optional[int] = None):
+         super().__init__(state_size, n_heads, dropout, projection_size)
+
+         self.input_size = state_size if input_size is None else input_size
+
+         self.pos_to_pq = torch.nn.Linear(state_size, self.n_heads * self.projection_size, bias=False)
+         self.register_buffer("pos_encoding", self._create_buffer(1000))
+
+     def _create_buffer(self, max_len: int):
+         return framework.layers.sinusoidal_pos_embedding(self.state_size, 2 * max_len - 1, -max_len + 1,
+                                                          device=self.pos_to_pq.weight.device)
+
+     def get_pos(self, l: int, offset: int) -> torch.Tensor:
+         if self.pos_encoding.shape[0] < 2 * (l + offset) - 1:
+             self.pos_encoding = self._create_buffer(int(2 ** math.ceil(math.log2(2 * (l + offset) - 1))))
+
+         return self.pos_to_pq(self._get_pos_subset(self.pos_encoding, l, offset))
+
+
+ class FixedRelativeMultiheadAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
+     def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
+                  global_content_bias: bool = True, input_size: Optional[int] = None, absolute_gate: bool = False,
+                  projection_size: Optional[int] = None, output_size: Optional[int] = None):
+         super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size, projection_size=projection_size)
+
+         self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
+         self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
+         self.data_to_absgate = torch.nn.Linear(self.input_size, n_heads) \
+             if absolute_gate else None
+
+         self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
+             if global_content_bias else None
+         self.global_pos_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
+             if global_pos_bias else None
+
+         super(FixedRelativeMultiheadAttention, self).__init__(output_size)
+         self.reset_parameters()
+
+     def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
+         # data: [batch * n_heads, len, c]
+         # bias: [n_heads, c]
+         return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
+             if bias is not None else data
+
+     def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
+                 pos_offset: int = 0, need_weights: bool = False):
+         # curr_state: [batch_size, out_len, c]
+         # attend_to: [batch_size, in_len, c]
+         batch_size, in_len = attend_to.shape[0:2]
+         out_len = curr_state.shape[1]
+
+         k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
+         q, = self.transform_data(curr_state, self.data_to_q, 1)
+
+         k_pos = self.get_pos(in_len, pos_offset).view(-1, self.n_heads, self.projection_size). \
+             transpose(0, 1)  # [n_heads, 2 * in_len - 1, projection_size]
+
+         q_content = self.add_head_specific_bias(q, self.global_content_bias)
+         q_pos = self.add_head_specific_bias(q, self.global_pos_bias)
+
+         absgate = torch.sigmoid(self.transform_data(curr_state, self.data_to_absgate, 1)[0]) \
+             if self.data_to_absgate is not None else None
+
+         data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, k_pos, v,
+                                              pos_offset, ar_gate=absgate, need_weights=need_weights)
+
+         if need_weights:
+             return data, scores
+         else:
+             return data
+
+     def reset_parameters(self):
+         # super().reset_parameters()
+         torch.nn.init.xavier_uniform_(self.data_to_q.weight)
+         torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
+         torch.nn.init.xavier_uniform_(self.data_to_kv.weight)
+
+         if self.global_content_bias is not None:
+             self.global_content_bias.data.fill_(0)
+
+         if self.global_pos_bias is not None:
+             self.global_pos_bias.data.fill_(0)
ops/sliding_window_attention_std.py ADDED
@@ -0,0 +1,88 @@
+ """
+ Sliding Window / Hard Attention
+ Based on "Context Limitations Make Neural Language Models More Human-Like"
+ (Kuribayashi et al., 2022)
+ """
+
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional
+
+
+ def sliding_window_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     window_size: int = 2,  # default: 2-gram (the current token plus the previous one)
+ ) -> torch.Tensor:
+     """
+     Sliding Window Attention.
+
+     Hard cutoff: each query may only attend to the most recent
+     `window_size` tokens, itself included.
+     """
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+     T_k = k.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Compute logits
+     logits = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Create the sliding-window mask
+     mask = create_sliding_window_mask(T_q, T_k, window_size, device=q.device)
+     logits = logits.masked_fill(~mask, float('-inf'))
+
+     # Seq start mask
+     if seq_start is not None:
+         seq_mask = torch.arange(T_k, device=q.device)[None, None, None, :] < seq_start[None, :, None, None]
+         logits = logits.masked_fill(seq_mask, float('-inf'))
+
+     # Standard softmax
+     weights = F.softmax(logits, dim=-1)
+
+     # Apply to values (cast back so FP16 inputs also work)
+     out = torch.matmul(weights.to(v.dtype), v)
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
+
+
+ def create_sliding_window_mask(
+     T_q: int,
+     T_k: int,
+     window_size: int,
+     device: torch.device
+ ) -> torch.Tensor:
+     """
+     Build the sliding-window mask.
+
+     window_size=2: attend to the current and the previous token (2-gram)
+     window_size=3: attend to the current and the previous two tokens (3-gram)
+     """
+     # Base causal mask
+     mask = torch.tril(torch.ones(T_q, T_k, dtype=torch.bool, device=device))
+
+     # Apply the window limit
+     if window_size > 0 and window_size < T_k:
+         for i in range(T_q):
+             # Keep only the range [i - window_size + 1, i]
+             start = max(0, i - window_size + 1)
+             if start > 0:
+                 mask[i, :start] = False
+
+     return mask[None, None, :, :]  # [1, 1, T_q, T_k]
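The per-row loop in `create_sliding_window_mask` can be replaced by a single broadcasted comparison. A sketch that produces the same mask for any window_size >= 1:

    import torch

    def create_sliding_window_mask_vectorized(
        T_q: int, T_k: int, window_size: int, device: torch.device
    ) -> torch.Tensor:
        q_idx = torch.arange(T_q, device=device).view(-1, 1)   # [T_q, 1]
        k_idx = torch.arange(T_k, device=device).view(1, -1)   # [1, T_k]
        # Causal, and within the last `window_size` positions (self included):
        # allowed keys are j in [i - window_size + 1, i]
        mask = (k_idx <= q_idx) & (k_idx > q_idx - window_size)
        return mask[None, None, :, :]                          # [1, 1, T_q, T_k]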
ops/stickbreaking_attention_std.py ADDED
@@ -0,0 +1,46 @@
+ """
+ Stick-breaking Attention - official Triton implementation
+ """
+
+ from stickbreaking_attention.sb_attn import sb_attn
+ import math
+ import torch
+ from einops import rearrange
+ from typing import Optional
+
+
+ def stickbreaking_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     *,
+     head_first: bool = False,
+     seq_start: Optional[torch.Tensor] = None,
+     sm_scale: Optional[float] = None,
+     normalize: bool = True,
+     attend_current: bool = False,
+ ) -> torch.Tensor:
+     """Stick-breaking attention using the official Triton implementation."""
+
+     if not head_first:
+         q = rearrange(q, "b t h d -> b h t d")
+         k = rearrange(k, "b t h d -> b h t d")
+         v = rearrange(v, "b t h d -> b h t d")
+
+     B, H, T_q, D = q.shape
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Official Triton kernel; returns (output, remainder).
+     # The remainder (unallocated attention mass) is discarded here.
+     out, rem = sb_attn(
+         q, k, v,
+         inv_temp=sm_scale,
+         attend_current=attend_current
+     )
+
+     if not head_first:
+         out = rearrange(out, "b h t d -> b t h d")
+
+     return out
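For intuition, a plain-PyTorch sketch of what stick-breaking weights look like, assuming the formulation A[i, j] = σ(z[i, j]) · Π_{j<k<i}(1 − σ(z[i, k])) for j < i (strictly causal, unnormalized; an illustration only, not the kernel's exact numerics). Each row allocates mass starting from the most recent key; whatever is left over is the "remainder" that `sb_attn` also returns.

    import torch
    import torch.nn.functional as F

    def stickbreaking_weights_reference(logits: torch.Tensor) -> torch.Tensor:
        """logits: [..., T, T]; returns causal stick-breaking weights."""
        T = logits.shape[-1]
        log_beta = F.logsigmoid(logits)        # log sigma(z)
        log_one_minus = F.logsigmoid(-logits)  # log(1 - sigma(z))

        weights = torch.zeros_like(logits)
        for i in range(T):
            for j in range(i):                 # strictly causal: j < i
                # product of (1 - sigma) over the keys between j and i
                between = log_one_minus[..., i, j + 1:i].sum(-1)
                weights[..., i, j] = (log_beta[..., i, j] + between).exp()
        # Rows sum to 1 minus the remainder, so they are already <= 1.
        return weights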
ops/transformer.py ADDED
@@ -0,0 +1,165 @@
+ import torch
+ import torch.nn
+ import torch.nn.functional as F
+ from .multi_head_attention import MultiHeadAttention, AttentionMask
+ from typing import Optional, Callable, Dict
+ from dataclasses import dataclass
+ # This file is based on PyTorch's internal implementation
+
+ ActivationFunction = Callable[[torch.Tensor], torch.Tensor]
+
+
+ class TransformerEncoderLayer(torch.nn.Module):
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
+                  attention_dropout=0):
+         super(TransformerEncoderLayer, self).__init__()
+         self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
+         self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
+         self.dropout = torch.nn.Dropout(dropout)
+         self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
+
+         self.norm1 = torch.nn.LayerNorm(d_model)
+         self.norm2 = torch.nn.LayerNorm(d_model)
+         self.dropout1 = torch.nn.Dropout(dropout)
+         self.dropout2 = torch.nn.Dropout(dropout)
+
+         self.activation = activation
+         self.reset_parameters()
+
+     def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:
+         src2 = self.self_attn(src, src, mask)
+         src = src + self.dropout1(src2)
+         src = self.norm1(src)
+         src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+         src = src + self.dropout2(src2)
+         src = self.norm2(src)
+         return src
+
+     def reset_parameters(self):
+         torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
+                                       if self.activation is F.relu else 1.0)
+         torch.nn.init.xavier_uniform_(self.linear2.weight)
+
+
+ class TransformerDecoderLayer(torch.nn.Module):
+     def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,
+                  attention_dropout=0):
+         super(TransformerDecoderLayer, self).__init__()
+
+         self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
+         self.multihead_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
+         # Implementation of the feedforward model
+         self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
+         self.dropout = torch.nn.Dropout(dropout)
+         self.linear2 = torch.nn.Linear(dim_feedforward, d_model)
+
+         self.norm1 = torch.nn.LayerNorm(d_model)
+         self.norm2 = torch.nn.LayerNorm(d_model)
+         self.norm3 = torch.nn.LayerNorm(d_model)
+         self.dropout1 = torch.nn.Dropout(dropout)
+         self.dropout2 = torch.nn.Dropout(dropout)
+         self.dropout3 = torch.nn.Dropout(dropout)
+
+         self.activation = activation
+         self.reset_parameters()
+
+     def forward(self, tgt: torch.Tensor, memory: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,
+                 memory_key_padding_mask: Optional[torch.Tensor] = None,
+                 full_target: Optional[torch.Tensor] = None, pos_offset: int = 0) -> torch.Tensor:
+
+         assert pos_offset == 0 or tgt_mask is None
+         tgt2 = self.self_attn(tgt, tgt if full_target is None else full_target, mask=AttentionMask(None, tgt_mask))
+         tgt = tgt + self.dropout1(tgt2)
+         tgt = self.norm1(tgt)
+         tgt2 = self.multihead_attn(tgt, memory, mask=AttentionMask(memory_key_padding_mask, None))
+         tgt = tgt + self.dropout2(tgt2)
+         tgt = self.norm2(tgt)
+         tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
+         tgt = tgt + self.dropout3(tgt2)
+         tgt = self.norm3(tgt)
+         return tgt
+
+     def reset_parameters(self):
+         torch.nn.init.xavier_uniform_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')
+                                       if self.activation is F.relu else 1.0)
+         torch.nn.init.xavier_uniform_(self.linear2.weight)
+
+
+ class TransformerDecoderBase(torch.nn.Module):
+     @dataclass
+     class State:
+         step: int
+         state: Dict[int, torch.Tensor]
+
+     def __init__(self, d_model: int):
+         super().__init__()
+         self.d_model = d_model
+
+     def create_state(self, batch_size: int, max_length: int, device: torch.device) -> State:
+         return self.State(0, {i: torch.empty([batch_size, max_length, self.d_model], device=device)
+                               for i in range(len(self.layers))})
+
+     def one_step_forward(self, state: State, data: torch.Tensor, *args, **kwargs):
+         assert data.shape[1] == 1, f"One-step forward expects a single timestep, but the shape is {data.shape}"
+         assert state.step < state.state[0].shape[1]
+
+         for i, l in enumerate(self.layers):
+             state.state[i][:, state.step:state.step + 1] = data
+             data = l(data, *args, **kwargs, full_target=state.state[i][:, :state.step + 1],
+                      pos_offset=state.step)
+
+         state.step += 1
+         return data
+
+
+ class TransformerEncoder(torch.nn.Module):
+     def __init__(self, layer, n_layers: int, *args, **kwargs):
+         super().__init__()
+         self.layers = torch.nn.ModuleList([layer(*args, **kwargs) for _ in range(n_layers)])
+
+     def forward(self, data: torch.Tensor, *args, **kwargs):
+         for l in self.layers:
+             data = l(data, *args, **kwargs)
+         return data
+
+
+ class TransformerDecoder(TransformerDecoderBase):
+     def __init__(self, layer, n_layers: int, d_model: int, *args, **kwargs):
+         super().__init__(d_model)
+         self.layers = torch.nn.ModuleList([layer(d_model, *args, **kwargs) for _ in range(n_layers)])
+
+     def forward(self, data: torch.Tensor, *args, **kwargs):
+         for l in self.layers:
+             data = l(data, *args, **kwargs)
+         return data
+
+
+ def TransformerEncoderWithLayer(layer=TransformerEncoderLayer):
+     return lambda *args, **kwargs: TransformerEncoder(layer, *args, **kwargs)
+
+
+ def TransformerDecoderWithLayer(layer=TransformerDecoderLayer):
+     return lambda *args, **kwargs: TransformerDecoder(layer, *args, **kwargs)
+
+
+ class Transformer(torch.nn.Module):
+     def __init__(self, d_model: int = 512, nhead: int = 8, num_encoder_layers: int = 6,
+                  num_decoder_layers: int = 6, dim_feedforward: int = 2048, dropout: float = 0.1,
+                  activation: ActivationFunction = F.relu, encoder_layer=TransformerEncoderWithLayer(),
+                  decoder_layer=TransformerDecoderWithLayer(), attention_dropout: float = 0):
+         super().__init__()
+
+         self.encoder = encoder_layer(num_encoder_layers, d_model, nhead, dim_feedforward,
+                                      dropout, activation, attention_dropout)
+         self.decoder = decoder_layer(num_decoder_layers, d_model, nhead, dim_feedforward,
+                                      dropout, activation, attention_dropout)
+
+     def forward(self, src: torch.Tensor, tgt: torch.Tensor, tgt_mask: Optional[torch.Tensor] = None,
+                 src_mask: Optional[AttentionMask] = None):
+
+         memory = self.encoder(src, src_mask)
+         return self.decoder(tgt, memory, tgt_mask, src_mask.src_length_mask if src_mask is not None else None)
+
+     @staticmethod
+     def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:
+         return torch.triu(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=1)
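End-to-end usage (a sketch): build the default encoder-decoder stack and run a teacher-forced decode with a causal target mask. Note the defaults hand the layer classes (`TransformerEncoderLayer` / `TransformerDecoderLayer`) to the stack builders, which is why the `WithLayer` defaults above must name the layer classes, not the stacks.

    import torch
    from forgetting_transformer.ops.transformer import Transformer

    model = Transformer(d_model=64, nhead=4, num_encoder_layers=2,
                        num_decoder_layers=2, dim_feedforward=128)

    src = torch.randn(2, 7, 64)   # [batch, src_len, d_model]
    tgt = torch.randn(2, 5, 64)   # [batch, tgt_len, d_model]
    tgt_mask = Transformer.generate_square_subsequent_mask(5, src.device)

    out = model(src, tgt, tgt_mask)
    assert out.shape == (2, 5, 64)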
ops/vanilla_attention_std.py ADDED
@@ -0,0 +1,171 @@
+ """
+ Standard softmax attention for the vanilla Transformer.
+ A drop-in replacement for the flash_attn implementation.
+ """
+ import math
+ import torch
+ import torch.nn.functional as F
+ from einops import rearrange
+ from typing import Optional, Tuple
+
+
+ def vanilla_attention_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     causal: bool = True,
+     window_size: Optional[Tuple[int, int]] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """
+     Standard softmax attention, compatible with the flash_attn_func input format.
+
+     Args:
+         q, k, v: tensors in [batch, seq_len, num_heads, head_dim] layout
+         causal: whether to apply a causal mask
+         window_size: sliding-window extent (left, right); (-1, -1) means unrestricted
+         sm_scale: softmax scaling factor
+
+     Returns:
+         output: [batch, seq_len, num_heads, head_dim]
+     """
+     B, T_q, H, D = q.shape
+     T_k = k.shape[1]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     # Work in [B, H, T, D] layout
+     q = rearrange(q, 'b t h d -> b h t d')
+     k = rearrange(k, 'b t h d -> b h t d')
+     v = rearrange(v, 'b t h d -> b h t d')
+
+     # Compute attention scores
+     scores = torch.matmul(q.float(), k.float().transpose(-2, -1)) * sm_scale
+
+     # Causal mask
+     if causal:
+         P_SEQ = T_k - T_q  # handles the KV-cache case where keys outnumber queries
+         causal_mask = torch.triu(
+             torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
+             diagonal=P_SEQ + 1
+         )
+         scores = scores.masked_fill(causal_mask[None, None, :, :], float('-inf'))
+
+     # Window mask (sliding window attention)
+     if window_size is not None and window_size != (-1, -1):
+         left_window, right_window = window_size
+         window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
+         for i in range(T_q):
+             # Valid window range for each query position
+             start = max(0, i - left_window)
+             end = min(T_k, i + right_window + 1)
+             window_mask[i, start:end] = False
+         scores = scores.masked_fill(window_mask[None, None, :, :], float('-inf'))
+
+     # Softmax
+     attn_weights = F.softmax(scores, dim=-1)
+     attn_weights = torch.nan_to_num(attn_weights, 0.0)
+
+     # Apply attention to the values
+     output = torch.matmul(attn_weights.to(v.dtype), v)
+
+     # Back to [B, T, H, D] layout
+     output = rearrange(output, 'b h t d -> b t h d')
+
+     return output
+
+
+ def vanilla_attention_varlen_std(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     cu_seqlens_q: torch.Tensor,
+     cu_seqlens_k: torch.Tensor,
+     max_seqlen_q: int,
+     max_seqlen_k: int,
+     causal: bool = True,
+     window_size: Optional[Tuple[int, int]] = None,
+     sm_scale: Optional[float] = None,
+ ) -> torch.Tensor:
+     """
+     Standard softmax attention over variable-length sequences, compatible
+     with flash_attn_varlen_func.
+
+     Args:
+         q: [total_q_tokens, num_heads, head_dim]
+         k: [total_k_tokens, num_kv_heads, head_dim]
+         v: [total_k_tokens, num_kv_heads, head_dim]
+         cu_seqlens_q: cumulative sequence lengths [batch_size + 1]
+         cu_seqlens_k: cumulative sequence lengths [batch_size + 1]
+         max_seqlen_q: maximum query sequence length
+         max_seqlen_k: maximum key/value sequence length
+
+     Returns:
+         output: [total_q_tokens, num_heads, head_dim]
+     """
+     batch_size = cu_seqlens_q.shape[0] - 1
+     H = q.shape[1]
+     D = q.shape[2]
+
+     if sm_scale is None:
+         sm_scale = 1.0 / math.sqrt(D)
+
+     outputs = []
+
+     # Process one sequence at a time
+     for b in range(batch_size):
+         q_start, q_end = cu_seqlens_q[b].item(), cu_seqlens_q[b + 1].item()
+         k_start, k_end = cu_seqlens_k[b].item(), cu_seqlens_k[b + 1].item()
+
+         if q_start == q_end:  # empty sequence
+             continue
+
+         # Slice out this sequence's q, k, v
+         q_b = q[q_start:q_end]  # [T_q, H, D]
+         k_b = k[k_start:k_end]  # [T_k, H, D]
+         v_b = v[k_start:k_end]  # [T_k, H, D]
+
+         T_q = q_b.shape[0]
+         T_k = k_b.shape[0]
+
+         # Work in [H, T, D] layout
+         q_b = rearrange(q_b, 't h d -> h t d')
+         k_b = rearrange(k_b, 't h d -> h t d')
+         v_b = rearrange(v_b, 't h d -> h t d')
+
+         # Compute attention scores
+         scores = torch.matmul(q_b.float(), k_b.float().transpose(-2, -1)) * sm_scale
+
+         # Causal mask
+         if causal:
+             P_SEQ = T_k - T_q
+             causal_mask = torch.triu(
+                 torch.ones((T_q, T_k), dtype=torch.bool, device=q.device),
+                 diagonal=P_SEQ + 1
+             )
+             scores = scores.masked_fill(causal_mask[None, :, :], float('-inf'))
+
+         # Window mask
+         if window_size is not None and window_size != (-1, -1):
+             left_window, right_window = window_size
+             window_mask = torch.ones((T_q, T_k), dtype=torch.bool, device=q.device)
+             for i in range(T_q):
+                 start = max(0, i - left_window)
+                 end = min(T_k, i + right_window + 1)
+                 window_mask[i, start:end] = False
+             scores = scores.masked_fill(window_mask[None, :, :], float('-inf'))
+
+         # Softmax
+         attn_weights = F.softmax(scores, dim=-1)
+         attn_weights = torch.nan_to_num(attn_weights, 0.0)
+
+         # Apply attention
+         output_b = torch.matmul(attn_weights.to(v_b.dtype), v_b)
+
+         # Back to [T, H, D] layout
+         output_b = rearrange(output_b, 'h t d -> t h d')
+         outputs.append(output_b)
+
+     # Concatenate outputs from all sequences
+     output = torch.cat(outputs, dim=0)
+
+     return output
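A quick equivalence check (a sketch, assuming the package path): in the plain causal case the reference above should match PyTorch's fused scaled_dot_product_attention up to floating-point tolerance.

    import torch
    import torch.nn.functional as F
    from einops import rearrange
    from forgetting_transformer.ops.vanilla_attention_std import vanilla_attention_std

    B, T, H, D = 2, 16, 4, 32
    q = torch.randn(B, T, H, D)
    k = torch.randn(B, T, H, D)
    v = torch.randn(B, T, H, D)

    out = vanilla_attention_std(q, k, v, causal=True)

    # SDPA expects [B, H, T, D], so rearrange both ways around the call
    ref = F.scaled_dot_product_attention(
        rearrange(q, "b t h d -> b h t d"),
        rearrange(k, "b t h d -> b h t d"),
        rearrange(v, "b t h d -> b h t d"),
        is_causal=True,
    )
    ref = rearrange(ref, "b h t d -> b t h d")
    assert torch.allclose(out, ref, atol=1e-4)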