Voldemort108X committed · Commit 6cf5b11 · verified · Parent(s): 75db438

Add files using upload-large-folder tool

Code/Baselines/flash-attention/flash_attn.egg-info/SOURCES.txt ADDED
The diff for this file is too large to render.
 
Code/Baselines/flash-attention/flash_attn/flash_attn_interface.py ADDED
@@ -0,0 +1,1606 @@
# Copyright (c) 2023, Tri Dao.

from typing import Optional, Sequence, Tuple, Union

import torch
import torch.nn as nn
import os

# isort: off
# We need to import the CUDA kernels after importing torch
USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
if USE_TRITON_ROCM:
    from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
else:
    import flash_attn_2_cuda as flash_attn_gpu

# isort: on

def maybe_contiguous(x):
    return x.contiguous() if x is not None and x.stride(-1) != 1 else x


def _get_block_size_n(device, head_dim, is_dropout, is_causal):
    # This should match the block sizes in the CUDA kernel
    assert head_dim <= 256
    major, minor = torch.cuda.get_device_capability(device)
    is_sm8x = major == 8 and minor > 0  # Only include sm86 and sm89, exclude sm80 (A100)
    is_sm80 = major == 8 and minor == 0
    is_sm90 = major == 9 and minor == 0
    if head_dim <= 32:
        return 128
    if head_dim <= 64:
        return 128 if not is_dropout else 64
    elif head_dim <= 96:
        return 64
    elif head_dim <= 128:
        if is_sm8x:
            return 64 if (not is_dropout and is_causal) else 32
        else:
            return 64 if not is_dropout else 32
    elif head_dim <= 192:
        return 64
    elif head_dim <= 224:
        return 64
    elif head_dim <= 256:
        return 64


def round_multiple(x, m):
    return (x + m - 1) // m * m
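
# For example, round_multiple(130, 128) == 256; callers below use this to size the
# seqlen-rounded softmax / LSE buffers in the fake (meta) implementations.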


# torch.compile() support is only enabled for pytorch >= 2.4
# The reason for this is that we are using the new custom_op and register_fake
# APIs, which support inplace modification of inputs in the function itself
if torch.__version__ >= "2.4.0":
    _torch_custom_op_wrapper = torch.library.custom_op
    _torch_register_fake_wrapper = torch.library.register_fake
else:
    def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
        def wrap(func):
            return func
        if fn is None:
            return wrap
        return fn
    def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
        def wrap(func):
            return func
        if fn is None:
            return wrap
        return fn
    _torch_custom_op_wrapper = noop_custom_op_wrapper
    _torch_register_fake_wrapper = noop_register_fake_wrapper
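
# Illustration with a hypothetical op `f` (comment only): on torch < 2.4 the
# wrappers above are plain pass-throughs, so
#
#     @_torch_custom_op_wrapper("flash_attn::f", mutates_args=())
#     def f(x: torch.Tensor) -> torch.Tensor: ...
#
# leaves f as an ordinary Python function, while on torch >= 2.4 the same
# decorator registers f as a torch.library custom op that torch.compile can trace.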


@_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
def _flash_attn_forward(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    return_softmax: bool
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
    out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
        q,
        k,
        v,
        None,
        alibi_slopes,
        dropout_p,
        softmax_scale,
        causal,
        window_size_left,
        window_size_right,
        softcap,
        return_softmax,
        None,
    )
    return out, softmax_lse, S_dmask, rng_state


@_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
def _flash_attn_forward_fake(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    return_softmax: bool
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
    batch_size, seqlen_q, num_heads, head_size = q.shape
    seqlen_k = k.shape[1]
    out = torch.empty_like(q)
    softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
    p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
    if return_softmax:
        p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
    rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)

    return out, softmax_lse, p, rng_state


if torch.__version__ >= "2.4.0":
    _wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
else:
    _wrapped_flash_attn_forward = _flash_attn_forward
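
# On torch >= 2.4, _wrapped_flash_attn_forward resolves to
# torch.ops.flash_attn._flash_attn_forward, and the register_fake implementation
# above supplies shape/dtype metadata so torch.compile can trace the op on fake
# (meta) tensors without launching the CUDA kernel.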


@_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
def _flash_attn_varlen_forward(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int = -1,
    window_size_right: int = -1,
    softcap: float = 0.0,
    alibi_slopes: Optional[torch.Tensor] = None,
    return_softmax: bool = False,
    block_table: Optional[torch.Tensor] = None,
    leftpad_k: Optional[torch.Tensor] = None,
    seqused_k: Optional[torch.Tensor] = None,
    zero_tensors: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
    out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
        q,
        k,
        v,
        None,
        cu_seqlens_q,
        cu_seqlens_k,
        seqused_k,
        leftpad_k,
        block_table,
        alibi_slopes,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        zero_tensors,
        causal,
        window_size_left,
        window_size_right,
        softcap,
        return_softmax,
        None,
    )
    # if out.isnan().any() or softmax_lse.isnan().any():
    #     breakpoint()
    return out, softmax_lse, S_dmask, rng_state


@_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
def _flash_attn_varlen_forward_fake(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int = -1,
    window_size_right: int = -1,
    softcap: float = 0.0,
    alibi_slopes: Optional[torch.Tensor] = None,
    return_softmax: bool = False,
    block_table: Optional[torch.Tensor] = None,
    leftpad_k: Optional[torch.Tensor] = None,
    seqused_k: Optional[torch.Tensor] = None,
    zero_tensors: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
    paged_kv = block_table is not None
    batch_size = cu_seqlens_q.numel() - 1
    total_q, num_heads, _ = q.shape

    out = torch.empty_like(q)
    softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
    p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
    seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
    seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
    if return_softmax:
        p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
    rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
    return out, softmax_lse, p, rng_state


if torch.__version__ >= "2.4.0":
    _wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
else:
    _wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward


@_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
def _flash_attn_backward(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    deterministic: bool,
    rng_state: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    # dq, dk, dv are allocated by us so they should already be contiguous
    dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
    (
        dq,
        dk,
        dv,
        softmax_d,
    ) = flash_attn_gpu.bwd(
        dout,
        q,
        k,
        v,
        out,
        softmax_lse,
        dq,
        dk,
        dv,
        alibi_slopes,
        dropout_p,
        softmax_scale,
        causal,
        window_size_left,
        window_size_right,
        softcap,
        deterministic,
        None,
        rng_state,
    )
    return softmax_d


@_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
def _flash_attn_backward_fake(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    deterministic: bool,
    rng_state: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
    if dq is None:
        dq = torch.empty_like(q)
    if dk is None:
        dk = torch.empty_like(k)
    if dv is None:
        dv = torch.empty_like(v)
    batch_size, seqlen_q, num_heads, _ = q.shape
    softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)

    return softmax_d


if torch.__version__ >= "2.4.0":
    _wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
else:
    _wrapped_flash_attn_backward = _flash_attn_backward


@_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
def _flash_attn_varlen_backward(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    deterministic: bool,
    rng_state: Optional[torch.Tensor] = None,
    zero_tensors: bool = False,
) -> torch.Tensor:
    # dq, dk, dv are allocated by us so they should already be contiguous
    dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
    (
        dq,
        dk,
        dv,
        softmax_d,
    ) = flash_attn_gpu.varlen_bwd(
        dout,
        q,
        k,
        v,
        out,
        softmax_lse,
        dq,
        dk,
        dv,
        cu_seqlens_q,
        cu_seqlens_k,
        alibi_slopes,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        zero_tensors,
        causal,
        window_size_left,
        window_size_right,
        softcap,
        deterministic,
        None,
        rng_state,
    )
    # if dq.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
    #     breakpoint()
    return softmax_d


@_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
def _flash_attn_varlen_backward_fake(
    dout: torch.Tensor,
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    out: torch.Tensor,
    softmax_lse: torch.Tensor,
    dq: Optional[torch.Tensor],
    dk: Optional[torch.Tensor],
    dv: Optional[torch.Tensor],
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    dropout_p: float,
    softmax_scale: float,
    causal: bool,
    window_size_left: int,
    window_size_right: int,
    softcap: float,
    alibi_slopes: Optional[torch.Tensor],
    deterministic: bool,
    rng_state: Optional[torch.Tensor] = None,
    zero_tensors: bool = False,
) -> torch.Tensor:
    dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
    batch_size = cu_seqlens_q.numel() - 1
    total_q, num_heads, _ = q.shape

    if dq is None:
        dq = torch.empty_like(q)
    if dk is None:
        dk = torch.empty_like(k)
    if dv is None:
        dv = torch.empty_like(v)
    softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)

    return softmax_d


if torch.__version__ >= "2.4.0":
    _wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
else:
    _wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward


class FlashAttnQKVPackedFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        qkv,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and qkv.requires_grad
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
        head_size_og = q.size(3)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
            q,
            k,
            v,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
        )
        if is_grad:
            ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
            ctx.dropout_p = dropout_p
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic
        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
        qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
        dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
        head_size_og = dout.size(3)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dqkv[:, :, 0],
            dqkv[:, :, 1],
            dqkv[:, :, 2],
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dqkv = dqkv[..., : dout.shape[-1]]  # We could have padded the head dimension
        return dqkv, None, None, None, None, None, None, None, None, None


class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        qkv,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and qkv.requires_grad
        if softmax_scale is None:
            softmax_scale = qkv.shape[-1] ** (-0.5)
        q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
        head_size_og = q.size(2)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
            q,
            k,
            v,
            cu_seqlens,
            cu_seqlens,
            max_seqlen,
            max_seqlen,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
            block_table=None,
        )
        if is_grad:
            ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
            ctx.dropout_p = dropout_p
            ctx.max_seqlen = max_seqlen
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic
        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
        qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
        dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
        head_size_og = dout.size(2)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_varlen_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dqkv[:, 0],
            dqkv[:, 1],
            dqkv[:, 2],
            cu_seqlens,
            cu_seqlens,
            ctx.max_seqlen,
            ctx.max_seqlen,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dqkv = dqkv[..., : dout.shape[-1]]  # We could have padded the head dimension
        return dqkv, None, None, None, None, None, None, None, None, None, None, None


class FlashAttnKVPackedFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        kv,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and any(
            x.requires_grad for x in [q, kv]
        )
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
        head_size_og = q.size(3)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
            q,
            k,
            v,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
        )
        if is_grad:
            ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
            ctx.dropout_p = dropout_p
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic
        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
        dq = torch.empty_like(q)
        kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
        dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
        head_size_og = dout.size(3)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dkv[:, :, 0],
            dkv[:, :, 1],
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dq = dq[..., : dout.shape[-1]]  # We could have padded the head dimension
        dkv = dkv[..., : dout.shape[-1]]
        return dq, dkv, None, None, None, None, None, None, None, None, None


class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        kv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and any(
            x.requires_grad for x in [q, kv]
        )
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        k, v = kv[:, 0].detach(), kv[:, 1].detach()
        head_size_og = q.size(2)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
            block_table=None,
        )
        if is_grad:
            ctx.save_for_backward(
                q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
            )
            ctx.dropout_p = dropout_p
            ctx.max_seqlen_q = max_seqlen_q
            ctx.max_seqlen_k = max_seqlen_k
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic
        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
        dq = torch.empty_like(q)
        kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
        dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
        head_size_og = dout.size(2)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_varlen_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dkv[:, 0],
            dkv[:, 1],
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dq = dq[..., : dout.shape[-1]]  # We could have padded the head dimension
        dkv = dkv[..., : dout.shape[-1]]
        return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None, None


class FlashAttnFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and any(
            x.requires_grad for x in [q, k, v]
        )
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        head_size_og = q.size(3)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
            q,
            k,
            v,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
        )
        if is_grad:
            ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
            ctx.dropout_p = dropout_p
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic
        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        head_size_og = dout.size(3)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dk,
            dv,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dq = dq[..., : dout.shape[-1]]  # We could have padded the head dimension
        dk = dk[..., : dout.shape[-1]]
        dv = dv[..., : dout.shape[-1]]
        return dq, dk, dv, None, None, None, None, None, None, None, None, None


class FlashAttnVarlenFunc(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_softmax,
        block_table,
        is_grad_enabled,
    ):
        is_grad = is_grad_enabled and any(
            x.requires_grad for x in [q, k, v]
        )
        if softmax_scale is None:
            softmax_scale = q.shape[-1] ** (-0.5)
        head_size_og = q.size(2)
        if head_size_og % 8 != 0:
            q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
            k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
            v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
        out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            softmax_scale,
            causal=causal,
            window_size_left=window_size[0],
            window_size_right=window_size[1],
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            return_softmax=return_softmax and dropout_p > 0,
            block_table=block_table,
        )
        if is_grad:
            ctx.save_for_backward(
                q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
            )
            ctx.dropout_p = dropout_p
            ctx.max_seqlen_q = max_seqlen_q
            ctx.max_seqlen_k = max_seqlen_k
            ctx.softmax_scale = softmax_scale
            ctx.causal = causal
            ctx.window_size = window_size
            ctx.softcap = softcap
            ctx.alibi_slopes = alibi_slopes
            ctx.deterministic = deterministic

        out = out_padded[..., :head_size_og]
        return out if not return_softmax else (out, softmax_lse, S_dmask)

    @staticmethod
    def backward(ctx, dout, *args):
        q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
        head_size_og = dout.size(2)
        dout_padded = dout
        if head_size_og % 8 != 0:
            dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
        _wrapped_flash_attn_varlen_backward(
            dout_padded,
            q,
            k,
            v,
            out,
            softmax_lse,
            dq,
            dk,
            dv,
            cu_seqlens_q,
            cu_seqlens_k,
            ctx.max_seqlen_q,
            ctx.max_seqlen_k,
            ctx.dropout_p,
            ctx.softmax_scale,
            ctx.causal,
            ctx.window_size[0],
            ctx.window_size[1],
            ctx.softcap,
            ctx.alibi_slopes,
            ctx.deterministic,
            rng_state=rng_state,
        )
        dq = dq[..., : dout.shape[-1]]  # We could have padded the head dimension
        dk = dk[..., : dout.shape[-1]]
        dv = dv[..., : dout.shape[-1]]
        return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None


def flash_attn_qkvpacked_func(
    qkv,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    If Q, K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
    of the gradients of Q, K, V.
    For multi-query and grouped-query attention (MQA/GQA), please see
    flash_attn_kvpacked_func and flash_attn_func.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.

    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnQKVPackedFunc.apply(
        qkv,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        torch.is_grad_enabled(),
    )
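

# A minimal usage sketch (illustrative only; this helper is not called anywhere
# in this module, and the shapes/dtypes are assumptions):
def _usage_sketch_flash_attn_qkvpacked_func():
    # Self-attention with Q, K, V pre-stacked along dim 2: (batch, seqlen, 3, nheads, headdim).
    qkv = torch.randn(2, 1024, 3, 8, 64, device="cuda", dtype=torch.float16)
    out = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True)
    return out  # (2, 1024, 8, 64)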


def flash_attn_kvpacked_func(
    q,
    kv,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    If K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
    of the gradients of K, V.
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If a row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        kv: (batch_size, seqlen, 2, nheads_k, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnKVPackedFunc.apply(
        q,
        kv,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        torch.is_grad_enabled(),
    )
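

# A minimal GQA usage sketch (illustrative only; not called anywhere in this module):
def _usage_sketch_flash_attn_kvpacked_func():
    # 8 query heads share 2 KV heads (8 is divisible by 2), with K, V packed along dim 2.
    q = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
    kv = torch.randn(2, 1024, 2, 2, 64, device="cuda", dtype=torch.float16)
    out = flash_attn_kvpacked_func(q, kv, causal=True)
    return out  # (2, 1024, 8, 64)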


def flash_attn_func(
    q,
    k,
    v,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If a row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (batch_size, seqlen, nheads, headdim)
        k: (batch_size, seqlen, nheads_k, headdim)
        v: (batch_size, seqlen, nheads_k, headdim)
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (batch_size, seqlen, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnFunc.apply(
        q,
        k,
        v,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        torch.is_grad_enabled(),
    )
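

# A minimal usage sketch (illustrative only; not called anywhere in this module):
def _usage_sketch_flash_attn_func():
    # Causal self-attention with a 256-token sliding window to the left:
    # query at position i attends to keys in [i - 256, i].
    q = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
    k = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
    v = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
    out = flash_attn_func(q, k, v, causal=True, window_size=(256, 0))
    return out  # (2, 1024, 8, 64)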


def flash_attn_varlen_qkvpacked_func(
    qkv,
    cu_seqlens,
    max_seqlen,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    If Q, K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
    of the gradients of Q, K, V.
    For multi-query and grouped-query attention (MQA/GQA), please see
    flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.

    Arguments:
        qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
        cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
            of the sequences in the batch, used to index into qkv.
        max_seqlen: int. Maximum sequence length in the batch.
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (total, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnVarlenQKVPackedFunc.apply(
        qkv,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        torch.is_grad_enabled(),
    )
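

# A minimal varlen usage sketch (illustrative only; not called anywhere in this module):
def _usage_sketch_flash_attn_varlen_qkvpacked_func():
    # Two sequences of lengths 3 and 5 packed into total = 8 tokens (no padding).
    qkv = torch.randn(8, 3, 8, 64, device="cuda", dtype=torch.float16)
    cu_seqlens = torch.tensor([0, 3, 8], device="cuda", dtype=torch.int32)
    out = flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens, max_seqlen=5, causal=True)
    return out  # (8, 8, 64)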


def flash_attn_varlen_kvpacked_func(
    q,
    kv,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
):
    """dropout_p should be set to 0.0 during evaluation
    If K, V are already stacked into 1 tensor, this function will be faster than
    calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit
    concatenation of the gradients of K, V.
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If a row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
        kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
        cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
            of the sequences in the batch, used to index into q.
        cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
            of the sequences in the batch, used to index into kv.
        max_seqlen_q: int. Maximum query sequence length in the batch.
        max_seqlen_k: int. Maximum key sequence length in the batch.
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (total, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnVarlenKVPackedFunc.apply(
        q,
        kv,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        torch.is_grad_enabled(),
    )
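

# A minimal varlen cross-attention sketch (illustrative only; not called anywhere
# in this module):
def _usage_sketch_flash_attn_varlen_kvpacked_func():
    # Query lengths (1, 3) and key lengths (6, 4); 8 query heads share 2 KV heads.
    q = torch.randn(4, 8, 64, device="cuda", dtype=torch.float16)
    kv = torch.randn(10, 2, 2, 64, device="cuda", dtype=torch.float16)
    cu_seqlens_q = torch.tensor([0, 1, 4], device="cuda", dtype=torch.int32)
    cu_seqlens_k = torch.tensor([0, 6, 10], device="cuda", dtype=torch.int32)
    out = flash_attn_varlen_kvpacked_func(
        q, kv, cu_seqlens_q, cu_seqlens_k, max_seqlen_q=3, max_seqlen_k=6
    )
    return out  # (4, 8, 64)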


def flash_attn_varlen_func(
    q,
    k,
    v,
    cu_seqlens_q,
    cu_seqlens_k,
    max_seqlen_q,
    max_seqlen_k,
    dropout_p=0.0,
    softmax_scale=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite context window
    softcap=0.0,  # 0.0 means deactivated
    alibi_slopes=None,
    deterministic=False,
    return_attn_probs=False,
    block_table=None,
):
    """dropout_p should be set to 0.0 during evaluation
    Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.

    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
        1 1 1 1 0
        1 1 1 1 1
    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
        0 0
        0 0
        0 0
        1 0
        1 1
    If a row of the mask is all zero, the output will be zero.

    If window_size != (-1, -1), implements sliding window local attention. Query at position i
    will only attend to keys between
    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.

    Arguments:
        q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
        k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
        v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
        cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
            of the sequences in the batch, used to index into q.
        cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
            of the sequences in the batch, used to index into kv.
        max_seqlen_q: int. Maximum query sequence length in the batch.
        max_seqlen_k: int. Maximum key sequence length in the batch.
        dropout_p: float. Dropout probability.
        softmax_scale: float. The scaling of QK^T before applying softmax.
            Defaults to 1 / sqrt(headdim).
        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
        softcap: float. Anything > 0 activates softcapping attention.
        alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
            (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
            is added to the attention score of query i and key j.
        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
            which is slightly slower and uses more memory. The forward pass is always deterministic.
        return_attn_probs: bool. Whether to return the attention probabilities. This option is for
            testing only. The returned probabilities are not guaranteed to be correct
            (they might not have the right scaling).
    Return:
        out: (total, nheads, headdim).
        softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
            normalization factor).
        S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
            The output of softmax (possibly with different scaling). It also encodes the dropout
            pattern (negative means that location was dropped, nonnegative means it was kept).
    """
    return FlashAttnVarlenFunc.apply(
        q,
        k,
        v,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale,
        causal,
        window_size,
        softcap,
        alibi_slopes,
        deterministic,
        return_attn_probs,
        block_table,
        torch.is_grad_enabled(),
    )
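

# A minimal varlen usage sketch (illustrative only; not called anywhere in this module):
def _usage_sketch_flash_attn_varlen_func():
    # Two sequences of lengths 3 and 5 packed into total = 8 tokens; self-attention,
    # so the same cu_seqlens tensor is used for both q and k.
    q = torch.randn(8, 8, 64, device="cuda", dtype=torch.float16)
    k = torch.randn(8, 8, 64, device="cuda", dtype=torch.float16)
    v = torch.randn(8, 8, 64, device="cuda", dtype=torch.float16)
    cu_seqlens = torch.tensor([0, 3, 8], device="cuda", dtype=torch.int32)
    out = flash_attn_varlen_func(
        q, k, v, cu_seqlens, cu_seqlens, max_seqlen_q=5, max_seqlen_k=5, causal=True
    )
    return out  # (8, 8, 64)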
1462
+
1463
+
1464
+ def flash_attn_with_kvcache(
+     q,
+     k_cache,
+     v_cache,
+     k=None,
+     v=None,
+     rotary_cos=None,
+     rotary_sin=None,
+     cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
+     cache_batch_idx: Optional[torch.Tensor] = None,
+     cache_leftpad: Optional[torch.Tensor] = None,
+     block_table: Optional[torch.Tensor] = None,
+     softmax_scale=None,
+     causal=False,
+     window_size=(-1, -1),  # -1 means infinite context window
+     softcap=0.0,  # 0.0 means deactivated
+     rotary_interleaved=True,
+     alibi_slopes=None,
+     num_splits=0,
+     return_softmax_lse=False,
+ ):
+     """
+     If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
+     k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
+     the previous step, update them with the new keys/values from the current step, and do
+     attention with the updated cache, all in one kernel.
+
+     If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
+     For example, the KV cache could be pre-allocated with the max sequence length, and you can use
+     cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
+
+     Also applies rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
+     rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
+     If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
+     and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
+     If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
+     indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
+
+     See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
+
+     Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
+     than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
+     For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
+     0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
+
+     If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
+     For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
+         1 1 1 1 0
+         1 1 1 1 1
+     If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
+         0 0
+         0 0
+         0 0
+         1 0
+         1 1
+     If the row of the mask is all zero, the output will be zero.
+
+     If window_size != (-1, -1), implements sliding window local attention. Query at position i
+     will only attend to keys between
+     [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
+
+     Note: Does not support backward pass.
+
+     Arguments:
+         q: (batch_size, seqlen, nheads, headdim)
+         k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
+             or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache).
+             page_block_size must be a multiple of 256.
+         v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
+             or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache).
+         k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
+             k with k_cache, starting at the indices specified by cache_seqlens.
+         v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
+         rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
+             to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
+         rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
+         cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
+             KV cache.
+         cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
+             If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
+             If the indices are not distinct, and k and v are provided, the values updated in the cache
+             might come from any of the duplicate indices.
+         cache_leftpad: (batch_size,), dtype torch.int32. The index at which the KV cache starts. If None, assume 0.
+         block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
+         softmax_scale: float. The scaling of QK^T before applying softmax.
+             Defaults to 1 / sqrt(headdim).
+         causal: bool. Whether to apply a causal attention mask (e.g., for auto-regressive modeling).
+         window_size: (left, right). If not (-1, -1), implements sliding window local attention.
+         softcap: float. Anything > 0 activates softcapping attention.
+         rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
+             If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
+             rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
+             (i.e. GPT-NeoX style).
+         alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
+             (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
+             is added to the attention score of query i and key j.
+         num_splits: int. If > 1, split the key/value into this many chunks along the sequence dimension.
+             If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
+             to automatically determine the number of splits.
+             Don't change this unless you know what you are doing.
+         return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
+
+     Return:
+         out: (batch_size, seqlen, nheads, headdim).
+         softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
+             logsumexp of each row of the matrix QK^T * scaling (i.e., the log of the softmax
+             normalization factor).
+     """
+     assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
+     assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
+     q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
+     if softmax_scale is None:
+         softmax_scale = q.shape[-1] ** (-0.5)
+     if cache_seqlens is not None and isinstance(cache_seqlens, int):
+         cache_seqlens = torch.full(
+             (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
+         )
+         cache_seqlens = maybe_contiguous(cache_seqlens)
+     cache_batch_idx = maybe_contiguous(cache_batch_idx)
+     block_table = maybe_contiguous(block_table)
+     out, softmax_lse = flash_attn_gpu.fwd_kvcache(
+         q,
+         k_cache,
+         v_cache,
+         k,
+         v,
+         cache_seqlens,
+         rotary_cos,
+         rotary_sin,
+         cache_batch_idx,
+         cache_leftpad,
+         block_table,
+         alibi_slopes,
+         None,
+         softmax_scale,
+         causal,
+         window_size[0],
+         window_size[1],
+         softcap,
+         rotary_interleaved,
+         num_splits,
+     )
+     return (out, softmax_lse) if return_softmax_lse else out
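+
+
+ # Incremental-decoding sketch for flash_attn_with_kvcache (illustrative only;
+ # the batch size, head counts, headdim, and the 1024-token pre-allocated cache
+ # below are assumptions for the example):
+ #
+ #     k_cache = torch.zeros(2, 1024, 4, 64, dtype=torch.float16, device="cuda")
+ #     v_cache = torch.zeros_like(k_cache)
+ #     cache_seqlens = torch.zeros(2, dtype=torch.int32, device="cuda")
+ #     for _ in range(16):  # decode 16 tokens
+ #         q = torch.randn(2, 1, 4, 64, dtype=torch.float16, device="cuda")
+ #         k_new = torch.randn_like(q)
+ #         v_new = torch.randn_like(q)
+ #         out = flash_attn_with_kvcache(
+ #             q, k_cache, v_cache, k=k_new, v=v_new,
+ #             cache_seqlens=cache_seqlens, causal=True,
+ #         )
+ #         cache_seqlens += 1  # the kernel appended k_new/v_new to the cache in-place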
Code/Baselines/flash-attention/flash_attn/flash_attn_triton_og.py ADDED
@@ -0,0 +1,365 @@
+ # [2022-10-23] Downloaded from https://github.com/openai/triton/blob/master/python/tutorials/06-fused-attention.py
+ # for benchmarking.
+ # We fixed a few dtype casts to make it work for bf16
+
+ """
+ Fused Attention
+ ===============
+ This is a Triton implementation of the Flash Attention algorithm
+ (see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf)
+ """
+
+ import pytest
+ import torch
+ import triton
+ import triton.language as tl
+
+
+ @triton.jit
+ def _fwd_kernel(
+     Q,
+     K,
+     V,
+     sm_scale,
+     TMP,
+     L,
+     M,  # NOTE: TMP is a scratchpad buffer to work around a compiler bug
+     Out,
+     stride_qz,
+     stride_qh,
+     stride_qm,
+     stride_qk,
+     stride_kz,
+     stride_kh,
+     stride_kn,
+     stride_kk,
+     stride_vz,
+     stride_vh,
+     stride_vk,
+     stride_vn,
+     stride_oz,
+     stride_oh,
+     stride_om,
+     stride_on,
+     Z,
+     H,
+     N_CTX,
+     BLOCK_M: tl.constexpr,
+     BLOCK_DMODEL: tl.constexpr,
+     BLOCK_N: tl.constexpr,
+ ):
+     start_m = tl.program_id(0)
+     off_hz = tl.program_id(1)
+     # initialize offsets
+     offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+     offs_n = tl.arange(0, BLOCK_N)
+     offs_d = tl.arange(0, BLOCK_DMODEL)
+     off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
+     off_k = off_hz * stride_qh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
+     off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
+     # Initialize pointers to Q, K, V
+     q_ptrs = Q + off_q
+     k_ptrs = K + off_k
+     v_ptrs = V + off_v
+     # initialize pointer to m and l
+     t_ptrs = TMP + off_hz * N_CTX + offs_m
+     m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
+     l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
+     acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+     # load q: it will stay in SRAM throughout
+     q = tl.load(q_ptrs)
+     # loop over k, v and update accumulator
+     for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N):
+         start_n = tl.multiple_of(start_n, BLOCK_N)
+         # -- compute qk ----
+         k = tl.load(k_ptrs + start_n * stride_kn)
+         qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
+         qk += tl.dot(q, k, trans_b=True)
+         qk *= sm_scale
+         qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf"))
+         # -- compute m_ij, p, l_ij
+         m_ij = tl.max(qk, 1)
+         p = tl.exp(qk - m_ij[:, None])
+         l_ij = tl.sum(p, 1)
+         # -- update m_i and l_i
+         m_i_new = tl.maximum(m_i, m_ij)
+         alpha = tl.exp(m_i - m_i_new)
+         beta = tl.exp(m_ij - m_i_new)
+         l_i_new = alpha * l_i + beta * l_ij
+         # -- update output accumulator --
+         # scale p
+         p_scale = beta / l_i_new
+         p = p * p_scale[:, None]
+         # scale acc
+         acc_scale = l_i / l_i_new * alpha
+         tl.store(t_ptrs, acc_scale)
+         acc_scale = tl.load(t_ptrs)  # BUG: have to store and immediately load
+         acc = acc * acc_scale[:, None]
+         # update acc
+         v = tl.load(v_ptrs + start_n * stride_vk)
+         p = p.to(v.dtype)
+         acc += tl.dot(p, v)
+         # update m_i and l_i
+         l_i = l_i_new
+         m_i = m_i_new
+     # rematerialize offsets to save registers
+     start_m = tl.program_id(0)
+     offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
+     # write back l and m
+     l_ptrs = L + off_hz * N_CTX + offs_m
+     m_ptrs = M + off_hz * N_CTX + offs_m
+     tl.store(l_ptrs, l_i)
+     tl.store(m_ptrs, m_i)
+     # initialize pointers to output
+     offs_n = tl.arange(0, BLOCK_DMODEL)
+     off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
+     out_ptrs = Out + off_o
+     tl.store(out_ptrs, acc)
+
+
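+ # The running-softmax update in _fwd_kernel follows the standard online-softmax
+ # recurrence (a sketch of the math, for reference):
+ #
+ #     m_new = max(m, m_ij)
+ #     l_new = exp(m - m_new) * l + exp(m_ij - m_new) * l_ij
+ #     acc_new = acc * (l / l_new) * exp(m - m_new) + (exp(m_ij - m_new) / l_new) * p @ V_block
+ #
+ # so that after the last KV block, acc already holds softmax(QK^T * sm_scale) @ V
+ # for the rows owned by this program instance.
+
+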
+ @triton.jit
+ def _bwd_preprocess(
+     Out,
+     DO,
+     L,
+     NewDO,
+     Delta,
+     BLOCK_M: tl.constexpr,
+     D_HEAD: tl.constexpr,
+ ):
+     off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
+     off_n = tl.arange(0, D_HEAD)
+     # load
+     o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
+     do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
+     denom = tl.load(L + off_m).to(tl.float32)
+     # compute
+     do = do / denom[:, None]
+     delta = tl.sum(o * do, axis=1)
+     # write-back
+     tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
+     tl.store(Delta + off_m, delta)
+
+
+ @triton.jit
+ def _bwd_kernel(
+     Q,
+     K,
+     V,
+     sm_scale,
+     Out,
+     DO,
+     DQ,
+     DK,
+     DV,
+     L,
+     M,
+     D,
+     stride_qz,
+     stride_qh,
+     stride_qm,
+     stride_qk,
+     stride_kz,
+     stride_kh,
+     stride_kn,
+     stride_kk,
+     stride_vz,
+     stride_vh,
+     stride_vk,
+     stride_vn,
+     Z,
+     H,
+     N_CTX,
+     num_block,
+     BLOCK_M: tl.constexpr,
+     BLOCK_DMODEL: tl.constexpr,
+     BLOCK_N: tl.constexpr,
+ ):
+     off_hz = tl.program_id(0)
+     off_z = off_hz // H
+     off_h = off_hz % H
+     # offset pointers for batch/head
+     Q += off_z * stride_qz + off_h * stride_qh
+     K += off_z * stride_qz + off_h * stride_qh
+     V += off_z * stride_qz + off_h * stride_qh
+     DO += off_z * stride_qz + off_h * stride_qh
+     DQ += off_z * stride_qz + off_h * stride_qh
+     DK += off_z * stride_qz + off_h * stride_qh
+     DV += off_z * stride_qz + off_h * stride_qh
+     for start_n in range(0, num_block):
+         lo = start_n * BLOCK_M
+         # initialize row/col offsets
+         offs_qm = lo + tl.arange(0, BLOCK_M)
+         offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
+         offs_m = tl.arange(0, BLOCK_N)
+         offs_k = tl.arange(0, BLOCK_DMODEL)
+         # initialize pointers to value-like data
+         q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
+         k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
+         v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
+         do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
+         dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
+         # pointer to row-wise quantities in value-like data
+         D_ptrs = D + off_hz * N_CTX
+         m_ptrs = M + off_hz * N_CTX
+         # initialize dv and dk
+         dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+         dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
+         # k and v stay in SRAM throughout
+         k = tl.load(k_ptrs)
+         v = tl.load(v_ptrs)
+         # loop over rows
+         for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
+             offs_m_curr = start_m + offs_m
+             # load q, k, v, do on-chip
+             q = tl.load(q_ptrs)
+             # recompute p = softmax(qk, dim=-1).T
+             # NOTE: `do` is pre-divided by `l`; no normalization here
+             qk = tl.dot(q, k, trans_b=True)
+             qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
+             m = tl.load(m_ptrs + offs_m_curr)
+             p = tl.exp(qk * sm_scale - m[:, None])
+             # compute dv
+             do = tl.load(do_ptrs)
+             dv += tl.dot(p.to(do.dtype), do, trans_a=True)
+             # compute dp = dot(v, do)
+             Di = tl.load(D_ptrs + offs_m_curr)
+             dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
+             dp += tl.dot(do, v, trans_b=True)
+             # compute ds = p * (dp - delta[:, None])
+             ds = p * dp * sm_scale
+             # compute dk = dot(ds.T, q)
+             dk += tl.dot(ds.to(q.dtype), q, trans_a=True)
+             # compute dq
+             dq = tl.load(dq_ptrs, eviction_policy="evict_last")
+             dq += tl.dot(ds.to(k.dtype), k)
+             tl.store(dq_ptrs, dq, eviction_policy="evict_last")
+             # increment pointers
+             dq_ptrs += BLOCK_M * stride_qm
+             q_ptrs += BLOCK_M * stride_qm
+             do_ptrs += BLOCK_M * stride_qm
+         # write-back
+         dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
+         dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
+         tl.store(dv_ptrs, dv)
+         tl.store(dk_ptrs, dk)
+
+
+ class _attention(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, q, k, v, sm_scale):
+         BLOCK = 128
+         # shape constraints
+         Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
+         assert Lq == Lk and Lk == Lv
+         assert Lk in {16, 32, 64, 128}
+         o = torch.empty_like(q)
+         grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
+         tmp = torch.empty(
+             (q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
+         )
+         L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
+         m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
+         num_warps = 4 if Lk <= 64 else 8
+
+         _fwd_kernel[grid](
+             q,
+             k,
+             v,
+             sm_scale,
+             tmp,
+             L,
+             m,
+             o,
+             q.stride(0),
+             q.stride(1),
+             q.stride(2),
+             q.stride(3),
+             k.stride(0),
+             k.stride(1),
+             k.stride(2),
+             k.stride(3),
+             v.stride(0),
+             v.stride(1),
+             v.stride(2),
+             v.stride(3),
+             o.stride(0),
+             o.stride(1),
+             o.stride(2),
+             o.stride(3),
+             q.shape[0],
+             q.shape[1],
+             q.shape[2],
+             BLOCK_M=BLOCK,
+             BLOCK_N=BLOCK,
+             BLOCK_DMODEL=Lk,
+             num_warps=num_warps,
+             num_stages=1,
+         )
+         ctx.save_for_backward(q, k, v, o, L, m)
+         ctx.BLOCK = BLOCK
+         ctx.grid = grid
+         ctx.sm_scale = sm_scale
+         ctx.BLOCK_DMODEL = Lk
+         return o
+
+     @staticmethod
+     def backward(ctx, do):
+         q, k, v, o, l, m = ctx.saved_tensors
+         do = do.contiguous()
+         dq = torch.zeros_like(q, dtype=torch.float32)
+         dk = torch.empty_like(k)
+         dv = torch.empty_like(v)
+         do_scaled = torch.empty_like(do)
+         delta = torch.empty_like(l)
+         _bwd_preprocess[(ctx.grid[0] * ctx.grid[1],)](
+             o,
+             do,
+             l,
+             do_scaled,
+             delta,
+             BLOCK_M=ctx.BLOCK,
+             D_HEAD=ctx.BLOCK_DMODEL,
+         )
+
+         # NOTE: kernel currently buggy for other values of `num_warps`
+         num_warps = 8
+         _bwd_kernel[(ctx.grid[1],)](
+             q,
+             k,
+             v,
+             ctx.sm_scale,
+             o,
+             do_scaled,
+             dq,
+             dk,
+             dv,
+             l,
+             m,
+             delta,
+             q.stride(0),
+             q.stride(1),
+             q.stride(2),
+             q.stride(3),
+             k.stride(0),
+             k.stride(1),
+             k.stride(2),
+             k.stride(3),
+             v.stride(0),
+             v.stride(1),
+             v.stride(2),
+             v.stride(3),
+             q.shape[0],
+             q.shape[1],
+             q.shape[2],
+             ctx.grid[0],
+             BLOCK_M=ctx.BLOCK,
+             BLOCK_N=ctx.BLOCK,
+             BLOCK_DMODEL=ctx.BLOCK_DMODEL,
+             num_warps=num_warps,
+             num_stages=1,
+         )
+         return dq.to(q.dtype), dk, dv, None
+
+
+ attention = _attention.apply
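+
+
+ # Usage sketch (illustrative only): the kernels above assume tensors of shape
+ # (batch, nheads, seqlen, headdim), with seqlen a multiple of BLOCK = 128 and
+ # headdim in {16, 32, 64, 128}; the shapes below are assumptions for the example.
+ #
+ #     q = torch.randn(2, 4, 1024, 64, dtype=torch.bfloat16, device="cuda", requires_grad=True)
+ #     k = torch.randn_like(q, requires_grad=True)
+ #     v = torch.randn_like(q, requires_grad=True)
+ #     out = attention(q, k, v, 64 ** -0.5)
+ #     out.sum().backward()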
Code/Baselines/flash-attention/flash_attn/pyproject.toml ADDED
@@ -0,0 +1,6 @@
+ [tool.black]
+ line-length = 100
+ target-version = ['py39']
+ [tool.ruff]
+ line-length = 100
+ target-version = 'py39'
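+ # (With this config in place, formatting and linting are just `black .` and
+ # `ruff check .` from this directory; the shared line-length keeps the two
+ # tools consistent. Note black expects target-version as a list, ruff as a string.)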
Code/Baselines/flash-attention/hopper/flash_api.cpp ADDED
@@ -0,0 +1,1716 @@
+ /******************************************************************************
+  * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+  ******************************************************************************/
+
+ #include <Python.h>
+ #include <torch/nn/functional/padding.h>
+ #include <ATen/cuda/CUDAContextLight.h>
+ #include <c10/cuda/CUDAGuard.h>
+
+ #include <cutlass/numeric_types.h>
+
+ #include "flash.h"
+ #include "static_switch.h"
+ #include "tile_size.h"
+ #include "heuristics.h"
+ #include "cuda_check.h"
+
+
+ extern "C" {
+     /* Creates a dummy empty _C module that can be imported from Python.
+        The import from Python will load the .so built from this file
+        in this extension, so that the TORCH_LIBRARY static initializers
+        below are run. */
+     PyObject* PyInit__C(void)
+     {
+         static struct PyModuleDef module_def = {
+             PyModuleDef_HEAD_INIT,
+             "_C",   /* name of module */
+             NULL,   /* module documentation, may be NULL */
+             -1,     /* size of per-interpreter state of the module,
+                        or -1 if the module keeps state in global variables. */
+             NULL,   /* methods */
+         };
+         return PyModule_Create(&module_def);
+     }
+ }
+
+ #define CHECK_DEVICE(x) TORCH_CHECK(x.is_cuda(), #x " must be on CUDA")
+ #define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
+ #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
+
+ void set_params_fprop(Flash_fwd_params &params,
+                       // sizes
+                       const size_t b,
+                       const size_t seqlen_q,
+                       const size_t seqlen_k,
+                       const size_t seqlen_q_rounded,
+                       const size_t seqlen_k_rounded,
+                       const size_t h,
+                       const size_t h_k,
+                       const size_t d,
+                       const size_t d_rounded,
+                       // device pointers
+                       const at::Tensor q,
+                       const at::Tensor k,
+                       const at::Tensor v,
+                       at::Tensor out,
+                       void *cu_seqlens_q_d,
+                       void *cu_seqlens_k_d,
+                       void *seqused_q,
+                       void *seqused_k,
+                       void *softmax_lse_d,
+                       float p_dropout,
+                       float softmax_scale,
+                       int window_size_left,
+                       int window_size_right,
+                       int attention_chunk,
+                       const float softcap=0.f,
+                       const int sm_margin=0) {
+
+     // Reset the parameters
+     params = {};
+
+     params.is_bf16 = q.dtype() == torch::kBFloat16;
+     params.is_e4m3 = q.dtype() == torch::kFloat8_e4m3fn;
+
+     // Set the pointers and strides.
+     params.q_ptr = q.data_ptr();
+     params.k_ptr = k.data_ptr();
+     params.v_ptr = v.data_ptr();
+     // All strides are in elements, not bytes.
+     params.q_row_stride = q.stride(-3);
+     params.k_row_stride = k.stride(-3);
+     params.v_row_stride = v.stride(-3);
+     params.q_head_stride = q.stride(-2);
+     params.k_head_stride = k.stride(-2);
+     params.v_head_stride = v.stride(-2);
+     params.v_dim_stride = v.stride(-1);
+     params.o_ptr = out.data_ptr();
+     params.o_row_stride = out.stride(-3);
+     params.o_head_stride = out.stride(-2);
+
+     if (cu_seqlens_q_d == nullptr) {
+         params.q_batch_stride = q.stride(0);
+         params.o_batch_stride = out.stride(0);
+     }
+     if (cu_seqlens_k_d == nullptr) {
+         params.k_batch_stride = k.stride(0);
+         params.v_batch_stride = v.stride(0);
+     }
+
+     params.cu_seqlens_q = static_cast<int *>(cu_seqlens_q_d);
+     params.cu_seqlens_k = static_cast<int *>(cu_seqlens_k_d);
+     params.seqused_q = static_cast<int *>(seqused_q);
+     params.seqused_k = static_cast<int *>(seqused_k);
+
+     // Softmax sum
+     params.softmax_lse_ptr = softmax_lse_d;
+
+     // Set the dimensions.
+     params.b = b;
+     params.h = h;
+     params.h_k = h_k;
+     params.seqlen_q = seqlen_q;
+     params.seqlen_k = seqlen_k;
+     params.seqlen_q_rounded = seqlen_q_rounded;
+     params.seqlen_k_rounded = seqlen_k_rounded;
+     params.d = d;
+     params.d_rounded = d_rounded;
+
+     // Set the different scale values.
+     params.scale_softmax = softmax_scale;
+     params.softcap = softcap;
+
+     // Set this to probability of keeping an element to simplify things.
+     params.p_dropout = 1.f - p_dropout;
+     // Convert p from float to int so we don't have to convert the random uint to float to compare.
+     // [Minor] We want to round down since when we do the comparison we use <= instead of <
+     // params.p_dropout_in_uint = uint32_t(std::floor(params.p_dropout * 4294967295.0));
+     // params.p_dropout_in_uint16_t = uint16_t(std::floor(params.p_dropout * 65535.0));
+     params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0));
+     params.rp_dropout = 1.f / params.p_dropout;
+     TORCH_CHECK(p_dropout < 1.f);
+     #ifdef FLASHATTENTION_DISABLE_DROPOUT
+         TORCH_CHECK(p_dropout == 0.0f, "This flash attention build does not support dropout.");
+     #endif
+
+     // Causal is the special case where window_size_right == 0 and window_size_left < 0.
+     // Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
+     params.is_causal = window_size_left < 0 && window_size_right == 0 && attention_chunk == 0;
+     params.is_local = (window_size_left >= 0 || window_size_right >= 0 || attention_chunk >= 1) && !params.is_causal;
+
+     // TODO: check this
+     if (window_size_left < 0) { window_size_left = seqlen_k - 1; }
+     if (window_size_right < 0) { window_size_right = seqlen_q - 1; }
+     if (attention_chunk > 0) {
+         window_size_left = std::min(window_size_left, attention_chunk - 1);
+         window_size_right = std::min(window_size_right, attention_chunk - 1);
+     }
+     params.window_size_left = window_size_left;
+     params.window_size_right = window_size_right;
+     params.attention_chunk = attention_chunk;
+
+     params.arch = at::cuda::getCurrentDeviceProperties()->major * 10 + at::cuda::getCurrentDeviceProperties()->minor;
+     params.num_sm = at::cuda::getCurrentDeviceProperties()->multiProcessorCount - sm_margin;
+
+     #ifdef FLASHATTENTION_DISABLE_LOCAL
+         TORCH_CHECK(!params.is_local, "This flash attention build does not support local attention.");
+     #endif
+ }
+
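+ // For reference, the window bookkeeping above means that (with the mask aligned
+ // to the bottom-right corner of the attention matrix) query i attends to key j iff
+ //
+ //     i + seqlen_k - seqlen_q - window_size_left <= j
+ //         && j <= i + seqlen_k - seqlen_q + window_size_right,
+ //
+ // with causal being the special case window_size_left unbounded (clamped to
+ // seqlen_k - 1) and window_size_right = 0. This is a descriptive sketch of the
+ // semantics, not code used by the kernels.
+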
+ void set_params_dgrad(Flash_bwd_params &params,
+                       // sizes
+                       const size_t b,
+                       const size_t seqlen_q,
+                       const size_t seqlen_k,
+                       const size_t seqlen_q_rounded,
+                       const size_t seqlen_k_rounded,
+                       const size_t h,
+                       const size_t h_k,
+                       const size_t d,
+                       const size_t d_rounded,
+                       // device pointers
+                       const at::Tensor q,
+                       const at::Tensor k,
+                       const at::Tensor v,
+                       const at::Tensor out,
+                       const at::Tensor dout,
+                       at::Tensor dq,
+                       at::Tensor dk,
+                       at::Tensor dv,
+                       void *cu_seqlens_q_d,
+                       void *cu_seqlens_k_d,
+                       void *seqused_q,
+                       void *seqused_k,
+                       void *dq_accum_d,
+                       void *dk_accum_d,
+                       void *dv_accum_d,
+                       void *softmax_lse_d,
+                       void *dsoftmax_sum_d,
+                       float p_dropout,
+                       float softmax_scale,
+                       int window_size_left,
+                       int window_size_right,
+                       int attention_chunk,
+                       const float softcap=0.f,
+                       bool deterministic=false,
+                       int const sm_margin=0) {
+
+     set_params_fprop(params,
+                      b, seqlen_q, seqlen_k, seqlen_q_rounded, seqlen_k_rounded, h, h_k, d, d_rounded,
+                      q, k, v, out,
+                      cu_seqlens_q_d,
+                      cu_seqlens_k_d,
+                      seqused_q,
+                      seqused_k,
+                      softmax_lse_d,
+                      p_dropout,
+                      softmax_scale,
+                      window_size_left,
+                      window_size_right,
+                      attention_chunk,
+                      softcap,
+                      sm_margin);
+
+     // Set the pointers and strides.
+     params.do_ptr = dout.data_ptr();
+     params.do_row_stride = dout.stride(-3);
+     params.do_head_stride = dout.stride(-2);
+     params.dq_ptr = dq.data_ptr();
+     params.dk_ptr = dk.data_ptr();
+     params.dv_ptr = dv.data_ptr();
+     params.dq_row_stride = dq.stride(-3);
+     params.dk_row_stride = dk.stride(-3);
+     params.dv_row_stride = dv.stride(-3);
+     params.dq_head_stride = dq.stride(-2);
+     params.dk_head_stride = dk.stride(-2);
+     params.dv_head_stride = dv.stride(-2);
+
+     if (cu_seqlens_q_d == nullptr) {
+         params.do_batch_stride = dout.stride(0);
+         params.dq_batch_stride = dq.stride(0);
+         params.dk_batch_stride = dk.stride(0);
+         params.dv_batch_stride = dv.stride(0);
+     }
+
+     params.dq_accum_ptr = dq_accum_d;
+     params.dk_accum_ptr = dk_accum_d;
+     params.dv_accum_ptr = dv_accum_d;
+
+     // Softmax sum
+     params.dsoftmax_sum = dsoftmax_sum_d;
+
+     params.deterministic = deterministic;
+ }
+
+
+ template <int Arch, int Split, bool PagedKVNonTMA, bool PackGQA, bool Has_softcap>
+ void run_mha_fwd_constexpr(Flash_fwd_params &params, cudaStream_t stream) {
+     if (!params.is_e4m3) {
+         if (params.is_bf16) {
+             #ifndef FLASHATTENTION_DISABLE_HDIM64
+             if (params.d <= 64) {
+                 if constexpr (Arch == 90) {
+                     if (params.dv > 256) {
+                         return run_mha_fwd_<Arch, cutlass::bfloat16_t, 64, 512, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     } else if (params.dv > 64) {
+                         return run_mha_fwd_<Arch, cutlass::bfloat16_t, 64, 256, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     }
+                 }
+                 return run_mha_fwd_<Arch, cutlass::bfloat16_t, 64, 64, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+             }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM96
+             if (params.d <= 96) { return run_mha_fwd_<Arch, cutlass::bfloat16_t, 96, 96, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM128
+             if (params.d <= 128) { return run_mha_fwd_<Arch, cutlass::bfloat16_t, 128, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM192
+             if (params.d <= 192) {
+                 if constexpr (Arch == 90) {
+                     if (params.dv <= 128) {
+                         return run_mha_fwd_<Arch, cutlass::bfloat16_t, 192, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     }
+                 }
+                 return run_mha_fwd_<Arch, cutlass::bfloat16_t, 192, 192, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+             }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM256
+             if (params.d <= 256) { return run_mha_fwd_<Arch, cutlass::bfloat16_t, 256, 256, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+         } else {
+             #ifndef FLASHATTENTION_DISABLE_FP16
+             #ifndef FLASHATTENTION_DISABLE_HDIM64
+             if (params.d <= 64) {
+                 if constexpr (Arch == 90) {
+                     if (params.dv > 256) {
+                         return run_mha_fwd_<Arch, cutlass::half_t, 64, 512, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     } else if (params.dv > 64) {
+                         return run_mha_fwd_<Arch, cutlass::half_t, 64, 256, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     }
+                 }
+                 return run_mha_fwd_<Arch, cutlass::half_t, 64, 64, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+             }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM96
+             if (params.d <= 96) { return run_mha_fwd_<Arch, cutlass::half_t, 96, 96, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM128
+             if (params.d <= 128) { return run_mha_fwd_<Arch, cutlass::half_t, 128, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM192
+             if (params.d <= 192) {
+                 if constexpr (Arch == 90) {
+                     if (params.dv <= 128) {
+                         return run_mha_fwd_<Arch, cutlass::half_t, 192, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                     }
+                 }
+                 return run_mha_fwd_<Arch, cutlass::half_t, 192, 192, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+             }
+             #endif
+             #ifndef FLASHATTENTION_DISABLE_HDIM256
+             if (params.d <= 256) { return run_mha_fwd_<Arch, cutlass::half_t, 256, 256, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+             #endif
+             #else
+             TORCH_CHECK(false, "This flash attention build does not support FP16.");
+             #endif
+         }
+     } else {
+         #ifndef FLASHATTENTION_DISABLE_FP8
+         #ifndef FLASHATTENTION_DISABLE_HDIM64
+         if (params.d <= 64) { return run_mha_fwd_<90, cutlass::float_e4m3_t, 64, 64, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+         #endif
+         #ifndef FLASHATTENTION_DISABLE_HDIM96
+         if (params.d <= 96) { return run_mha_fwd_<90, cutlass::float_e4m3_t, 96, 96, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+         #endif
+         #ifndef FLASHATTENTION_DISABLE_HDIM128
+         if (params.d <= 128) { return run_mha_fwd_<90, cutlass::float_e4m3_t, 128, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+         #endif
+         #ifndef FLASHATTENTION_DISABLE_HDIM192
+         if (params.d <= 192) {
+             if constexpr (Arch == 90) {
+                 if (params.dv <= 128) {
+                     return run_mha_fwd_<90, cutlass::float_e4m3_t, 192, 128, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+                 }
+             }
+             return run_mha_fwd_<90, cutlass::float_e4m3_t, 192, 192, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream);
+         }
+         #endif
+         #ifndef FLASHATTENTION_DISABLE_HDIM256
+         if (params.d <= 256) { return run_mha_fwd_<90, cutlass::float_e4m3_t, 256, 256, Split, PagedKVNonTMA, Has_softcap, PackGQA>(params, stream); }
+         #endif
+         #else
+         TORCH_CHECK(false, "This flash attention build does not support FP8.");
+         #endif
+     }
+ }
+
+ void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) {
+     // HEADDIM_SWITCH(params.d, [&] {
+     //     run_mha_fwd_<cutlass::half_t, kHeadSize>(params, stream);
+     // });
+     TORCH_CHECK(params.num_splits >= 1);
+     ARCH_SWITCH(params.arch, Arch, [&] {
+         SPLIT_SWITCH(params.num_splits > 1, Split, [&] {
+             PAGEDKV_SWITCH(params.page_table && !params.pagedkv_tma, PagedKVNonTMA, [&] {
+                 PACKGQA_SWITCH(params.pack_gqa, PackGQA_, [&] {
+                     // Always enable PackGQA for Sm8x or PagedKVNonTMA or Split to reduce compilation
+                     static constexpr bool PackGQA = PackGQA_ || Arch < 90 || PagedKVNonTMA || Split;
+                     SOFTCAP_SWITCH(params.softcap > 0.0, Has_softcap, [&] {
+                         run_mha_fwd_constexpr<Arch, Split, PagedKVNonTMA, PackGQA, Has_softcap>(params, stream);
+                     });
+                 });
+             });
+         });
+     });
+ }
+
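+ // The SWITCH macros above lift runtime flags into compile-time template
+ // parameters; a minimal sketch of the pattern (the assumed shape of the
+ // boolean switches in static_switch.h, shown for orientation only):
+ //
+ //     #define SPLIT_SWITCH(COND, NAME, ...)     \
+ //         [&] {                                 \
+ //             if (COND) {                       \
+ //                 constexpr bool NAME = true;   \
+ //                 return __VA_ARGS__();         \
+ //             } else {                          \
+ //                 constexpr bool NAME = false;  \
+ //                 return __VA_ARGS__();         \
+ //             }                                 \
+ //         }()
+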
+ void run_mha_fwd_combine(Flash_fwd_params &params, cudaStream_t stream, bool enable_pdl=false) {
+     #ifndef FLASHATTENTION_DISABLE_SPLIT
+     // If hdim is 96 or 192, it's faster to round them to 128 or 256 respectively
+     // so that kBlockM is smaller and we have more parallelism.
+     if (params.is_fp32) {
+         if (params.dv <= 64) {
+             run_mha_fwd_combine_<float, float, 64>(params, stream, enable_pdl);
+         } else {
+             run_mha_fwd_combine_<float, float, 128>(params, stream, enable_pdl);
+         }
+     } else if (params.is_bf16) {
+         if (params.dv <= 64) {
+             run_mha_fwd_combine_<cutlass::bfloat16_t, float, 64>(params, stream, enable_pdl);
+         } else {
+             run_mha_fwd_combine_<cutlass::bfloat16_t, float, 128>(params, stream, enable_pdl);
+         }
+     } else {
+         if (params.dv <= 64) {
+             run_mha_fwd_combine_<cutlass::half_t, float, 64>(params, stream, enable_pdl);
+         } else {
+             run_mha_fwd_combine_<cutlass::half_t, float, 128>(params, stream, enable_pdl);
+         }
+     }
+     #else
+     TORCH_CHECK(false, "This flash attention build does not support combine kernels.");
+     #endif
+ }
+
+ inline bool get_pagedkv_tma(Flash_fwd_params const& params) {
+     if (params.arch < 90 || !params.page_table || params.leftpad_k || params.knew_ptr) { return false; }
+     // This needs to match the kernel configs
+     auto kBlockMN_kernel_args_sm90 = tile_size_fwd_sm90(params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, false /*v_colmajor*/, false /*paged_kv_non_TMA*/, params.softcap > 0.f);
+     int const kBlockM = std::get<0>(kBlockMN_kernel_args_sm90);
+     int const kBlockN = std::get<1>(kBlockMN_kernel_args_sm90);
+     // Heuristic: when seqlen_q <= kBlockM, we're not compute bound, and somehow using TMA is slower,
+     // at least for MLA.
+     return params.page_size % kBlockN == 0 && params.seqlen_q * (params.h / params.h_k) > kBlockM;
+ }
+
+ inline bool get_pack_gqa(Flash_fwd_params const& params) {
+     // Always enable PackGQA for Sm8x or PagedKVNonTMA or Split to reduce compilation and binary size.
+     // Has little effect on speed.
+     if (params.arch < 90 || (params.page_table && !params.pagedkv_tma) || params.num_splits > 1) { return true; }
+     #ifdef FLASHATTENTION_DISABLE_PACKGQA
+     return false;
+     #else
+     // params.page_table must already be set
+     if (params.h == params.h_k) { return false; }
+     // This needs to match the kernel configs
+     auto kBlockMN_kernel_args_sm90 = tile_size_fwd_sm90(params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, false /*v_colmajor*/, params.page_table && !params.pagedkv_tma, params.softcap > 0.f);
+     int const kBlockM = std::get<0>(kBlockMN_kernel_args_sm90);
+     return should_pack_gqa(params.cu_seqlens_q || params.seqused_q, params.seqlen_q, params.h / params.h_k, kBlockM);
+     #endif
+ }
+
+ inline int get_num_splits(Flash_fwd_params const& params) {
+     #ifdef FLASHATTENTION_DISABLE_SPLIT
+     return 1;
+     #else
+     // Always enable PackGQA for Split
+     // params.page_table must already be set
+     // This needs to match the kernel configs
+     bool varlen = params.cu_seqlens_q || params.cu_seqlens_k || params.seqused_q || params.seqused_k || params.leftpad_k;
+     auto kBlockMN_kernel_args_sm90 = tile_size_fwd_sm90(params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, false /*v_colmajor*/, params.page_table && !params.pagedkv_tma, params.softcap > 0.f);
+     // Strictly speaking we need to pass in (varlen && params.num_splits > 1) but num_splits
+     // has not been set here. It's OK though because we might just underestimate kBlockN a bit
+     auto kBlockMN_kernel_args_sm8x = tile_size_fwd_sm8x(params.arch == 86 || params.arch == 89, params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, params.page_table, varlen, params.softcap > 0.f, params.knew_ptr);
+     int const kBlockM = params.arch >= 90 ? std::get<0>(kBlockMN_kernel_args_sm90) : std::get<0>(kBlockMN_kernel_args_sm8x);
+     int const kBlockN = params.arch >= 90 ? std::get<1>(kBlockMN_kernel_args_sm90) : std::get<1>(kBlockMN_kernel_args_sm8x);
+     int seqlen_q_packgqa = params.seqlen_q * (params.h / params.h_k);
+     // If is_local, we're not going to load all of seqlen_k
+     int const seqlen_k_loaded = !params.is_local
+         ? params.seqlen_k
+         : std::max(0, std::min(params.seqlen_k, params.window_size_right + params.window_size_left + 1 + kBlockM));
+     int const num_n_blocks = (seqlen_k_loaded + kBlockN - 1) / kBlockN;
+     int const num_m_blocks = (seqlen_q_packgqa + kBlockM - 1) / kBlockM;
+     int const size_one_kv_head = params.seqlen_k * (params.d + params.dv) * (params.is_e4m3 ? 1 : 2);
+     // Always enable PackGQA for Split
+     // If varlen, we use dynamic split, so this heuristic just needs to get an upper bound on num_splits.
+     // We assume the case where there's 1 long sequence and the rest are short, i.e. pretending
+     // that batch = 1.
+     int total_mblocks = (params.num_splits_dynamic_ptr ? 1 : params.b) * params.h_k * num_m_blocks;
+     return num_splits_heuristic(total_mblocks, params.num_sm, num_n_blocks, num_m_blocks, size_one_kv_head, params.is_causal || params.is_local, 128);
+     #endif
+ }
+
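+ // Worked example of the split heuristic above (a descriptive sketch only; the
+ // numbers are assumptions): for single-token decode (seqlen_q = 1) with
+ // h / h_k = 8, kBlockM = 64 gives num_m_blocks = 1, and seqlen_k = 8192 with
+ // kBlockN = 128 gives num_n_blocks = 64. total_mblocks = b * h_k is then
+ // typically far below num_sm, so num_splits_heuristic chooses several splits
+ // along the KV dimension to keep all SMs busy.
+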
455
+ inline int get_max_headdim() {
456
+ #ifndef FLASHATTENTION_DISABLE_HDIM256
457
+ return 256;
458
+ #endif
459
+ #ifndef FLASHATTENTION_DISABLE_HDIM192
460
+ return 192;
461
+ #endif
462
+ #ifndef FLASHATTENTION_DISABLE_HDIM128
463
+ return 128;
464
+ #endif
465
+ #ifndef FLASHATTENTION_DISABLE_HDIM96
466
+ return 96;
467
+ #endif
468
+ #ifndef FLASHATTENTION_DISABLE_HDIM64
469
+ return 64;
470
+ #endif
471
+ return 0;
472
+ }
473
+
474
+ inline int round_up_headdim(int head_size) {
475
+ #ifndef FLASHATTENTION_DISABLE_HDIM64
476
+ if (head_size <= 64) { return 64; }
477
+ #endif
478
+ #ifndef FLASHATTENTION_DISABLE_HDIM96
479
+ if (head_size <= 96) { return 96; }
480
+ #endif
481
+ #ifndef FLASHATTENTION_DISABLE_HDIM128
482
+ if (head_size <= 128) { return 128; }
483
+ #endif
484
+ #ifndef FLASHATTENTION_DISABLE_HDIM192
485
+ if (head_size <= 192) { return 192; }
486
+ #endif
487
+ #ifndef FLASHATTENTION_DISABLE_HDIM256
488
+ if (head_size <= 256) { return 256; }
489
+ #endif
490
+ return 256;
491
+ }
492
+
493
+ inline int round_up_headdimv(int head_size) {
494
+ if (head_size <= 64) { return 64; }
495
+ if (head_size <= 96) { return 96; }
496
+ if (head_size <= 128) { return 128; }
497
+ if (head_size <= 192) { return 192; }
498
+ if (head_size <= 256) { return 256; }
499
+ return 512;
500
+ }
501
+
502
+ // Only applicable to the case where seqused_k (i.e. cache_seqlens) is available
503
+ at::Tensor
504
+ mha_fwd_get_scheduler_metadata(
505
+ int64_t batch_size,
506
+ int64_t max_seqlen_q,
507
+ int64_t max_seqlen_k,
508
+ int64_t num_heads,
509
+ int64_t num_heads_k,
510
+ int64_t headdim,
511
+ int64_t headdim_v,
512
+ at::ScalarType qkv_dtype,
513
+ at::Tensor seqused_k, // b
514
+ std::optional<at::Tensor> cu_seqlens_q_, // b+1
515
+ std::optional<at::Tensor> cu_seqlens_k_, // b+1
516
+ std::optional<at::Tensor> cu_seqlens_k_new_, // b+1
517
+ std::optional<at::Tensor> seqused_q_, // b. If given, only this many elements of each batch element's queries and outputs are used.
518
+ std::optional<at::Tensor> leftpad_k_, // b
519
+ std::optional<int64_t> page_size,
520
+ int64_t max_seqlen_k_new, // 0 means we're not appending new KV
521
+ bool is_causal,
522
+ int64_t window_size_left,
523
+ int64_t window_size_right,
524
+ int64_t attention_chunk,
525
+ bool has_softcap,
526
+ int64_t num_splits,
527
+ std::optional<bool> pack_gqa_,
528
+ int64_t sm_margin
529
+ ) {
530
+
531
+ TORCH_CHECK(qkv_dtype == at::ScalarType::Half || qkv_dtype == at::ScalarType::BFloat16 || qkv_dtype == at::ScalarType::Float8_e4m3fn,
532
+ "FlashAttention only supports fp16, bf16, and fp8_e4m3 data type");
533
+ TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
534
+
535
+ // Reset the parameters
536
+ Flash_fwd_params params{};
537
+ params.is_bf16 = qkv_dtype == at::ScalarType::BFloat16;
538
+ params.is_e4m3 = qkv_dtype == at::ScalarType::Float8_e4m3fn;
539
+ params.b = batch_size;
540
+ params.seqlen_q = max_seqlen_q;
541
+ params.seqlen_k = max_seqlen_k;
542
+ params.h = num_heads;
543
+ params.h_k = num_heads_k;
544
+ params.d = headdim;
545
+ params.dv = headdim_v;
546
+ params.d_rounded = round_up_headdim(headdim);
547
+ params.dv_rounded = headdim_v == headdim ? params.d_rounded : round_up_headdimv(headdim_v);
548
+ params.seqlen_knew = max_seqlen_k_new;
549
+
550
+ bool const is_varlen_q = cu_seqlens_q_.has_value();
551
+ params.cu_seqlens_q = is_varlen_q ? cu_seqlens_q_.value().data_ptr<int>() : nullptr;
552
+ bool const is_varlen_k = cu_seqlens_k_.has_value();
553
+ params.cu_seqlens_k = is_varlen_k ? cu_seqlens_k_.value().data_ptr<int>() : nullptr;
554
+ params.cu_seqlens_knew = cu_seqlens_k_new_.has_value() ? cu_seqlens_k_new_.value().data_ptr<int>() : nullptr;
555
+ params.seqused_q = seqused_q_.has_value() ? seqused_q_.value().data_ptr<int>() : nullptr;
556
+ params.seqused_k = seqused_k.data_ptr<int>();
557
+ params.leftpad_k = leftpad_k_.has_value() ? leftpad_k_.value().data_ptr<int>() : nullptr;
558
+ params.knew_ptr = params.seqlen_knew > 0 ? reinterpret_cast<int*>(1) : nullptr;
559
+ if (window_size_left >= max_seqlen_k - 1) { window_size_left = -1; }
560
+ if (window_size_right >= max_seqlen_q - 1) { window_size_right = -1; }
561
+ // causal=true is the same as causal=false in this case
562
+ if (max_seqlen_q == 1 && window_size_left == -1 && window_size_right == -1 && attention_chunk == 0) {
563
+ // Special case of hdim 128 where we want causal to have kBlockN=128, better for pagedKV and TMA
564
+ if ((headdim <= 64 || headdim > 128) || !page_size.has_value()) {
565
+ is_causal = false;
566
+ }
567
+ }
568
+ if (is_causal) { window_size_right = 0; }
569
+
570
+ params.is_causal = window_size_left < 0 && window_size_right == 0 && attention_chunk == 0;
571
+ params.is_local = (window_size_left >= 0 || window_size_right >= 0 || attention_chunk >= 1) && !params.is_causal;
572
+ if (window_size_left < 0) { window_size_left = max_seqlen_k - 1; }
573
+ if (window_size_right < 0) { window_size_right = max_seqlen_q - 1; }
574
+ if (attention_chunk > 0) {
575
+ window_size_left = std::min(window_size_left, attention_chunk - 1);
576
+ window_size_right = std::min(window_size_right, attention_chunk - 1);
577
+ }
578
+ params.window_size_left = window_size_left;
579
+ params.window_size_right = window_size_right;
580
+ params.attention_chunk = attention_chunk;
581
+ params.arch = at::cuda::getCurrentDeviceProperties()->major * 10 + at::cuda::getCurrentDeviceProperties()->minor;
582
+ params.num_sm = at::cuda::getCurrentDeviceProperties()->multiProcessorCount - sm_margin;
583
+ params.softcap = has_softcap ? 1.0f : 0.0f;
584
+
585
+ params.page_size = page_size.has_value() ? page_size.value() : 1;
586
+ params.page_table = !page_size.has_value() ? nullptr : reinterpret_cast<int*>(1);
587
+
588
+ bool const use_dynamic_split = params.b <= 992;
589
+ params.num_splits_dynamic_ptr = !use_dynamic_split ? nullptr : reinterpret_cast<int*>(1);
590
+
591
+ params.pagedkv_tma = get_pagedkv_tma(params);
592
+ params.num_splits = num_splits <= 0 ? get_num_splits(params) : num_splits;
593
+ // Always enable PackGQA for Split, and get_pack_gqa requires params.num_splits to decide
594
+ params.pack_gqa = pack_gqa_.has_value() ? pack_gqa_.value() : get_pack_gqa(params);
595
+
596
+ bool is_varlen = true;
597
+
598
+ // Otherwise the kernel will be launched from cuda:0 device
599
+ // Cast to char to avoid compiler warning about narrowing
600
+ at::cuda::CUDAGuard device_guard{(char)seqused_k.get_device()};
601
+
602
+ auto opts = seqused_k.options();
603
+ // This needs to be set after get_num_splits
604
+ at::Tensor tile_count_semaphore; // Contains the semaphore and optionally num_splits_dynamic
605
+ bool const scheduler_needs_semaphore = params.arch >= 90 || params.num_splits > 1;
606
+ if (scheduler_needs_semaphore || use_dynamic_split) {
607
+ tile_count_semaphore = torch::empty({int(scheduler_needs_semaphore) + int(use_dynamic_split) * params.b}, opts.dtype(torch::kInt32));
608
+ if (scheduler_needs_semaphore) {
609
+ if (!use_dynamic_split) { tile_count_semaphore.zero_(); } // If varlen we'll manually do the zero-ing
610
+ params.tile_count_semaphore = tile_count_semaphore.data_ptr<int>();
611
+ } else {
612
+ params.tile_count_semaphore = nullptr;
613
+ }
614
+ params.num_splits_dynamic_ptr = use_dynamic_split ? tile_count_semaphore.data_ptr<int>() + 1 : nullptr;
615
+ }
616
+
617
+ if (params.num_splits_dynamic_ptr) {
618
+ auto kBlockMN_kernel_args_sm90 = tile_size_fwd_sm90(params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, false /*v_colmajor*/, params.page_table && !params.pagedkv_tma, params.softcap > 0.f);
619
+ auto kBlockMN_kernel_args_sm8x = tile_size_fwd_sm8x(params.arch == 86 || params.arch == 89, params.d_rounded, params.dv_rounded, params.is_causal, params.is_local, params.is_e4m3 ? 1 : 2 /*element_size*/, params.page_table, is_varlen && params.num_splits > 1, params.softcap > 0.f, params.knew_ptr);
620
+ int const kBlockM = params.arch >= 90 ? std::get<0>(kBlockMN_kernel_args_sm90) : std::get<0>(kBlockMN_kernel_args_sm8x);
621
+ int const kBlockN = params.arch >= 90 ? std::get<1>(kBlockMN_kernel_args_sm90) : std::get<1>(kBlockMN_kernel_args_sm8x);
622
+ auto stream = at::cuda::getCurrentCUDAStream().stream();
623
+ prepare_varlen_num_blocks(params, stream, params.pack_gqa, kBlockM, kBlockN, false /*enable_pdl*/);
624
+ CHECK_CUDA_KERNEL_LAUNCH();
625
+ }
626
+ return tile_count_semaphore;
627
+ }
628
+
629
+ // b: batch_size
630
+ // b_k: batch_size_k
631
+ // s_q: seqlen_q
632
+ // s_k: seqlen_k
633
+ // s_k_new: seqlen_k_new
634
+ // h: num_heads
635
+ // h_k: num_heads_k
636
+ // d: head_size
637
+ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
638
+ mha_fwd(at::Tensor q, // (b, s_q, h, d) or (total_q, h, d) if there is cu_seqlens_q
639
+ at::Tensor k, // (b_k, s_k, h_k, d) or (total_k, h_k, d) if there is cu_seqlens_k or (num_pages, page_size, h_k, d) if there is page_table.
640
+ at::Tensor v, // (b_k, s_k, h_k, dv) or (total_k, h_k, dv) if there is cu_seqlens_k or (num_pages, page_size, h_k, dv) if there is page_table.
641
+ std::optional<at::Tensor> k_new_, // (b, s_k_new, h_k, d) or (total_k_new, h_k, d) if there is cu_seqlens_k_new
642
+ std::optional<at::Tensor> v_new_, // (b, s_k_new, h_k, dv) or (total_k_new, h_k, dv) if there is cu_seqlens_k_new
643
+ std::optional<at::Tensor> q_v_, // (b, s_q, h, dv) or (total_q_new, h, dv) if there is cu_seqlens_q
644
+ std::optional<at::Tensor> out_, // (b, s_q, h, dv) or (total_q, h, dv) if there is cu_seqlens_q
645
+ std::optional<at::Tensor> cu_seqlens_q_, // b+1
646
+ std::optional<at::Tensor> cu_seqlens_k_, // b+1
647
+ std::optional<at::Tensor> cu_seqlens_k_new_, // b+1
648
+ std::optional<at::Tensor> seqused_q_, // b. If given, only this many elements of each batch element's queries and outputs are used.
649
+ std::optional<at::Tensor> seqused_k_, // b. If given, only this many elements of each batch element's keys are used.
650
+ std::optional<int64_t> max_seqlen_q_,
651
+ // TODO: check if we need max_seqlen_k
652
+ std::optional<int64_t> max_seqlen_k_,
653
+ std::optional<at::Tensor> page_table_, // (b_k, max_num_pages_per_seq)
654
+ std::optional<at::Tensor> kv_batch_idx_, // b. indices to index into the KV cache
655
+ std::optional<at::Tensor> leftpad_k_, // b
656
+ std::optional<at::Tensor> rotary_cos_, // seqlen_ro x (rotary_dim / 2)
657
+ std::optional<at::Tensor> rotary_sin_, // seqlen_ro x (rotary_dim / 2)
658
+ std::optional<at::Tensor> seqlens_rotary_, // b
659
+ std::optional<at::Tensor> q_descale_, // (b, h_k), not (b, h)
660
+ std::optional<at::Tensor> k_descale_, // (b, h_k)
661
+ std::optional<at::Tensor> v_descale_, // (b, h_k)
662
+ std::optional<double> softmax_scale_,
663
+ bool is_causal,
664
+ int64_t window_size_left,
665
+ int64_t window_size_right,
666
+ int64_t attention_chunk,
667
+ double softcap,
668
+ bool is_rotary_interleaved, // if true, rotary combines indices 0 & 1, else indices 0 & rotary_dim / 2
669
+ std::optional<at::Tensor> scheduler_metadata_, // (b + 1)
670
+ int64_t num_splits,
671
+ std::optional<bool> pack_gqa_,
672
+ int64_t sm_margin
673
+ ) {
674
+
675
+ auto dprops = at::cuda::getCurrentDeviceProperties();
676
+ bool is_sm8x = dprops->major >= 8;
677
+     TORCH_CHECK(is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
+
+     auto q_type = q.scalar_type();
+     TORCH_CHECK(q_type == at::ScalarType::Half || q_type == at::ScalarType::BFloat16 || q_type == at::ScalarType::Float8_e4m3fn,
+                 "FlashAttention only supports fp16, bf16, and fp8_e4m3 data types");
+     if (dprops->major < 9) {
+         TORCH_CHECK(q_type == at::ScalarType::Half || q_type == at::ScalarType::BFloat16,
+                     "FlashAttention on Ampere/Ada cards only supports fp16 and bf16 data types");
+     }
+     TORCH_CHECK(k.scalar_type() == q_type, "query and key must have the same dtype");
+     TORCH_CHECK(v.scalar_type() == q_type, "query and value must have the same dtype");
+
+     CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
+
+     TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+
+     at::Tensor page_table;
+     const bool paged_KV = page_table_.has_value();
+     if (paged_KV) {
+         page_table = page_table_.value();
+         CHECK_DEVICE(page_table);
+         TORCH_CHECK(page_table.dtype() == torch::kInt32, "page_table must have dtype torch.int32");
+         TORCH_CHECK(page_table.stride(-1) == 1, "page_table must have contiguous last dimension");
+     }
+
+     at::Tensor cu_seqlens_q;
+     bool const is_varlen_q = cu_seqlens_q_.has_value();
+     if (is_varlen_q) {
+         cu_seqlens_q = cu_seqlens_q_.value();
+         CHECK_DEVICE(cu_seqlens_q); CHECK_CONTIGUOUS(cu_seqlens_q);
+         TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype torch.int32");
+         TORCH_CHECK(max_seqlen_q_.has_value(), "max_seqlen_q must be provided if cu_seqlens_q is provided");
+     }
+     at::Tensor cu_seqlens_k;
+     bool const is_varlen_k = cu_seqlens_k_.has_value();
+     if (is_varlen_k) {
+         cu_seqlens_k = cu_seqlens_k_.value();
+         CHECK_DEVICE(cu_seqlens_k); CHECK_CONTIGUOUS(cu_seqlens_k);
+         TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype torch.int32");
+         TORCH_CHECK(max_seqlen_k_.has_value(), "max_seqlen_k must be provided if cu_seqlens_k is provided");
+         TORCH_CHECK(!paged_KV, "If cu_seqlens_k is passed in, then page table is not supported");
+         TORCH_CHECK(!kv_batch_idx_.has_value(), "If cu_seqlens_k is passed in, then kv_batch_idx is not supported");
+     }
+
+     auto const sizes = q.sizes();
+     const int batch_size = !is_varlen_q ? sizes[0] : cu_seqlens_q.size(0) - 1;
+     int seqlen_q = !is_varlen_q ? sizes[1] : max_seqlen_q_.value();
+     int total_q = !is_varlen_q ? batch_size * sizes[1] : sizes[0];
+     int num_heads = q.size(-2);
+     int const head_size = q.size(-1);
+     int const head_size_v = v.size(-1);
+     int const max_num_pages_per_seq = !paged_KV ? 0 : page_table.size(1);
+     int const num_pages = !paged_KV ? 0 : k.size(0);
+     int const page_size = !paged_KV ? 1 : k.size(1);
+     int const seqlen_k = !is_varlen_k ? (!paged_KV ? k.size(1) : max_num_pages_per_seq * page_size) : max_seqlen_k_.value();
+     int const total_k = !is_varlen_k ? batch_size * k.size(1) : k.size(0);
+     int const num_heads_k = k.size(-2);
+     int const batch_size_k = !paged_KV ? (!is_varlen_k ? k.size(0) : cu_seqlens_k.size(0) - 1) : page_table.size(0);
+     double softmax_scale = 1.0 / sqrt(double(head_size));
+     if (softmax_scale_.has_value()) {
+         softmax_scale = softmax_scale_.value();
+     }
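+     // The default matches standard attention scaling by 1/sqrt(head_size),
+     // e.g. head_size 128 gives softmax_scale = 1/sqrt(128) ~= 0.0884.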
+     if (!kv_batch_idx_.has_value()) {
+         TORCH_CHECK(batch_size == batch_size_k, "batch_size must be equal to batch_size_k");
+     }
+     int const max_headdim = get_max_headdim();
+     TORCH_CHECK(head_size <= max_headdim, "FlashAttention forward only supports head dimension at most " + std::to_string(max_headdim));
+     TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
+     if (head_size_v != head_size) {
+         TORCH_CHECK((head_size > 128 && head_size <= 192 && head_size_v > 96 && head_size_v <= 128) ||
+                     (head_size <= 64 && head_size_v <= 512),
+                     "If V headdim is different from Q/K dim, we only support Q/K headdim in (128, 192] and V headdim in (96, 128], "
+                     "or (Q/K <= 64 and V <= 512).");
+         TORCH_CHECK(dprops->major == 9, "Only Hopper supports different V headdim");
+         if (head_size_v > 256) {
+             TORCH_CHECK(q_type == at::ScalarType::Half || q_type == at::ScalarType::BFloat16,
+                         "HeaddimV > 256 requires fp16 or bf16 data type");
+         }
+     }
+
+     // This needs to go before kBlockM & kBlockN since we rely on the correct window_size and is_causal to set kBlockM
+     // TODO: check this
+     if (window_size_left >= seqlen_k - 1) { window_size_left = -1; }
+     if (window_size_right >= seqlen_q - 1) { window_size_right = -1; }
+     // causal=true is the same as causal=false in this case
+     if (seqlen_q == 1 && window_size_left == -1 && window_size_right == -1 && attention_chunk == 0) {
+         // Special case of hdim 128 where we want causal to have kBlockN=128, better for pagedKV and TMA
+         if ((head_size <= 64 || head_size > 128) || !paged_KV) {
+             is_causal = false;
+         }
+     }
+     if (is_causal) { window_size_right = 0; }
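+     // From here on, is_causal is encoded as window_size_right == 0: each query attends
+     // only to keys at or before its own (right-aligned) position. A causal sliding
+     // window of 128 keys would be window_size = (127, 0), for example.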
+
+     if (!is_varlen_q) {
+         CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size);
+     } else {
+         CHECK_SHAPE(q, total_q, num_heads, head_size);
+         CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
+     }
+     if (!paged_KV) {
+         if (!is_varlen_k) {
+             CHECK_SHAPE(k, batch_size_k, seqlen_k, num_heads_k, head_size);
+             CHECK_SHAPE(v, batch_size_k, seqlen_k, num_heads_k, head_size_v);
+         } else {
+             CHECK_SHAPE(k, total_k, num_heads_k, head_size);
+             CHECK_SHAPE(v, total_k, num_heads_k, head_size_v);
+             CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
+         }
+     } else {
+         CHECK_SHAPE(k, num_pages, page_size, num_heads_k, head_size);
+         CHECK_SHAPE(v, num_pages, page_size, num_heads_k, head_size_v);
+         CHECK_SHAPE(page_table, batch_size_k, max_num_pages_per_seq);
+     }
+
+     if (seqused_q_.has_value()) {
+         auto seqused_q = seqused_q_.value();
+         TORCH_CHECK(seqused_q.dtype() == torch::kInt32, "seqused_q must have dtype int32");
+         CHECK_DEVICE(seqused_q); CHECK_CONTIGUOUS(seqused_q);
+         CHECK_SHAPE(seqused_q, batch_size);
+     }
+     if (seqused_k_.has_value()) {
+         auto seqused_k = seqused_k_.value();
+         TORCH_CHECK(seqused_k.dtype() == torch::kInt32, "seqused_k must have dtype int32");
+         CHECK_DEVICE(seqused_k); CHECK_CONTIGUOUS(seqused_k);
+         CHECK_SHAPE(seqused_k, batch_size);
+     }
+
+     if (leftpad_k_.has_value()) {
+         auto leftpad_k = leftpad_k_.value();
+         TORCH_CHECK(leftpad_k.dtype() == torch::kInt32, "leftpad_k must have dtype int32");
+         CHECK_DEVICE(leftpad_k); CHECK_CONTIGUOUS(leftpad_k);
+         CHECK_SHAPE(leftpad_k, batch_size);
+     }
+
+     // This is what we will template on
+     bool const is_varlen = is_varlen_q || is_varlen_k || seqused_q_.has_value() || seqused_k_.has_value() || leftpad_k_.has_value();
+ #ifdef FLASHATTENTION_DISABLE_VARLEN
+     TORCH_CHECK(!is_varlen, "This flash attention build does not support varlen.");
+ #endif
+
+     int const alignment = q_type == torch::kFloat8_e4m3fn ? 16 : 8;
+     TORCH_CHECK(head_size % alignment == 0, "head_size should be a multiple of " + std::to_string(alignment));
+     TORCH_CHECK(head_size_v % alignment == 0, "head_size_v should be a multiple of " + std::to_string(alignment));
+
+     auto opts = q.options();
+     auto out_type = q_type == at::ScalarType::Float8_e4m3fn ? at::ScalarType::BFloat16 : q_type;
+     at::Tensor out;
+     if (out_.has_value()) {
+         out = out_.value();
+         TORCH_CHECK(out.scalar_type() == out_type, "For FP16/BF16 input, output must have the same dtype as inputs. For FP8 input, output must have dtype BF16");
+         CHECK_DEVICE(out);
+         TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
+         if (!is_varlen_q) {
+             CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size_v);
+         } else {
+             CHECK_SHAPE(out, total_q, num_heads, head_size_v);
+         }
+     } else {
+         out = !is_varlen_q
+             ? torch::empty({batch_size, seqlen_q, num_heads, head_size_v}, opts.dtype(out_type))
+             : torch::empty({total_q, num_heads, head_size_v}, opts.dtype(out_type));
+     }
+
+     auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
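+     // Rounds x up to the next multiple of m, e.g. round_multiple(1000, 128) == 1024.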
+     int const head_size_rounded = round_up_headdim(head_size);
+     int const head_size_v_rounded = head_size_v == head_size ? head_size_rounded : round_up_headdimv(head_size_v);
+     int const seqlen_q_rounded = round_multiple(seqlen_q, 128);
+     int const seqlen_k_rounded = round_multiple(seqlen_k, 128);
+
+     // Otherwise the kernel will be launched from cuda:0 device
+     // Cast to char to avoid compiler warning about narrowing
+     at::cuda::CUDAGuard device_guard{(char)q.get_device()};
+
+     at::Tensor softmax_lse;
+     if (!is_varlen_q) {
+         softmax_lse = torch::empty({batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));
+     } else {
+         softmax_lse = torch::empty({num_heads, total_q}, opts.dtype(at::kFloat));
+     }
+
+     Flash_fwd_params params;
+     set_params_fprop(params,
+                      batch_size,
+                      seqlen_q, seqlen_k,
+                      seqlen_q_rounded, seqlen_k_rounded,
+                      num_heads, num_heads_k,
+                      head_size, head_size_rounded,
+                      q, k, v, out,
+                      !is_varlen_q ? nullptr : cu_seqlens_q.data_ptr(),
+                      !is_varlen_k ? nullptr : cu_seqlens_k.data_ptr(),
+                      seqused_q_.has_value() ? seqused_q_.value().data_ptr() : nullptr,
+                      seqused_k_.has_value() ? seqused_k_.value().data_ptr() : nullptr,
+                      softmax_lse.data_ptr(),
+                      /*p_dropout=*/0.f,
+                      softmax_scale,
+                      window_size_left,
+                      window_size_right,
+                      attention_chunk,
+                      softcap,
+                      sm_margin);
+     params.total_q = total_q;
+     params.total_k = total_k;
+     params.b_k = batch_size_k;
+     params.dv = head_size_v;
+     params.dv_rounded = head_size_v_rounded;
+     if (leftpad_k_.has_value()) {  // This needs to be set before get_pagedkv_tma
+         params.leftpad_k = static_cast<int *>(leftpad_k_.value().data_ptr());
+     }
+     if (paged_KV) {
+         params.page_table = page_table.data_ptr<int>();
+         params.page_table_batch_stride = page_table.stride(0);
+     }
+     params.page_size = page_size;
+     params.num_pages = num_pages;
+
+     if (k_new_.has_value()) {  // This needs to be set before get_pagedkv_tma
+         at::Tensor k_new, v_new;
+         TORCH_CHECK(v_new_.has_value(), "If k_new is supplied, v_new must also be passed in");
+         TORCH_CHECK(seqused_k_.has_value(), "If k_new is supplied, seqused_k must also be passed in");
+         TORCH_CHECK(seqlen_q <= seqlen_k, "If k_new is supplied, it must have seqlen <= the seqlen of the KV cache");
+         at::Tensor cu_seqlens_k_new;
+         bool const is_varlen_k_new = cu_seqlens_k_new_.has_value();
+         if (is_varlen_k_new) {
+             cu_seqlens_k_new = cu_seqlens_k_new_.value();
+             CHECK_DEVICE(cu_seqlens_k_new); CHECK_CONTIGUOUS(cu_seqlens_k_new);
+             TORCH_CHECK(cu_seqlens_k_new.dtype() == torch::kInt32, "cu_seqlens_k_new must have dtype torch.int32");
+         }
+         k_new = k_new_.value();
+         v_new = v_new_.value();
+         TORCH_CHECK(k_new.dtype() == q_type, "k_new must have the same dtype as query");
+         TORCH_CHECK(v_new.dtype() == q_type, "v_new must have the same dtype as query");
+         CHECK_DEVICE(k_new); CHECK_DEVICE(v_new);
+         TORCH_CHECK(k_new.stride(-1) == 1, "k_new tensor must have contiguous last dimension");
+         TORCH_CHECK(v_new.stride(-1) == 1, "v_new tensor must have contiguous last dimension");
+         // We don't need max_seqlen_k_new, so seqlen_k_new can be arbitrary when is_varlen_k_new
+         int seqlen_k_new = !is_varlen_k_new ? k_new.size(1) : 0;
+         int total_k_new = !is_varlen_k_new ? batch_size * k_new.size(1) : k_new.size(0);
+         if (!is_varlen_k_new) {
+             CHECK_SHAPE(k_new, batch_size, seqlen_k_new, num_heads_k, head_size);
+             CHECK_SHAPE(v_new, batch_size, seqlen_k_new, num_heads_k, head_size_v);
+         } else {
+             CHECK_SHAPE(k_new, total_k_new, num_heads_k, head_size);
+             CHECK_SHAPE(v_new, total_k_new, num_heads_k, head_size_v);
+             CHECK_SHAPE(cu_seqlens_k_new, batch_size + 1);
+         }
+         params.seqlen_knew = seqlen_k_new;
+         params.total_knew = total_k_new;
+         params.knew_ptr = k_new.data_ptr();
+         params.vnew_ptr = v_new.data_ptr();
+         // All strides are in elements, not bytes.
+         params.knew_row_stride = k_new.stride(-3);
+         params.vnew_row_stride = v_new.stride(-3);
+         params.knew_head_stride = k_new.stride(-2);
+         params.vnew_head_stride = v_new.stride(-2);
+         if (!is_varlen_k_new) {
+             params.knew_batch_stride = k_new.stride(0);
+             params.vnew_batch_stride = v_new.stride(0);
+         }
+         if (is_varlen_k_new) {
+             params.cu_seqlens_knew = static_cast<int*>(cu_seqlens_k_new.data_ptr());
+         }
+     }
+
+     // 992 = 32 * 31 is the max supported batch in prepare_varlen_num_blocks kernel
+     bool const use_dynamic_split = is_varlen && params.b <= 992;
+     // Temporarily set num_splits_dynamic_ptr to 1 since get_num_splits checks it
+     params.num_splits_dynamic_ptr = !use_dynamic_split ? nullptr : reinterpret_cast<int*>(1);
+
+     params.pagedkv_tma = get_pagedkv_tma(params);
+     params.num_splits = num_splits <= 0 ? get_num_splits(params) : num_splits;
+     // Always enable PackGQA for Split, and get_pack_gqa requires params.num_splits to decide
+     params.pack_gqa = pack_gqa_.has_value() ? pack_gqa_.value() : get_pack_gqa(params);
+
+     // This needs to be set after get_num_splits
+     at::Tensor tile_count_semaphore;  // Contains the semaphore and optionally num_splits_dynamic
+     // We don't use the persistent scheduler if Split and not Varlen
+     bool const scheduler_needs_semaphore = params.arch >= 90
+         ? (((params.is_causal || params.is_local) && (params.num_splits == 1)) || is_varlen)
+         : ((params.is_causal && !is_varlen) || (is_varlen && params.num_splits > 1));
+     if (scheduler_needs_semaphore || use_dynamic_split) {
+         int metadata_size = int(scheduler_needs_semaphore) + int(use_dynamic_split) * params.b;
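+         // Metadata layout: one leading int32 semaphore (when scheduler_needs_semaphore),
+         // then params.b per-batch dynamic split counts (when use_dynamic_split);
+         // e.g. b = 8 with both enabled gives metadata_size == 9.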
+         params.skip_scheduler_metadata_computation = scheduler_metadata_.has_value();
+         if (scheduler_metadata_.has_value()) {
+             at::Tensor scheduler_metadata = scheduler_metadata_.value();
+             CHECK_DEVICE(scheduler_metadata);
+             CHECK_SHAPE(scheduler_metadata, metadata_size);
+             CHECK_CONTIGUOUS(scheduler_metadata);
+             TORCH_CHECK(scheduler_metadata.dtype() == torch::kInt32, "scheduler_metadata must have dtype int32");
+             tile_count_semaphore = scheduler_metadata;
+         } else {
+             tile_count_semaphore = torch::empty({metadata_size}, opts.dtype(torch::kInt32));
+         }
+         if (scheduler_needs_semaphore && !use_dynamic_split) {
+             tile_count_semaphore.zero_();  // If varlen we'll manually do the zero-ing
+         }
+         params.tile_count_semaphore = scheduler_needs_semaphore ? tile_count_semaphore.data_ptr<int>() : nullptr;
+         params.num_splits_dynamic_ptr = use_dynamic_split ? tile_count_semaphore.data_ptr<int>() + 1 : nullptr;
+     }
+
+     if (q_v_.has_value()) {
+         TORCH_CHECK(head_size <= 64, "q_v is only supported for head_size <= 64");
+         TORCH_CHECK(q_type == at::ScalarType::Half || q_type == at::ScalarType::BFloat16,
+                     "q_v is only supported for fp16 and bf16 data types");
+         TORCH_CHECK(params.arch == 90, "q_v is only supported for Hopper GPUs");
+         at::Tensor q_v = q_v_.value();
+         TORCH_CHECK(q_v.dtype() == q_type, "q_v must have the same dtype as query");
+         CHECK_DEVICE(q_v);
+         TORCH_CHECK(q_v.stride(-1) == 1, "q_v tensor must have contiguous last dimension");
+         if (!is_varlen_q) {
+             CHECK_SHAPE(q_v, batch_size, seqlen_q, num_heads, head_size_v);
+         } else {
+             CHECK_SHAPE(q_v, total_q, num_heads, head_size_v);
+         }
+         params.qv_ptr = q_v.data_ptr();
+         // All strides are in elements, not bytes.
+         params.qv_row_stride = q_v.stride(-3);
+         params.qv_head_stride = q_v.stride(-2);
+         if (!is_varlen_q) {
+             params.qv_batch_stride = q_v.stride(0);
+         }
+     }
+
+     if (rotary_cos_.has_value()) {
+         TORCH_CHECK(k_new_.has_value(), "If rotary cos/sin are provided, new key / value to be appended to KV cache must also be provided");
+         auto rotary_cos = rotary_cos_.value();
+         CHECK_DEVICE(rotary_cos); CHECK_CONTIGUOUS(rotary_cos);
+         params.rotary_dim = rotary_cos.size(1) * 2;
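+         // rotary_cos/rotary_sin are (seqlen_ro, rotary_dim / 2): e.g. rotary_dim = 64
+         // stores 32 cos (and 32 sin) values per position, as the shape checks below enforce.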
+         TORCH_CHECK(params.rotary_dim <= head_size, "rotary_dim must be <= headdim");
+         TORCH_CHECK(params.rotary_dim % 16 == 0, "Only rotary dimensions divisible by 16 are currently supported");
+         const int seqlen_ro = rotary_cos.size(0);
+         if (paged_KV) {
+             TORCH_CHECK(seqlen_ro >= seqlen_k, "cos/sin seqlen must be at least the seqlen of KV cache");
+         }
+         CHECK_SHAPE(rotary_cos, seqlen_ro, params.rotary_dim / 2);
+         TORCH_CHECK(rotary_cos.scalar_type() == q_type, "rotary_cos must have the same dtype as query");
+
+         TORCH_CHECK(rotary_sin_.has_value(), "If rotary cos is provided, rotary sin must also be provided");
+         auto rotary_sin = rotary_sin_.value();
+         CHECK_DEVICE(rotary_sin); CHECK_CONTIGUOUS(rotary_sin);
+         CHECK_SHAPE(rotary_sin, seqlen_ro, params.rotary_dim / 2);
+         TORCH_CHECK(rotary_sin.scalar_type() == q_type, "rotary_sin must have the same dtype as query");
+         params.rotary_cos_ptr = rotary_cos.data_ptr();
+         params.rotary_sin_ptr = rotary_sin.data_ptr();
+         params.is_rotary_interleaved = is_rotary_interleaved;
+         if (seqlens_rotary_.has_value()) {
+             at::Tensor seqlens_rotary = seqlens_rotary_.value();
+             CHECK_DEVICE(seqlens_rotary); CHECK_CONTIGUOUS(seqlens_rotary);
+             TORCH_CHECK(seqlens_rotary.dtype() == torch::kInt32, "seqlens_rotary must have dtype torch.int32");
+             CHECK_SHAPE(seqlens_rotary, batch_size);
+             params.seqlens_rotary = seqlens_rotary.data_ptr<int>();
+         }
+     } else {
+         params.rotary_dim = 0;
+     }
+
+     if (kv_batch_idx_.has_value()) {
+         auto kv_batch_idx = kv_batch_idx_.value();
+         CHECK_DEVICE(kv_batch_idx); CHECK_CONTIGUOUS(kv_batch_idx);
+         TORCH_CHECK(kv_batch_idx.scalar_type() == torch::kInt32, "kv_batch_idx must have dtype int32");
+         params.kv_batch_idx = reinterpret_cast<int *>(kv_batch_idx.data_ptr());
+     }
+
+     at::Tensor out_accum, softmax_lse_accum;
+     auto outaccum_type = at::ScalarType::Float;
+     if (params.num_splits > 1) {
+         TORCH_CHECK(params.num_splits <= 256, "num_splits > 256 not supported");
+         if (!is_varlen_q) {
+             out_accum = torch::empty({params.num_splits, batch_size, num_heads, seqlen_q, head_size_v}, opts.dtype(outaccum_type));
+             softmax_lse_accum = torch::empty({params.num_splits, batch_size, num_heads, seqlen_q}, opts.dtype(at::kFloat));
+             params.oaccum_batch_stride = out_accum.stride(1);
+             params.lseaccum_batch_stride = softmax_lse_accum.stride(1);
+         } else {
+             out_accum = torch::empty({params.num_splits, num_heads, total_q, head_size_v}, opts.dtype(outaccum_type));
+             softmax_lse_accum = torch::empty({params.num_splits, num_heads, total_q}, opts.dtype(at::kFloat));
+         }
+         params.is_fp32 = false;
+         params.oaccum_ptr = out_accum.data_ptr();
+         params.softmax_lseaccum_ptr = softmax_lse_accum.data_ptr();
+         params.oaccum_split_stride = out_accum.stride(0);
+         params.oaccum_row_stride = out_accum.stride(-2);
+         params.oaccum_head_stride = out_accum.stride(-3);
+         params.lseaccum_split_stride = softmax_lse_accum.stride(0);
+         params.lseaccum_head_stride = softmax_lse_accum.stride(-2);
+     }
+
+     if (q_type == at::ScalarType::Float8_e4m3fn) {
+         if (q_descale_.has_value()) {
+             auto q_descale = q_descale_.value();
+             CHECK_DEVICE(q_descale);
+             CHECK_SHAPE(q_descale, batch_size, num_heads_k);
+             params.q_descale_ptr = q_descale.data_ptr<float>();
+             params.q_descale_batch_stride = q_descale.stride(0);
+             params.q_descale_head_stride = q_descale.stride(1);
+         } else {
+             params.q_descale_ptr = nullptr;
+         }
+         if (k_descale_.has_value()) {
+             auto k_descale = k_descale_.value();
+             CHECK_DEVICE(k_descale);
+             CHECK_SHAPE(k_descale, batch_size, num_heads_k);
+             params.k_descale_ptr = k_descale.data_ptr<float>();
+             params.k_descale_batch_stride = k_descale.stride(0);
+             params.k_descale_head_stride = k_descale.stride(1);
+         } else {
+             params.k_descale_ptr = nullptr;
+         }
+         if (v_descale_.has_value()) {
+             auto v_descale = v_descale_.value();
+             CHECK_DEVICE(v_descale);
+             CHECK_SHAPE(v_descale, batch_size, num_heads_k);
+             params.v_descale_ptr = v_descale.data_ptr<float>();
+             params.v_descale_batch_stride = v_descale.stride(0);
+             params.v_descale_head_stride = v_descale.stride(1);
+         } else {
+             params.v_descale_ptr = nullptr;
+         }
+     }
+
+ #ifdef FLASHATTENTION_DISABLE_LOCAL
+     TORCH_CHECK(!params.is_local, "This flash attention build does not support local attention.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_SOFTCAP
+     TORCH_CHECK(params.softcap == 0.0, "This flash attention build does not support tanh softcapping.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_SPLIT
+     TORCH_CHECK(params.num_splits == 1, "This flash attention build does not support splits.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_PACKGQA
+     TORCH_CHECK(!params.pack_gqa || params.arch < 90 || (params.page_table && !params.pagedkv_tma) || params.num_splits > 1, "This flash attention build does not support pack_gqa.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_PAGEDKV
+     TORCH_CHECK(!(params.page_table && !params.pagedkv_tma), "This flash attention build does not support paged KV.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_APPENDKV
+     TORCH_CHECK(!k_new_.has_value(), "This flash attention build does not support appending KV.");
+ #endif
+
+     if (total_q > 0 && (total_k + params.total_knew) > 0 && num_heads_k > 0) {
+         auto stream = at::cuda::getCurrentCUDAStream().stream();
+         run_mha_fwd(params, stream);
+         if (params.num_splits > 1) {
+             if (out_type == at::ScalarType::BFloat16) {
+                 // We want the final output in BF16; otherwise fwd_combine would output FP16.
+                 params.is_bf16 = true;
+             }
+             // Unless there's seqused_q, for the purpose of attn_combine, we can just treat it as batch=1
+             // and seqlen = total_q, and don't need to dispatch to Varlen there.
+             // However, with dynamic split, each row needs to know which batch it belongs to
+             // to read the number of splits, so we just use the varlen version of combine kernel.
+             // if (is_varlen_q && !seqused_q_.has_value()) {
+             // if (is_varlen_q) {
+             //     params.b = 1;
+             //     params.seqlen_q = total_q;
+             // }
+             // This will zero out the semaphore if needed
+             run_mha_fwd_combine(params, stream, true /*enable_pdl*/);
+         } else if (scheduler_needs_semaphore && params.skip_scheduler_metadata_computation) {
+             // need to zero out the semaphore in this case
+             tile_count_semaphore.index({torch::indexing::Slice(0, 1)}).zero_();
+         }
+     } else if (total_q > 0 && num_heads_k > 0) {
+         // If seqlen_k == 0, then we have an empty tensor. We need to set the output to 0.
+         out.zero_();
+         softmax_lse.fill_(std::numeric_limits<float>::infinity());
+     }
+
+     // return {out, softmax_lse};
+     return {out, softmax_lse, out_accum, softmax_lse_accum};
+ }
+
+ #ifdef FLASHATTENTION_DISABLE_BACKWARD
+ void run_mha_bwd(Flash_bwd_params &params, cudaStream_t stream) {
+     TORCH_CHECK(false, "Flash-Attention was built with backward disabled");
+ }
+ #else
+ template <int Arch, bool Has_softcap>
+ void run_mha_bwd_constexpr(Flash_bwd_params &params, cudaStream_t stream) {
+     if (!params.is_bf16) {
+ #ifndef FLASHATTENTION_DISABLE_FP16
+ #ifndef FLASHATTENTION_DISABLE_HDIM64
+         if (params.d_rounded == 64) { return run_mha_bwd_<Arch, cutlass::half_t, 64, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM96
+         if (params.d_rounded == 96) { return run_mha_bwd_<Arch, cutlass::half_t, 96, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM128
+         if (params.d_rounded == 128) { return run_mha_bwd_<Arch, cutlass::half_t, 128, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM192
+         if (params.d_rounded == 192) { return run_mha_bwd_<Arch, cutlass::half_t, 192, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM256
+         if (params.d_rounded == 256) { return run_mha_bwd_<Arch, cutlass::half_t, 256, Has_softcap>(params, stream); }
+ #endif
+ #else
+         TORCH_CHECK(false, "This flash attention build does not support FP16.");
+ #endif
+     } else {
+ #ifndef FLASHATTENTION_DISABLE_HDIM64
+         if (params.d_rounded == 64) { return run_mha_bwd_<Arch, cutlass::bfloat16_t, 64, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM96
+         if (params.d_rounded == 96) { return run_mha_bwd_<Arch, cutlass::bfloat16_t, 96, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM128
+         if (params.d_rounded == 128) { return run_mha_bwd_<Arch, cutlass::bfloat16_t, 128, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM192
+         if (params.d_rounded == 192) { return run_mha_bwd_<Arch, cutlass::bfloat16_t, 192, Has_softcap>(params, stream); }
+ #endif
+ #ifndef FLASHATTENTION_DISABLE_HDIM256
+         if (params.d_rounded == 256) { return run_mha_bwd_<Arch, cutlass::bfloat16_t, 256, Has_softcap>(params, stream); }
+ #endif
+     }
+ }
+
+ void run_mha_bwd(Flash_bwd_params &params, cudaStream_t stream) {
+     // FP16_SWITCH(!params.is_bf16, [&] {
+     //     HEADDIM_SWITCH(params.d, [&] {
+     //         run_mha_bwd_<elem_type, kHeadDim>(params, stream);
+     //     });
+     // });
+     ARCH_SWITCH(params.arch, Arch, [&] {
+         SOFTCAP_SWITCH(params.softcap > 0.f, Has_softcap, [&] {
+             run_mha_bwd_constexpr<Arch, Has_softcap>(params, stream);
+         });
+     });
+ }
+ #endif
+
+
+ // b: batch_size
+ // s_q: seqlen_q
+ // s_k: seqlen_k
+ // h: num_heads
+ // h_k: num_heads_k
+ // d: head_size
+ std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> mha_bwd(
+     at::Tensor dout,  // (b, s_q, h, dv) or (total_q, h, dv) if there is cu_seqlens_q
+     at::Tensor q,     // (b, s_q, h, d) or (total_q, h, d) if there is cu_seqlens_q
+     at::Tensor k,     // (b, s_k, h_k, d) or (total_k, h_k, d) if there is cu_seqlens_k
+     at::Tensor v,     // (b, s_k, h_k, dv) or (total_k, h_k, dv) if there is cu_seqlens_k
+     at::Tensor out,   // (b, s_q, h, dv) or (total_q, h, dv) if there is cu_seqlens_q
+     at::Tensor softmax_lse,  // (b, h, s_q) or (h, total_q) if there is cu_seqlens_q
+     std::optional<at::Tensor> dq_,  // (b, s_q, h, d) or (total_q, h, d) if there is cu_seqlens_q
+     std::optional<at::Tensor> dk_,  // (b, s_k, h_k, d) or (total_k, h_k, d) if there is cu_seqlens_k
+     std::optional<at::Tensor> dv_,  // (b, s_k, h_k, dv) or (total_k, h_k, dv) if there is cu_seqlens_k
+     std::optional<at::Tensor> cu_seqlens_q_,  // b+1
+     std::optional<at::Tensor> cu_seqlens_k_,  // b+1
+     std::optional<at::Tensor> seqused_q_,  // b. If given, only this many elements of each batch element's queries and outputs are used.
+     std::optional<at::Tensor> seqused_k_,  // b. If given, only this many elements of each batch element's keys are used.
+     std::optional<int64_t> max_seqlen_q_,
+     std::optional<int64_t> max_seqlen_k_,
+     std::optional<double> softmax_scale_,
+     bool is_causal,
+     int64_t window_size_left,
+     int64_t window_size_right,
+     double softcap,
+     bool deterministic,
+     int64_t sm_margin
+ ) {
+
+ #ifdef FLASHATTENTION_DISABLE_BACKWARD
+     TORCH_CHECK(false, "This flash attention build does not support backward.");
+ #endif
+
+     auto dprops = at::cuda::getCurrentDeviceProperties();
+     bool is_sm8x = dprops->major >= 8;
+     TORCH_CHECK(is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
+
+     auto q_type = q.dtype();
+     TORCH_CHECK(q_type == torch::kFloat16 || q_type == torch::kBFloat16,
+                 "FlashAttention only supports fp16 and bf16 data types");
+     TORCH_CHECK(k.dtype() == q_type, "query and key must have the same dtype");
+     TORCH_CHECK(v.dtype() == q_type, "query and value must have the same dtype");
+     TORCH_CHECK(out.dtype() == q_type, "query and out must have the same dtype");
+     TORCH_CHECK(dout.dtype() == q_type, "query and dout must have the same dtype");
+
+     CHECK_DEVICE(q); CHECK_DEVICE(k); CHECK_DEVICE(v);
+     CHECK_DEVICE(out); CHECK_DEVICE(dout); CHECK_DEVICE(softmax_lse);
+
+     TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(out.stride(-1) == 1, "out tensor must have contiguous last dimension");
+     TORCH_CHECK(dout.stride(-1) == 1, "dout tensor must have contiguous last dimension");
+
+     at::Tensor cu_seqlens_q;
+     bool const is_varlen_q = cu_seqlens_q_.has_value();
+     if (is_varlen_q) {
+         cu_seqlens_q = cu_seqlens_q_.value();
+         CHECK_DEVICE(cu_seqlens_q); CHECK_CONTIGUOUS(cu_seqlens_q);
+         TORCH_CHECK(cu_seqlens_q.dtype() == torch::kInt32, "cu_seqlens_q must have dtype torch.int32");
+         TORCH_CHECK(max_seqlen_q_.has_value(), "max_seqlen_q must be provided if cu_seqlens_q is provided");
+     }
+     at::Tensor cu_seqlens_k;
+     bool const is_varlen_k = cu_seqlens_k_.has_value();
+     if (is_varlen_k) {
+         cu_seqlens_k = cu_seqlens_k_.value();
+         CHECK_DEVICE(cu_seqlens_k); CHECK_CONTIGUOUS(cu_seqlens_k);
+         TORCH_CHECK(cu_seqlens_k.dtype() == torch::kInt32, "cu_seqlens_k must have dtype torch.int32");
+         TORCH_CHECK(max_seqlen_k_.has_value(), "max_seqlen_k must be provided if cu_seqlens_k is provided");
+     }
+     // This is what we will template on
+     bool const is_varlen = is_varlen_q || is_varlen_k || seqused_q_.has_value() || seqused_k_.has_value();
+ #ifdef FLASHATTENTION_DISABLE_VARLEN
+     TORCH_CHECK(!is_varlen, "This flash attention build does not support varlen.");
+ #endif
+
+     auto const sizes = q.sizes();
+     int const batch_size = !is_varlen_q ? sizes[0] : cu_seqlens_q.size(0) - 1;
+     int const seqlen_q = !is_varlen_q ? sizes[1] : max_seqlen_q_.value();
+     int const total_q = !is_varlen_q ? batch_size * sizes[1] : sizes[0];
+     int const num_heads = q.size(-2);
+     int const head_size = q.size(-1);
+     int const head_size_v = v.size(-1);
+     int const seqlen_k = !is_varlen_k ? k.size(1) : max_seqlen_k_.value();
+     int const total_k = !is_varlen_k ? batch_size * k.size(1) : k.size(0);
+     int const num_heads_k = k.size(-2);
+     TORCH_CHECK(head_size % 8 == 0, "head_size should be a multiple of 8");
+     TORCH_CHECK(head_size_v % 8 == 0, "head_size_v should be a multiple of 8");
+     int const max_headdim = get_max_headdim();
+     TORCH_CHECK(std::max(head_size, head_size_v) <= max_headdim, "FlashAttention backward only supports head dimension at most " + std::to_string(max_headdim));
+     TORCH_CHECK(num_heads % num_heads_k == 0, "Number of heads in key/value must divide number of heads in query");
+     double softmax_scale = 1.0 / sqrt(double(head_size));
+     if (softmax_scale_.has_value()) {
+         softmax_scale = softmax_scale_.value();
+     }
+
+     // This needs to go before kBlockM & kBlockN since we rely on the correct window_size and is_causal to set kBlockM
+     if (window_size_left >= seqlen_k - 1) { window_size_left = -1; }
+     if (window_size_right >= seqlen_q - 1) { window_size_right = -1; }
+     if (is_causal) { window_size_right = 0; }
+     // There's a case where is_causal=false, window_size=(-1, 0). Then set_params_bprop will set params.is_causal=true.
+     // If we don't have is_causal here matching params.is_causal, we might get the wrong kBlockM (and cause IMA).
+     is_causal = window_size_left < 0 && window_size_right == 0;
+
+     int const arch = at::cuda::getCurrentDeviceProperties()->major * 10 + at::cuda::getCurrentDeviceProperties()->minor;
+     int const head_size_rounded = round_up_headdim(std::max(head_size, head_size_v));
+     int const head_size_v_rounded = head_size_rounded;
+     // Very important that these match the kernel configs
+     bool const is_local = (window_size_left >= 0 || window_size_right >= 0) && !is_causal;
+     int const kBlockM_sm90 = head_size_rounded <= 64 ? (is_causal && softcap > 0.0 ? 96 : 128)
+         : (head_size_rounded <= 96 ? 64
+         : (head_size_rounded <= 128 ? (is_causal || is_local || softcap > 0.0 ? 64 : 80)
+         : 64));
+     int const kBlockM_sm80 = head_size_rounded <= 64 ? 128 : 64;
+     int const kBlockM_sm86 = head_size_rounded <= 192 ? 64 : 32;
+     int const kBlockM = arch >= 90 ? kBlockM_sm90 : (arch == 86 || arch == 89 ? kBlockM_sm86 : kBlockM_sm80);
+     int const kBlockN_sm90 = head_size_rounded <= 128
+         ? 128
+         : (head_size_rounded <= 192 ? 96 : 80);
+     int const kBlockN_sm80 = head_size_rounded <= 128
+         ? 128
+         : (head_size_rounded <= 192 ? 80 : 64);
+     int const kBlockN_sm86 = head_size_rounded <= 64 ? 128
+         : (head_size_rounded <= 96 ? 128
+         : (head_size_rounded <= 128 ? 96
+         : (head_size_rounded <= 192 ? 64 : 64)));
+     int const kBlockN = arch >= 90 ? kBlockN_sm90 : (arch == 86 || arch == 89 ? kBlockN_sm86 : kBlockN_sm80);
+     auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
+     int const seqlen_q_rounded = round_multiple(seqlen_q, kBlockM);
+     int const seqlen_k_rounded = round_multiple(seqlen_k, kBlockN);
+     int const total_q_padded_rounded = round_multiple(total_q + batch_size * kBlockM, kBlockM);
+     int const total_k_padded_rounded = round_multiple(total_k + batch_size * kBlockN, kBlockN);
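+     // Worked example: total_q = 1000, batch_size = 4, kBlockM = 128 gives
+     // round_multiple(1000 + 4 * 128, 128) == 1536 padded rows for the varlen buffers.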
+
+     if (!is_varlen_q) {
+         CHECK_SHAPE(q, batch_size, seqlen_q, num_heads, head_size);
+         CHECK_SHAPE(out, batch_size, seqlen_q, num_heads, head_size_v);
+         CHECK_SHAPE(dout, batch_size, seqlen_q, num_heads, head_size_v);
+     } else {
+         CHECK_SHAPE(q, total_q, num_heads, head_size);
+         CHECK_SHAPE(out, total_q, num_heads, head_size_v);
+         CHECK_SHAPE(dout, total_q, num_heads, head_size_v);
+         CHECK_SHAPE(cu_seqlens_q, batch_size + 1);
+     }
+     if (!is_varlen_k) {
+         CHECK_SHAPE(k, batch_size, seqlen_k, num_heads_k, head_size);
+         CHECK_SHAPE(v, batch_size, seqlen_k, num_heads_k, head_size_v);
+     } else {
+         CHECK_SHAPE(k, total_k, num_heads_k, head_size);
+         CHECK_SHAPE(v, total_k, num_heads_k, head_size_v);
+         CHECK_SHAPE(cu_seqlens_k, batch_size + 1);
+     }
+
+     if (seqused_q_.has_value()) {
+         auto seqused_q = seqused_q_.value();
+         TORCH_CHECK(seqused_q.dtype() == torch::kInt32, "seqused_q must have dtype int32");
+         CHECK_DEVICE(seqused_q); CHECK_CONTIGUOUS(seqused_q);
+         CHECK_SHAPE(seqused_q, batch_size);
+     }
+     if (seqused_k_.has_value()) {
+         auto seqused_k = seqused_k_.value();
+         TORCH_CHECK(seqused_k.dtype() == torch::kInt32, "seqused_k must have dtype int32");
+         CHECK_DEVICE(seqused_k); CHECK_CONTIGUOUS(seqused_k);
+         CHECK_SHAPE(seqused_k, batch_size);
+     }
+
+     at::Tensor dq, dk, dv;
+     if (dq_.has_value()) {
+         dq = dq_.value();
+         TORCH_CHECK(dq.dtype() == q_type, "dq must have the same dtype as q");
+         CHECK_DEVICE(dq);
+         TORCH_CHECK(dq.stride(-1) == 1, "dq must have contiguous last dimension");
+         if (!is_varlen_q) {
+             CHECK_SHAPE(dq, batch_size, seqlen_q, num_heads, head_size);
+         } else {
+             CHECK_SHAPE(dq, total_q, num_heads, head_size);
+         }
+     } else {
+         dq = torch::empty_like(q);
+     }
+     if (dk_.has_value()) {
+         dk = dk_.value();
+         TORCH_CHECK(dk.dtype() == q_type, "dk must have the same dtype as q");
+         CHECK_DEVICE(dk);
+         TORCH_CHECK(dk.stride(-1) == 1, "dk must have contiguous last dimension");
+         if (!is_varlen_k) {
+             CHECK_SHAPE(dk, batch_size, seqlen_k, num_heads_k, head_size);
+         } else {
+             CHECK_SHAPE(dk, total_k, num_heads_k, head_size);
+         }
+     } else {
+         dk = torch::empty_like(k);
+     }
+     if (dv_.has_value()) {
+         dv = dv_.value();
+         TORCH_CHECK(dv.dtype() == q_type, "dv must have the same dtype as q");
+         CHECK_DEVICE(dv);
+         TORCH_CHECK(dv.stride(-1) == 1, "dv must have contiguous last dimension");
+         if (!is_varlen_k) {
+             CHECK_SHAPE(dv, batch_size, seqlen_k, num_heads_k, head_size_v);
+         } else {
+             CHECK_SHAPE(dv, total_k, num_heads_k, head_size_v);
+         }
+     } else {
+         dv = torch::empty_like(v);
+     }
+
+     // Otherwise the kernel will be launched from cuda:0 device
+     // Cast to char to avoid compiler warning about narrowing
+     at::cuda::CUDAGuard device_guard{(char)q.get_device()};
+
+     auto opts = q.options();
+     // Need softmax_d to have total_q_padded_rounded since we want its address to be aligned by 16/8 bytes for TMA / LDG.64
+     at::Tensor softmax_d, softmax_lse_log2;
+     if (!is_varlen) {
+         // Need softmax_d to have seqlen_q_rounded since we want its address to be aligned by 16/8 bytes for TMA / LDG.64
+         softmax_d = torch::empty({batch_size, num_heads, seqlen_q_rounded}, opts.dtype(at::kFloat));
+         softmax_lse_log2 = torch::empty({batch_size, num_heads, seqlen_q_rounded}, opts.dtype(at::kFloat));
+     } else {
+         softmax_d = torch::empty({num_heads, total_q_padded_rounded}, opts.dtype(at::kFloat));
+         softmax_lse_log2 = torch::empty({num_heads, total_q_padded_rounded}, opts.dtype(at::kFloat));
+     }
+     at::Tensor dq_accum, dk_accum, dv_accum;
+     if (!is_varlen) {
+         dq_accum = torch::empty({batch_size, num_heads, seqlen_q_rounded * head_size_rounded}, opts.dtype(at::kFloat));
+     } else {
+         dq_accum = torch::empty({num_heads, total_q_padded_rounded * head_size_rounded}, opts.dtype(at::kFloat));
+     }
+     if (num_heads_k != num_heads) {  // MQA / GQA
+         if (!is_varlen) {
+             dk_accum = torch::zeros({batch_size, num_heads_k, seqlen_k_rounded * head_size_rounded}, opts.dtype(at::kFloat));
+             dv_accum = torch::zeros({batch_size, num_heads_k, seqlen_k_rounded * head_size_v_rounded}, opts.dtype(at::kFloat));
+         } else {
+             dk_accum = torch::zeros({num_heads_k, total_k_padded_rounded, head_size_rounded}, opts.dtype(at::kFloat));
+             dv_accum = torch::zeros({num_heads_k, total_k_padded_rounded, head_size_v_rounded}, opts.dtype(at::kFloat));
+         }
+     }
+
+     Flash_bwd_params params;
+     set_params_dgrad(params,
+                      batch_size,
+                      seqlen_q, seqlen_k,
+                      seqlen_q_rounded, seqlen_k_rounded,
+                      num_heads, num_heads_k,
+                      head_size, head_size_rounded,
+                      q, k, v, out,
+                      dout, dq, dk, dv,
+                      !is_varlen_q ? nullptr : cu_seqlens_q.data_ptr(),
+                      !is_varlen_k ? nullptr : cu_seqlens_k.data_ptr(),
+                      seqused_q_.has_value() ? seqused_q_.value().data_ptr() : nullptr,
+                      seqused_k_.has_value() ? seqused_k_.value().data_ptr() : nullptr,
+                      dq_accum.data_ptr(),
+                      num_heads_k != num_heads ? dk_accum.data_ptr() : nullptr,
+                      num_heads_k != num_heads ? dv_accum.data_ptr() : nullptr,
+                      softmax_lse.data_ptr(),
+                      softmax_d.data_ptr(),
+                      /*p_dropout=*/0.f,
+                      softmax_scale,
+                      window_size_left,
+                      window_size_right,
+                      0,  // attention_chunk
+                      softcap,
+                      deterministic,
+                      sm_margin);
+     params.total_q = total_q;
+     params.total_k = total_k;
+     params.softmax_lse_log2_ptr = softmax_lse_log2.data_ptr();
+     params.dv = head_size_v;
+     params.dv_rounded = head_size_v_rounded;
+
+     // auto tile_count_semaphore = (params.is_causal || params.is_local) ? torch::zeros({1}, opts.dtype(torch::kInt32)) : torch::empty({1}, opts.dtype(torch::kInt32));
+     // params.tile_count_semaphore = tile_count_semaphore.data_ptr<int>();
+     // Will be zero'ed out in the backward preprocess kernel
+     at::Tensor dq_semaphore = torch::empty({(seqlen_q + kBlockM - 1) / kBlockM, batch_size, num_heads}, opts.dtype(torch::kInt32));
+     params.dq_semaphore = dq_semaphore.data_ptr<int>();
+     if (num_heads_k != num_heads && params.deterministic) {
+         // TODO: do we need to zero them out?
+         at::Tensor dk_semaphore = torch::empty({(seqlen_k + kBlockN - 1) / kBlockN, batch_size, num_heads_k}, opts.dtype(torch::kInt32));
+         at::Tensor dv_semaphore = torch::empty({(seqlen_k + kBlockN - 1) / kBlockN, batch_size, num_heads_k}, opts.dtype(torch::kInt32));
+         params.dk_semaphore = dk_semaphore.data_ptr<int>();
+         params.dv_semaphore = dv_semaphore.data_ptr<int>();
+     }
+
+ #ifdef FLASHATTENTION_DISABLE_LOCAL
+     TORCH_CHECK(!params.is_local, "This flash attention build does not support local attention.");
+ #endif
+ #ifdef FLASHATTENTION_DISABLE_SOFTCAP
+     TORCH_CHECK(params.softcap == 0.0, "This flash attention build does not support tanh softcapping.");
+ #endif
+
+     if (total_q > 0 && total_k > 0 && num_heads_k > 0) {
+         auto stream = at::cuda::getCurrentCUDAStream().stream();
+         run_mha_bwd(params, stream);
+     } else if (total_k > 0 && num_heads_k > 0) {
+         // If seqlen_q == 0, then we have an empty tensor. We need to set the output to 0.
+         dk.zero_();
+         dv.zero_();
+         softmax_d.zero_();
+     } else if (total_q > 0 && num_heads_k > 0) {
+         dq.zero_();
+         softmax_d.zero_();
+     }
+
+     return { dq, dk, dv, softmax_d, softmax_lse_log2, dq_accum, dk_accum, dv_accum };
+ }
+
+ std::tuple<at::Tensor, at::Tensor>
+ mha_combine(at::Tensor out_partial,  // num_splits x batch_size x seqlen x num_heads x head_size
+             at::Tensor lse_partial,  // num_splits x batch_size x seqlen x num_heads
+             std::optional<at::Tensor> out_,  // batch_size x seqlen x num_heads x head_size
+             std::optional<at::ScalarType> out_dtype_
+ ) {
+
+     auto dprops = at::cuda::getCurrentDeviceProperties();
+     bool is_sm8x = dprops->major >= 8;
+     TORCH_CHECK(is_sm8x, "Attention combine function only supports Ampere GPUs or newer.");
+
+     auto out_partial_type = out_partial.scalar_type();
+     TORCH_CHECK(out_partial_type == at::ScalarType::Float, "Attention combine function only supports fp32 data type");
+     TORCH_CHECK(lse_partial.scalar_type() == at::ScalarType::Float, "Attention combine function only supports fp32 data type");
+
+     CHECK_DEVICE(out_partial); CHECK_DEVICE(lse_partial);
+
+     TORCH_CHECK(out_partial.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(lse_partial.stride(-2) == 1, "LSE tensor must be contiguous in the seqlen dimension");
+
+     const auto sizes = out_partial.sizes();
+
+     const int num_splits = sizes[0];
+     const int batch_size = sizes[1];
+     const int seqlen = sizes[2];
+     const int num_heads = sizes[3];
+     const int head_size_og = sizes[4];
+     TORCH_CHECK(num_splits <= 256, "FlashAttention combine only supports num_splits at most 256");
+
+     CHECK_SHAPE(out_partial, num_splits, batch_size, seqlen, num_heads, head_size_og);
+     CHECK_SHAPE(lse_partial, num_splits, batch_size, seqlen, num_heads);
+
+     int const alignment = 4;
+     at::Tensor out_partial_padded;
+     auto pad = [](at::Tensor x, int alignment) {
+         return x.size(-1) % alignment == 0 ? x : torch::nn::functional::pad(x, torch::nn::functional::PadFuncOptions({0, alignment - x.size(-1) % alignment}));
+     };
+     out_partial_padded = pad(out_partial, alignment);
+
+     auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
+     const int head_size = round_multiple(head_size_og, alignment);
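+     // e.g. head_size_og = 94 pads up to head_size = 96 so the last dim stays 4-element aligned.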
+
+     auto opts = out_partial.options();
+     at::ScalarType out_type = out_dtype_.value_or(out_partial.scalar_type());
+     TORCH_CHECK(out_type == at::ScalarType::Float || out_type == at::ScalarType::BFloat16 || out_type == at::ScalarType::Half, "Output type must be FP32, FP16 or BF16");
+     at::Tensor out;
+     if (out_.has_value()) {
+         out = out_.value();
+         TORCH_CHECK(out.scalar_type() == out_type);
+         CHECK_DEVICE(out);
+         TORCH_CHECK(out.stride(-1) == 1, "Output tensor must have contiguous last dimension");
+         CHECK_SHAPE(out, batch_size, seqlen, num_heads, head_size_og);
+         if (head_size_og % alignment != 0) {
+             out = torch::empty({batch_size, seqlen, num_heads, head_size}, opts.dtype(out_type));
+         }
+     } else {
+         out = torch::empty({batch_size, seqlen, num_heads, head_size}, opts.dtype(out_type));
+     }
+
+     // Otherwise the kernel will be launched from cuda:0 device
+     // Cast to char to avoid compiler warning about narrowing
+     at::cuda::CUDAGuard device_guard{(char)out_partial.get_device()};
+
+     auto softmax_lse = torch::empty({batch_size, num_heads, seqlen}, opts.dtype(at::kFloat)).transpose(1, 2);
+
+     Flash_fwd_params params {};  // Need to reset the params to set everything to zero
+     params.is_fp32 = out_type == at::ScalarType::Float;
+     params.is_bf16 = out_type == at::ScalarType::BFloat16;
+     params.oaccum_ptr = out_partial_padded.data_ptr();
+     params.softmax_lseaccum_ptr = lse_partial.data_ptr();
+     params.o_ptr = out.data_ptr();
+     params.softmax_lse_ptr = softmax_lse.data_ptr();
+     params.b = batch_size;
+     params.h = num_heads;
+     params.seqlen_q = seqlen;
+     params.dv = head_size;
+     params.num_splits = num_splits;
+     params.oaccum_split_stride = out_partial_padded.stride(0);
+     params.oaccum_row_stride = out_partial_padded.stride(2);
+     params.oaccum_head_stride = out_partial_padded.stride(3);
+     params.oaccum_batch_stride = out_partial_padded.stride(1);
+     params.lseaccum_split_stride = lse_partial.stride(0);
+     params.lseaccum_head_stride = lse_partial.stride(3);
+     params.lseaccum_batch_stride = lse_partial.stride(1);
+     params.o_row_stride = out.stride(1);
+     params.o_head_stride = out.stride(2);
+     params.o_batch_stride = out.stride(0);
+     params.arch = at::cuda::getCurrentDeviceProperties()->major * 10 + at::cuda::getCurrentDeviceProperties()->minor;
+
+     if (seqlen > 0 && batch_size > 0) {
+         auto stream = at::cuda::getCurrentCUDAStream().stream();
+         run_mha_fwd_combine(params, stream, false /*enable_pdl*/);
+     }
+
+     at::Tensor out_padded = out;
+     if (head_size_og % alignment != 0) {
+         out = out.index({"...", torch::indexing::Slice(torch::indexing::None, head_size_og)});
+         // if (out_.has_value()) { out_.value().copy_(out); }
+     }
+
+     return {out, softmax_lse};
+ }
+
+ TORCH_LIBRARY(flash_attn_3, m) {
+     m.def("fwd("
+           "Tensor q,"
+           "Tensor k,"
+           "Tensor v,"
+           "Tensor(k_new!)? k_new = None,"
+           "Tensor(v_new!)? v_new = None,"
+           "Tensor? q_v = None,"
+           "Tensor(out!)? out = None,"
+           "Tensor? cu_seqlens_q = None,"
+           "Tensor? cu_seqlens_k = None,"
+           "Tensor? cu_seqlens_k_new = None,"
+           "Tensor? seqused_q = None,"
+           "Tensor? seqused_k = None,"
+           "int? max_seqlen_q = None,"
+           "int? max_seqlen_k = None,"
+           "Tensor? page_table = None,"
+           "Tensor? kv_batch_idx = None,"
+           "Tensor? leftpad_k = None,"
+           "Tensor? rotary_cos = None,"
+           "Tensor? rotary_sin = None,"
+           "Tensor? seqlens_rotary = None,"
+           "Tensor? q_descale = None,"
+           "Tensor? k_descale = None,"
+           "Tensor? v_descale = None,"
+           "float? softmax_scale = None,"
+           "bool is_causal = False,"
+           "int window_size_left = -1,"
+           "int window_size_right = -1,"
+           "int attention_chunk = 0,"
+           "float softcap = 0.0,"
+           "bool is_rotary_interleaved = False,"
+           "Tensor? scheduler_metadata = None,"
+           "int num_splits = 0,"
+           "bool? pack_gqa = None,"
+           "int sm_margin = 0) -> (Tensor(out!), Tensor, Tensor, Tensor)");
+     m.def("bwd("
+           "Tensor dout,"
+           "Tensor q,"
+           "Tensor k,"
+           "Tensor v,"
+           "Tensor out,"
+           "Tensor softmax_lse,"
+           "Tensor(dq!)? dq = None,"
+           "Tensor(dk!)? dk = None,"
+           "Tensor(dv!)? dv = None,"
+           "Tensor? cu_seqlens_q = None,"
+           "Tensor? cu_seqlens_k = None,"
+           "Tensor? seqused_q = None,"
+           "Tensor? seqused_k = None,"
+           "int? max_seqlen_q = None,"
+           "int? max_seqlen_k = None,"
+           "float? softmax_scale = None,"
+           "bool is_causal = False,"
+           "int window_size_left = -1,"
+           "int window_size_right = -1,"
+           "float softcap = 0.0,"
+           "bool deterministic = False,"
+           "int sm_margin = 0) -> (Tensor(dq!), Tensor(dk!), Tensor(dv!), Tensor, Tensor, Tensor, Tensor, Tensor)");
+     m.def("fwd_combine("
+           "Tensor out_partial,"
+           "Tensor lse_partial,"
+           "Tensor(out!)? out = None,"
+           "ScalarType? out_dtype = None) -> (Tensor(out!), Tensor)");
+     m.def("get_scheduler_metadata("
+           "int batch_size,"
+           "int max_seqlen_q,"
+           "int max_seqlen_k,"
+           "int num_heads,"
+           "int num_heads_k,"
+           "int headdim,"
+           "int headdim_v,"
+           "ScalarType qkv_dtype,"
+           "Tensor seqused_k,"
+           "Tensor? cu_seqlens_q = None,"
+           "Tensor? cu_seqlens_k = None,"
+           "Tensor? cu_seqlens_k_new = None,"
+           "Tensor? seqused_q = None,"
+           "Tensor? leftpad_k = None,"
+           "int? page_size = None,"
+           "int max_seqlen_k_new = 0,"
+           "bool is_causal = False,"
+           "int window_size_left = -1,"
+           "int window_size_right = -1,"
+           "int attention_chunk = 0,"
+           "bool has_softcap = False,"
+           "int num_splits = 0,"
+           "bool? pack_gqa = None,"
+           "int sm_margin = 0) -> Tensor");
+ }
+
+ TORCH_LIBRARY_IMPL(flash_attn_3, CUDA, m) {
+     m.impl("fwd", &mha_fwd);
+     m.impl("bwd", &mha_bwd);
+     m.impl("fwd_combine", &mha_combine);
+     m.impl("get_scheduler_metadata", &mha_fwd_get_scheduler_metadata);
+ }
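+
+ // Once registered, these ops are reachable from Python via torch.ops. A minimal
+ // sketch (assuming an fp16 CUDA build and (batch, seqlen, heads, headdim) inputs):
+ //
+ //   q = torch.randn(2, 1024, 16, 128, device="cuda", dtype=torch.float16)
+ //   k, v = torch.randn_like(q), torch.randn_like(q)
+ //   out, lse, *_ = torch.ops.flash_attn_3.fwd(q, k, v, is_causal=True)
+ //   # out: (2, 1024, 16, 128); lse: (2, 16, 1024)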
Code/Baselines/flash-attention/hopper/flash_attn_interface.py ADDED
@@ -0,0 +1,834 @@
+ # Copyright (c) 2023, Tri Dao.
+
+ from typing import Optional, Union
+
+ import torch
+ import torch.nn as nn
+
+ # isort: off
+ # We need to import the CUDA kernels after importing torch
+ import flash_attn_3._C  # Registers operators with PyTorch
+
+ # isort: on
+
+ flash_attn_3_cuda = torch.ops.flash_attn_3
+
+ def maybe_contiguous(x):
+     return x.contiguous() if x is not None and x.stride(-1) != 1 else x
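+     # e.g. a tensor made non-contiguous in its last dim by a transpose is copied here,
+     # while None and tensors with stride(-1) == 1 pass through unchanged.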
+
+
+ def _flash_attn_forward(
+     q,
+     k,
+     v,
+     k_new,
+     v_new,
+     qv,
+     out,
+     cu_seqlens_q,
+     cu_seqlens_k,
+     cu_seqlens_k_new,
+     seqused_q,
+     seqused_k,
+     max_seqlen_q,
+     max_seqlen_k,
+     page_table,
+     kv_batch_idx,
+     leftpad_k,
+     rotary_cos,
+     rotary_sin,
+     seqlens_rotary,
+     q_descale,
+     k_descale,
+     v_descale,
+     softmax_scale,
+     causal,
+     window_size=(-1, -1),
+     attention_chunk=0,
+     softcap=0.0,
+     rotary_interleaved=True,
+     scheduler_metadata=None,
+     num_splits=1,
+     pack_gqa=None,
+     sm_margin=0):
+     q, k, k_new, v_new = [maybe_contiguous(x) for x in (q, k, k_new, v_new)]
+     v = v.contiguous() if v.stride(-1) != 1 and v.stride(-3) != 1 else v
+     cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new = [
+         maybe_contiguous(x) for x in (cu_seqlens_q, cu_seqlens_k, cu_seqlens_k_new)
+     ]
+     seqused_q, seqused_k = [maybe_contiguous(x) for x in (seqused_q, seqused_k)]
+     page_table, kv_batch_idx, leftpad_k = [
+         maybe_contiguous(x) for x in (page_table, kv_batch_idx, leftpad_k)
+     ]
+     rotary_cos, rotary_sin = [maybe_contiguous(x) for x in (rotary_cos, rotary_sin)]
+     seqlens_rotary = maybe_contiguous(seqlens_rotary)
+     out, softmax_lse, *rest = flash_attn_3_cuda.fwd(
+         q,
+         k,
+         v,
+         k_new,
+         v_new,
+         qv,
+         out,
+         cu_seqlens_q,
+         cu_seqlens_k,
+         cu_seqlens_k_new,
+         seqused_q,
+         seqused_k,
+         max_seqlen_q,
+         max_seqlen_k,
+         page_table,
+         kv_batch_idx,
+         leftpad_k,
+         rotary_cos,
+         rotary_sin,
+         seqlens_rotary,
+         q_descale,
+         k_descale,
+         v_descale,
+         softmax_scale,
+         causal,
+         window_size[0],
+         window_size[1],
+         attention_chunk,
+         softcap,
+         rotary_interleaved,
+         scheduler_metadata,
+         num_splits,
+         pack_gqa,
+         sm_margin,
+     )
+     return out, softmax_lse, *rest
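+
+ # A minimal calling sketch (assumed shapes; optional features disabled by passing None):
+ #
+ #   q = torch.randn(2, 512, 8, 64, device="cuda", dtype=torch.bfloat16)
+ #   k, v = torch.randn_like(q), torch.randn_like(q)
+ #   out, lse, *rest = _flash_attn_forward(
+ #       q, k, v, None, None, None, None,           # k_new, v_new, qv, out
+ #       None, None, None, None, None, None, None,  # cu_seqlens_*, seqused_*, max_seqlen_*
+ #       None, None, None, None, None, None,        # page_table ... seqlens_rotary
+ #       None, None, None,                          # q_descale, k_descale, v_descale
+ #       softmax_scale=None, causal=True,
+ #   )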
+
+
+ def _flash_attn_backward(
+     dout,
+     q,
+     k,
+     v,
+     out,
+     softmax_lse,
+     cu_seqlens_q,
+     cu_seqlens_k,
+     seqused_q,
+     seqused_k,
+     max_seqlen_q,
+     max_seqlen_k,
+     dq,
+     dk,
+     dv,
+     softmax_scale,
+     causal,
+     window_size=(-1, -1),
+     softcap=0.0,
+     deterministic=False,
+     sm_margin=0,
+ ):
+     # dq, dk, dv are allocated by us so they should already be contiguous
+     dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
+     dq, dk, dv, softmax_d, *rest = flash_attn_3_cuda.bwd(
+         dout,
+         q,
+         k,
+         v,
+         out,
+         softmax_lse,
+         dq,
+         dk,
+         dv,
+         cu_seqlens_q,
+         cu_seqlens_k,
+         seqused_q,
+         seqused_k,
+         max_seqlen_q,
+         max_seqlen_k,
+         softmax_scale,
+         causal,
+         window_size[0],
+         window_size[1],
+         softcap,
+         deterministic,
+         sm_margin,
+     )
+     return dq, dk, dv, softmax_d
+
+
+ class FlashAttnQKVPackedFunc(torch.autograd.Function):
+     @staticmethod
+     def forward(
+         ctx,
+         qkv,
+         softmax_scale,
+         causal,
+         q_descale=None, k_descale=None, v_descale=None,
+         window_size=(-1, -1),
+         attention_chunk=0,
+         softcap=0.0,
+         deterministic=False,
+         num_heads_q=None,
+         sm_margin=0,
+     ):
+         if softmax_scale is None:
+             softmax_scale = qkv.shape[-1] ** (-0.5)
+         if qkv.dim() == 5:
+             assert qkv.shape[-3] == 3
+             q, k, v = qkv.unbind(dim=-3)
+         else:
+             assert qkv.dim() == 4
+             assert num_heads_q is not None
+             num_heads_k = (qkv.shape[2] - num_heads_q) // 2
+             assert num_heads_k * 2 + num_heads_q == qkv.shape[2]
+             q, k, v = qkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
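+             # e.g. a 4-D packed qkv with 24 total heads and num_heads_q=16 splits into
+             # q: 16 heads, k: 4 heads, v: 4 heads along dim=-2 (GQA-style packing).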
+         out, softmax_lse, *rest = _flash_attn_forward(
+             q,
+             k,
+             v,
+             None, None,  # k_new, v_new
+             None,  # qv
+             None,  # out
+             None, None, None,  # cu_seqlens_q/k/k_new
+             None, None,  # seqused_q/k
+             None, None,  # max_seqlen_q/k
+             None, None, None,  # page_table, kv_batch_idx, leftpad_k,
+             None, None, None,  # rotary_cos/sin, seqlens_rotary
+             q_descale, k_descale, v_descale,
+             softmax_scale,
+             causal=causal,
+             window_size=window_size,
+             attention_chunk=attention_chunk,
+             softcap=softcap,
+             sm_margin=sm_margin,
+         )
+         # ctx.save_for_backward(q, k, v, out_padded, softmax_lse)
+         ctx.save_for_backward(q, k, v, out, softmax_lse)
+         ctx.softmax_scale = softmax_scale
+         ctx.causal = causal
+         ctx.window_size = window_size
+         ctx.attention_chunk = attention_chunk
+         ctx.softcap = softcap
+         ctx.deterministic = deterministic
+         ctx.ndim = qkv.dim()
+         ctx.sm_margin = sm_margin
+         # return out, softmax_lse
+         return out
+
+     @staticmethod
+     def backward(ctx, dout, *args):
+         q, k, v, out, softmax_lse = ctx.saved_tensors
+         assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
+         if ctx.ndim == 5:
+             qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
+             dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
+             dq, dk, dv = dqkv.unbind(dim=-3)
+         else:
+             num_heads_q = q.shape[2]
+             num_heads_k = k.shape[2]
+             qkv_shape = q.shape[:-2] + (num_heads_q + num_heads_k * 2, *q.shape[-1:])
+             dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
+             dq, dk, dv = dqkv.split([num_heads_q, num_heads_k, num_heads_k], dim=-2)
+         _flash_attn_backward(
+             dout,
+             q,
+             k,
+             v,
+             out,
+             softmax_lse,
+             None, None,  # cu_seqlens_q, cu_seqlens_k,
+             None, None,  # seqused_q, seqused_k,
+             None, None,  # max_seqlen_q, max_seqlen_k,
+             dq,
+             dk,
+             dv,
+             ctx.softmax_scale,
+             ctx.causal,
+             ctx.window_size,
+             ctx.softcap,
+             ctx.deterministic,
+             ctx.sm_margin,
+         )
+         dqkv = dqkv[..., : dout.shape[-1]]  # We could have padded the head dimension
+         return dqkv, None, None, None, None, None, None, None, None, None, None, None
+
+
+ class FlashAttnFunc(torch.autograd.Function):
+
+     @staticmethod
+     def forward(
+         ctx,
+         q,
+         k,
+         v,
+         softmax_scale,
+         causal,
+         qv=None,
+         q_descale=None, k_descale=None, v_descale=None,
+         window_size=(-1, -1),
+         attention_chunk=0,
+         softcap=0.0,
+         num_splits=1,
+         pack_gqa=None,
+         deterministic=False,
+         sm_margin=0,
+     ):
+         if softmax_scale is None:
+             softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
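+             # The default scale uses the combined Q and QV head dims, e.g. q headdim 64
+             # with a 192-dim qv gives softmax_scale = 256 ** -0.5 = 0.0625.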
275
+ # out, q, k, v, out_padded, softmax_lse = _flash_attn_forward(
276
+ out, softmax_lse, *rest = _flash_attn_forward(
277
+ q,
278
+ k,
279
+ v,
280
+ None, None, # k_new, v_new
281
+ qv, # qv
282
+ None, # out
283
+ None, None, None, # cu_seqlens_q/k/k_new
284
+ None, None, # seqused_q/k
285
+ None, None, # max_seqlen_q/k
286
+ None, None, None, # page_table, kv_batch_idx, leftpad_k,
287
+ None, None, None, # rotary_cos/sin, seqlens_rotary
288
+ q_descale, k_descale, v_descale,
289
+ softmax_scale,
290
+ causal=causal,
291
+ window_size=window_size,
292
+ attention_chunk=attention_chunk,
293
+ softcap=softcap,
294
+ num_splits=num_splits,
295
+ pack_gqa=pack_gqa,
296
+ sm_margin=sm_margin,
297
+ )
298
+ # ctx.save_for_backward(q, k, v, out_padded, softmax_lse)
299
+ ctx.save_for_backward(q, k, v, out, softmax_lse)
300
+ ctx.softmax_scale = softmax_scale
301
+ ctx.causal = causal
302
+ ctx.window_size = window_size
303
+ ctx.attention_chunk = attention_chunk
304
+ ctx.softcap = softcap
305
+ ctx.deterministic = deterministic
306
+ ctx.sm_margin = sm_margin
307
+ return out
308
+
309
+ @staticmethod
310
+ def backward(ctx, dout, *args):
311
+ q, k, v, out, softmax_lse = ctx.saved_tensors
312
+ assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
313
+ dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
314
+ _flash_attn_backward(
315
+ dout,
316
+ q,
317
+ k,
318
+ v,
319
+ out,
320
+ softmax_lse,
321
+ None, None, # cu_seqlens_q, cu_seqlens_k,
322
+ None, None, # sequed_q, sequed_k,
323
+ None, None, # max_seqlen_q, max_seqlen_k,
324
+ dq,
325
+ dk,
326
+ dv,
327
+ ctx.softmax_scale,
328
+ ctx.causal,
329
+ ctx.window_size,
330
+ ctx.softcap,
331
+ ctx.deterministic,
332
+ ctx.sm_margin,
333
+ )
334
+ dq = dq[..., : q.shape[-1]] # We could have padded the head dimension
335
+ dk = dk[..., : k.shape[-1]]
336
+ dv = dv[..., : v.shape[-1]]
337
+ return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None
338
+
339
+
+class FlashAttnVarlenFunc(torch.autograd.Function):
+
+    @staticmethod
+    def forward(
+        ctx,
+        q,
+        k,
+        v,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        seqused_q,
+        seqused_k,
+        max_seqlen_q,
+        max_seqlen_k,
+        softmax_scale,
+        causal,
+        qv=None,
+        q_descale=None, k_descale=None, v_descale=None,
+        window_size=(-1, -1),
+        attention_chunk=0,
+        softcap=0.0,
+        num_splits=1,
+        pack_gqa=None,
+        deterministic=False,
+        sm_margin=0,
+    ):
+        if softmax_scale is None:
+            softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
+        # out, q, k, v, out_padded, softmax_lse = _flash_attn_varlen_forward(
+        out, softmax_lse, *rest = _flash_attn_forward(
+            q,
+            k,
+            v,
+            None, None,  # k_new, v_new
+            qv,  # qv
+            None,  # out
+            cu_seqlens_q,
+            cu_seqlens_k,
+            None,  # cu_seqlens_k_new
+            seqused_q,
+            seqused_k,
+            max_seqlen_q,
+            max_seqlen_k,
+            None, None, None,  # page_table, kv_batch_idx, leftpad_k
+            None, None, None,  # rotary_cos/sin, seqlens_rotary
+            q_descale, k_descale, v_descale,
+            softmax_scale,
+            causal=causal,
+            window_size=window_size,
+            attention_chunk=attention_chunk,
+            softcap=softcap,
+            num_splits=num_splits,
+            pack_gqa=pack_gqa,
+            sm_margin=sm_margin,
+        )
+        # ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
+        ctx.save_for_backward(q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k)
+        ctx.max_seqlen_q = max_seqlen_q
+        ctx.max_seqlen_k = max_seqlen_k
+        ctx.softmax_scale = softmax_scale
+        ctx.causal = causal
+        ctx.window_size = window_size
+        ctx.attention_chunk = attention_chunk
+        ctx.softcap = softcap
+        ctx.deterministic = deterministic
+        ctx.sm_margin = sm_margin
+        return out
+
+    @staticmethod
+    def backward(ctx, dout, *args):
+        q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, seqused_q, seqused_k = ctx.saved_tensors
+        assert ctx.attention_chunk == 0, "FA3 backward does not support attention_chunk"
+        dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
+        _flash_attn_backward(
+            dout,
+            q,
+            k,
+            v,
+            out,
+            softmax_lse,
+            cu_seqlens_q,
+            cu_seqlens_k,
+            seqused_q,
+            seqused_k,
+            ctx.max_seqlen_q,
+            ctx.max_seqlen_k,
+            dq,
+            dk,
+            dv,
+            ctx.softmax_scale,
+            ctx.causal,
+            ctx.window_size,
+            ctx.softcap,
+            ctx.deterministic,
+            ctx.sm_margin,
+        )
+        dq = dq[..., : q.shape[-1]]  # We could have padded the head dimension
+        dk = dk[..., : k.shape[-1]]
+        dv = dv[..., : v.shape[-1]]
+        return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None
+
+
+def flash_attn_qkvpacked_func(
+    qkv,
+    softmax_scale=None,
+    causal=False,
+    q_descale=None, k_descale=None, v_descale=None,
+    window_size=(-1, -1),
+    attention_chunk=0,
+    softcap=0.0,
+    deterministic=False,
+    num_heads_q=None,
+    sm_margin=0,
+):
+    """If Q, K, V are already stacked into one tensor, this function will be faster than
+    calling flash_attn_func on Q, K, V, since the backward pass avoids explicit concatenation
+    of the gradients of Q, K, V.
+    For multi-query and grouped-query attention (MQA/GQA), please see
+    flash_attn_kvpacked_func and flash_attn_func.
+
+    If window_size != (-1, -1), implements sliding window local attention. Query at position i
+    will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
+
+    Arguments:
+        qkv: (batch_size, seqlen, 3, nheads, headdim)
+        softmax_scale: float. The scaling of QK^T before applying softmax.
+            Default to 1 / sqrt(headdim).
+        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
+        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
+        softcap: float. Anything > 0 activates softcapping attention.
+        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
+            which is slightly slower and uses more memory. The forward pass is always deterministic.
+    Return:
+        out: (batch_size, seqlen, nheads, headdim).
+    """
+    return FlashAttnQKVPackedFunc.apply(
+        qkv,
+        softmax_scale,
+        causal,
+        q_descale, k_descale, v_descale,
+        window_size,
+        attention_chunk,
+        softcap,
+        deterministic,
+        num_heads_q,
+        sm_margin,
+    )
+
+
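+# Usage sketch (illustrative only; assumes an FA3 build on a Hopper GPU and bf16
+# inputs):
+#
+#     qkv = torch.randn(2, 1024, 3, 16, 128, device="cuda", dtype=torch.bfloat16,
+#                       requires_grad=True)
+#     out = flash_attn_qkvpacked_func(qkv, causal=True)  # (2, 1024, 16, 128)
+#     out.sum().backward()  # qkv.grad has the same packed layout as qkv
+
+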
+def flash_attn_func(
+    q,
+    k,
+    v,
+    softmax_scale=None,
+    causal=False,
+    qv=None,
+    q_descale=None, k_descale=None, v_descale=None,
+    window_size=(-1, -1),
+    attention_chunk=0,
+    softcap=0.0,
+    num_splits=1,
+    pack_gqa=None,
+    deterministic=False,
+    sm_margin=0,
+):
+    """Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
+    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
+    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
+    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
+
+    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
+    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
+        1 1 1 1 0
+        1 1 1 1 1
+    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
+        0 0
+        0 0
+        0 0
+        1 0
+        1 1
+    If a row of the mask is all zero, the output will be zero.
+
+    If window_size != (-1, -1), implements sliding window local attention. Query at position i
+    will only attend to keys between
+    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
+
+    Arguments:
+        q: (batch_size, seqlen, nheads, headdim)
+        k: (batch_size, seqlen, nheads_k, headdim)
+        v: (batch_size, seqlen, nheads_k, headdim)
+        softmax_scale: float. The scaling of QK^T before applying softmax.
+            Default to 1 / sqrt(headdim).
+        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
+        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
+        deterministic: bool. Whether to use the deterministic implementation of the backward pass,
+            which is slightly slower and uses more memory. The forward pass is always deterministic.
+    Return:
+        out: (batch_size, seqlen, nheads, headdim).
+    """
+    return FlashAttnFunc.apply(
+        q,
+        k,
+        v,
+        softmax_scale,
+        causal,
+        qv,
+        q_descale, k_descale, v_descale,
+        window_size,
+        attention_chunk,
+        softcap,
+        num_splits,
+        pack_gqa,
+        deterministic,
+        sm_margin,
+    )
+
+
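+# Usage sketch for GQA (illustrative; 16 query heads attending to 4 KV heads):
+#
+#     q = torch.randn(2, 1024, 16, 128, device="cuda", dtype=torch.bfloat16)
+#     k = torch.randn(2, 1024, 4, 128, device="cuda", dtype=torch.bfloat16)
+#     v = torch.randn(2, 1024, 4, 128, device="cuda", dtype=torch.bfloat16)
+#     out = flash_attn_func(q, k, v, causal=True)  # (2, 1024, 16, 128)
+
+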
+def flash_attn_varlen_func(
+    q,
+    k,
+    v,
+    cu_seqlens_q,
+    cu_seqlens_k,
+    max_seqlen_q,
+    max_seqlen_k,
+    seqused_q=None,
+    seqused_k=None,
+    softmax_scale=None,
+    causal=False,
+    qv=None,
+    q_descale=None, k_descale=None, v_descale=None,
+    window_size=(-1, -1),
+    attention_chunk=0,
+    softcap=0.0,
+    num_splits=1,
+    pack_gqa=None,
+    deterministic=False,
+    sm_margin=0,
+):
+    return FlashAttnVarlenFunc.apply(
+        q,
+        k,
+        v,
+        cu_seqlens_q,
+        cu_seqlens_k,
+        seqused_q,
+        seqused_k,
+        max_seqlen_q,
+        max_seqlen_k,
+        softmax_scale,
+        causal,
+        qv,
+        q_descale, k_descale, v_descale,
+        window_size,
+        attention_chunk,
+        softcap,
+        num_splits,
+        pack_gqa,
+        deterministic,
+        sm_margin,
+    )
+
+
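+# Usage sketch for the varlen interface (illustrative): two sequences of lengths
+# 3 and 5 packed into one (total_tokens, nheads, headdim) tensor, with cu_seqlens
+# marking the cumulative boundaries [0, 3, 8]:
+#
+#     q = torch.randn(8, 16, 128, device="cuda", dtype=torch.bfloat16)
+#     k, v = torch.randn_like(q), torch.randn_like(q)
+#     cu_seqlens = torch.tensor([0, 3, 8], device="cuda", dtype=torch.int32)
+#     out = flash_attn_varlen_func(q, k, v, cu_seqlens, cu_seqlens,
+#                                  max_seqlen_q=5, max_seqlen_k=5, causal=True)
+
+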
+def flash_attn_combine(out_partial, lse_partial, out=None, out_dtype=None):
+    return flash_attn_3_cuda.fwd_combine(out_partial, lse_partial, out, out_dtype)
+
+
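+# What the combine step computes, in math form (a sketch): given partial outputs
+# out_i and log-sum-exp values lse_i from attention over disjoint key chunks,
+#     lse = logsumexp_i(lse_i)
+#     out = sum_i exp(lse_i - lse) * out_i
+# i.e. each partial result is rescaled by its share of the softmax normalizer.
+
+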
+def flash_attn_with_kvcache(
+    q,
+    k_cache,
+    v_cache,
+    k=None,
+    v=None,
+    qv=None,
+    rotary_cos=None,
+    rotary_sin=None,
+    cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
+    cache_batch_idx: Optional[torch.Tensor] = None,
+    cache_leftpad: Optional[torch.Tensor] = None,
+    page_table: Optional[torch.Tensor] = None,
+    cu_seqlens_q: Optional[torch.Tensor] = None,
+    cu_seqlens_k_new: Optional[torch.Tensor] = None,
+    max_seqlen_q: Optional[int] = None,
+    rotary_seqlens: Optional[torch.Tensor] = None,
+    q_descale: Optional[torch.Tensor] = None,
+    k_descale: Optional[torch.Tensor] = None,
+    v_descale: Optional[torch.Tensor] = None,
+    softmax_scale=None,
+    causal=False,
+    window_size=(-1, -1),  # -1 means infinite context window
+    attention_chunk=0,
+    softcap=0.0,  # 0.0 means deactivated
+    rotary_interleaved=True,
+    scheduler_metadata=None,
+    num_splits=0,  # Can be tuned for speed
+    pack_gqa=None,  # Can be tuned for speed
+    sm_margin=0,  # Can be tuned if some SMs are used for communication
+    return_softmax_lse=False,
+):
+    """
+    If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
+    k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
+    the previous step, update them with the new keys/values from the current step, and do
+    attention with the updated cache, all in one kernel.
+
+    If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
+    For example, the KV cache could be pre-allocated with the max sequence length, and you can use
+    cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
+
+    Rotary embedding is also applied if rotary_cos and rotary_sin are passed in. The key @k will be
+    rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
+    If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
+    and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
+    If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
+    indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
+
+    See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
+
+    Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
+    than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
+    For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
+    0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
+
+    If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
+    For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
+        1 1 1 1 0
+        1 1 1 1 1
+    If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
+        0 0
+        0 0
+        0 0
+        1 0
+        1 1
+    If a row of the mask is all zero, the output will be zero.
+
+    If window_size != (-1, -1), implements sliding window local attention. Query at position i
+    will only attend to keys between
+    [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
+
+    Note: Does not support backward pass.
+
+    Arguments:
+        q: (batch_size, seqlen, nheads, headdim)
+        k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no page_table,
+            or (num_blocks, page_block_size, nheads_k, headdim) if there's a page_table (i.e. paged KV cache).
+            page_block_size must be a multiple of 256.
+        v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim_v) if there's no page_table,
+            or (num_blocks, page_block_size, nheads_k, headdim_v) if there's a page_table (i.e. paged KV cache)
+        k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
+            k with k_cache, starting at the indices specified by cache_seqlens.
+        v [optional]: (batch_size, seqlen_new, nheads_k, headdim_v). Similar to k.
+        qv [optional]: (batch_size, seqlen, nheads, headdim_v)
+        rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
+            to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
+        rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
+        cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
+            KV cache.
+        cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
+            If None, we assume the batch indices are [0, 1, 2, ..., batch_size - 1].
+            If the indices are not distinct, and k and v are provided, the values updated in the cache
+            might come from any of the duplicate indices.
+        cache_leftpad: (batch_size,), dtype torch.int32. The index at which the KV cache starts. If None, assume 0.
+        page_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
+        softmax_scale: float. The scaling of QK^T before applying softmax.
+            Default to 1 / sqrt(headdim).
+        causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
+        window_size: (left, right). If not (-1, -1), implements sliding window local attention.
+        softcap: float. Anything > 0 activates softcapping attention.
+        rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
+            If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
+            rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
+            (i.e. GPT-NeoX style).
+        num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
+            If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
+            to automatically determine the number of splits.
+            Don't change this unless you know what you are doing.
+        return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
+
+    Return:
+        out: (batch_size, seqlen, nheads, headdim).
+        softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
+            logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
+            normalization factor).
+    """
+    assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
+    assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
+    if softmax_scale is None:
+        softmax_scale = (q.shape[-1] + (qv.shape[-1] if qv is not None else 0)) ** (-0.5)
+    if cache_seqlens is not None and isinstance(cache_seqlens, int):
+        cache_seqlens = torch.full(
+            (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
+        )
+        cache_seqlens = maybe_contiguous(cache_seqlens)
+    out, softmax_lse, *rest = _flash_attn_forward(
+        q,
+        k_cache,
+        v_cache,
+        k,
+        v,
+        qv,
+        None,  # out
+        cu_seqlens_q,
+        None,  # cu_seqlens_k
+        cu_seqlens_k_new,
+        None,  # seqused_q
+        cache_seqlens,
+        max_seqlen_q,
+        None,  # max_seqlen_k
+        page_table,
+        cache_batch_idx,
+        cache_leftpad,
+        rotary_cos,
+        rotary_sin,
+        rotary_seqlens,
+        q_descale, k_descale, v_descale,
+        softmax_scale,
+        causal=causal,
+        window_size=window_size,
+        attention_chunk=attention_chunk,
+        softcap=softcap,
+        rotary_interleaved=rotary_interleaved,
+        scheduler_metadata=scheduler_metadata,
+        num_splits=num_splits,
+        pack_gqa=pack_gqa,
+        sm_margin=sm_margin,
+    )
+    # return (out, softmax_lse) if return_softmax_lse else out
+    return (out, softmax_lse, *rest) if return_softmax_lse else out
+
+
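+# Decoding sketch (illustrative; one new query token per step against a
+# pre-allocated cache, with per-sequence lengths tracked in cache_seqlens):
+#
+#     k_cache = torch.zeros(2, 4096, 4, 128, device="cuda", dtype=torch.bfloat16)
+#     v_cache = torch.zeros_like(k_cache)
+#     cache_seqlens = torch.tensor([17, 33], device="cuda", dtype=torch.int32)
+#     q = torch.randn(2, 1, 16, 128, device="cuda", dtype=torch.bfloat16)
+#     k_new = torch.randn(2, 1, 4, 128, device="cuda", dtype=torch.bfloat16)
+#     v_new = torch.randn_like(k_new)
+#     out = flash_attn_with_kvcache(q, k_cache, v_cache, k_new, v_new,
+#                                   cache_seqlens=cache_seqlens, causal=True)
+
+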
+def get_scheduler_metadata(
+    batch_size, max_seqlen_q, max_seqlen_k, num_heads_q, num_heads_kv, headdim,
+    cache_seqlens: torch.Tensor,
+    qkv_dtype=torch.bfloat16,
+    headdim_v=None,
+    cu_seqlens_q: Optional[torch.Tensor] = None,
+    cu_seqlens_k_new: Optional[torch.Tensor] = None,
+    cache_leftpad: Optional[torch.Tensor] = None,
+    page_size: Optional[int] = None,
+    max_seqlen_k_new=0,
+    causal=False,
+    window_size=(-1, -1),  # -1 means infinite context window
+    attention_chunk=0,
+    has_softcap=False,
+    num_splits=0,  # Can be tuned for speed
+    pack_gqa=None,  # Can be tuned for speed
+    sm_margin=0,  # Can be tuned if some SMs are used for communication
+):
+    cache_seqlens = maybe_contiguous(cache_seqlens)
+    if headdim_v is None:
+        headdim_v = headdim
+    scheduler_metadata = flash_attn_3_cuda.get_scheduler_metadata(
+        batch_size, max_seqlen_q, max_seqlen_k, num_heads_q, num_heads_kv, headdim, headdim_v,
+        qkv_dtype,
+        cache_seqlens,
+        cu_seqlens_q,
+        None,  # cu_seqlens_k
+        cu_seqlens_k_new,
+        None,  # seqused_q
+        cache_leftpad,
+        page_size,
+        max_seqlen_k_new,
+        causal,
+        window_size[0], window_size[1],
+        attention_chunk,
+        has_softcap,
+        num_splits,
+        pack_gqa,
+        sm_margin,
+    )
+    return scheduler_metadata
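+
+# Sketch of pairing get_scheduler_metadata with flash_attn_with_kvcache
+# (illustrative; the schedule is precomputed and passed in via scheduler_metadata):
+#
+#     metadata = get_scheduler_metadata(
+#         batch_size=2, max_seqlen_q=1, max_seqlen_k=4096, num_heads_q=16,
+#         num_heads_kv=4, headdim=128, cache_seqlens=cache_seqlens)
+#     out = flash_attn_with_kvcache(q, k_cache, v_cache,
+#                                   cache_seqlens=cache_seqlens, causal=True,
+#                                   scheduler_metadata=metadata)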
Code/Baselines/flash-attention/hopper/flash_bwd_kernel_sm90.h ADDED
@@ -0,0 +1,282 @@
+
+ /******************************************************************************
+  * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+  ******************************************************************************/
+
+ #pragma once
+
+ #include "cute/tensor.hpp"
+
+ #include <cutlass/cutlass.h>
+ #include <cutlass/arch/reg_reconfig.h>
+ #include <cutlass/array.h>
+ #include <cutlass/numeric_types.h>
+ #include <cutlass/numeric_conversion.h>
+ #include <cutlass/kernel_hardware_info.h>
+ #include "cutlass/pipeline/pipeline.hpp"
+
+ #include "utils.h"
+
+ namespace flash {
+
+ using namespace cute;
+
+ template <class CollectiveMainloop_, class CollectiveEpilogue_, class TileScheduler_>
+ class FlashAttnBwdSm90 {
+
+ public:
+
+     // Type Aliases
+     static constexpr bool Is_causal = CollectiveMainloop_::Is_causal;
+     static constexpr bool Is_local = CollectiveMainloop_::Is_local;
+     static_assert(CollectiveMainloop_::Varlen == CollectiveEpilogue_::Varlen);
+     static constexpr bool Varlen = CollectiveMainloop_::Varlen;
+
+     // Mainloop derived types
+     using CollectiveMainloop = CollectiveMainloop_;
+     using TileShape_MNK = typename CollectiveMainloop::TileShape_MNK;
+     using TiledMmaSdP = typename CollectiveMainloop::TiledMmaSdP;
+     using TiledMmadKV = typename CollectiveMainloop::TiledMmadKV;
+     using ArchTag = typename CollectiveMainloop::ArchTag;
+     using ClusterShape = typename CollectiveMainloop::ClusterShape;
+     using MainloopArguments = typename CollectiveMainloop::Arguments;
+     using MainloopParams = typename CollectiveMainloop::Params;
+     static constexpr bool dKV_swapAB = CollectiveMainloop::dKV_swapAB;
+
+     // Epilogue derived types
+     using CollectiveEpilogue = CollectiveEpilogue_;
+     using EpilogueArguments = typename CollectiveEpilogue::Arguments;
+     using EpilogueParams = typename CollectiveEpilogue::Params;
+
+     static_assert(ArchTag::kMinComputeCapability >= 90);
+
+     using TileScheduler = TileScheduler_;
+     using TileSchedulerArguments = typename flash::TileSchedulerArguments;
+     using TileSchedulerParams = typename TileScheduler::Params;
+
+     static constexpr uint32_t NumLoadWarpGroups = 1;
+     static constexpr uint32_t NumMmaWarpGroups = CUTE_STATIC_V(size(TiledMmaSdP{})) / cutlass::NumThreadsPerWarpGroup;
+     static constexpr uint32_t MaxThreadsPerBlock = CUTE_STATIC_V(size(TiledMmaSdP{})) + (NumLoadWarpGroups * cutlass::NumThreadsPerWarpGroup);
+     static constexpr uint32_t MinBlocksPerMultiprocessor = 1;
+     static_assert(NumMmaWarpGroups == 2 || NumMmaWarpGroups == 3);
+
+     /// Register requirement for Load and Math WGs
+     static constexpr uint32_t LoadRegisterRequirement = NumMmaWarpGroups == 2 ? 24 : 32;
+     static constexpr uint32_t MmaRegisterRequirement = NumMmaWarpGroups == 2 ? 240 : 160;
+     // If you want to print from the producer warp, you'd need to increase the number of registers.
+     // Otherwise you'll get a CUDA error.
+     // static constexpr uint32_t LoadRegisterRequirement = 40;
+     // static constexpr uint32_t MmaRegisterRequirement = NumMmaWarpGroups == 2 ? 232 : 152;
+
71
+ // Kernel level shared memory storage
72
+ struct SharedStorage {
73
+ struct TensorStorage : cute::aligned_struct<128> {
74
+ union {
75
+ typename CollectiveMainloop::TensorStorage mainloop;
76
+ typename CollectiveEpilogue::TensorStorage epilogue;
77
+ };
78
+ } tensors;
79
+
80
+ struct PipelineStorage : cute::aligned_struct<16> {
81
+ alignas(16) cutlass::arch::ClusterTransactionBarrier barrier_KV;
82
+ alignas(16) typename CollectiveMainloop::MainloopPipeline::SharedStorage pipeline_q;
83
+ alignas(16) typename CollectiveMainloop::MainloopPipeline_dO::SharedStorage pipeline_do;
84
+ alignas(16) typename TileScheduler::SharedStorage smem_scheduler;
85
+ } pipelines;
86
+
87
+ };
88
+
89
+ static constexpr int SharedStorageSize = sizeof(SharedStorage);
90
+
91
+ // Device side arguments
92
+ struct Arguments {
93
+ MainloopArguments mainloop{};
94
+ EpilogueArguments epilogue{};
95
+ cutlass::KernelHardwareInfo hw_info{};
96
+ TileSchedulerArguments scheduler{};
97
+ };
98
+
99
+ // Kernel entry point API
100
+ struct Params {
101
+ MainloopParams mainloop{};
102
+ EpilogueParams epilogue{};
103
+ cutlass::KernelHardwareInfo hw_info{};
104
+ TileSchedulerParams scheduler{};
105
+ };
106
+
107
+ //
108
+ // Methods
109
+ //
110
+
111
+ // Convert to underlying arguments. In this case, a simple copy for the aliased type.
112
+ static
113
+ Params
114
+ to_underlying_arguments(Arguments const& args) {
115
+ CUTLASS_TRACE_HOST("to_underlying_arguments():");
116
+
117
+ // Get SM count if needed, otherwise use user supplied SM count
118
+ int sm_count = args.hw_info.sm_count;
119
+ if (sm_count <= 0) {
120
+ CUTLASS_TRACE_HOST(" WARNING: Arguments do not include a valid SM count.\n"
121
+ " For optimal performance, populate the arguments KernelHardwareInfo struct with the SM count.");
122
+ sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(args.hw_info.device_id);
123
+ }
124
+
125
+ CUTLASS_TRACE_HOST("to_underlying_arguments(): Setting persistent grid SM count to " << sm_count);
126
+
127
+ cutlass::KernelHardwareInfo hw_info{args.hw_info.device_id, sm_count};
128
+ return {
129
+ CollectiveMainloop::to_underlying_arguments(args.mainloop),
130
+ CollectiveEpilogue::to_underlying_arguments(args.epilogue),
131
+ hw_info,
132
+ TileScheduler::to_underlying_arguments(args.scheduler)
133
+ };
134
+ }
135
+
136
+ // Computes the kernel launch grid shape based on runtime parameters
137
+ static dim3
138
+ get_grid_shape(Params const& params) {
139
+ return TileScheduler::get_grid_shape(params.scheduler, params.hw_info.sm_count);
140
+ }
141
+
142
+ static dim3
143
+ get_block_shape() {
144
+ return dim3(MaxThreadsPerBlock, 1, 1);
145
+ }
146
+
147
+ CUTLASS_DEVICE
148
+ void
149
+ operator()(Params const& params, char* smem_buf) {
150
+
151
+ static constexpr int NumMmaThreads = NumMmaWarpGroups * cutlass::NumThreadsPerWarpGroup;
152
+ static constexpr int NumCopyThreads = NumLoadWarpGroups * cutlass::NumThreadsPerWarpGroup;
153
+ static constexpr int kBlockM = get<0>(TileShape_MNK{});
154
+ static constexpr int kBlockN = get<1>(TileShape_MNK{});
155
+
156
+ using MainloopPipeline = typename CollectiveMainloop::MainloopPipeline;
157
+ using PipelineParams = typename MainloopPipeline::Params;
158
+ using PipelineState = typename MainloopPipeline::PipelineState;
159
+ using MainloopPipeline_dO = typename CollectiveMainloop::MainloopPipeline_dO;
160
+ using PipelineParams_dO = typename MainloopPipeline_dO::Params;
161
+ using PipelineState_dO = typename MainloopPipeline_dO::PipelineState;
162
+ static constexpr bool Q_dO_same_stages = std::is_same_v<MainloopPipeline, MainloopPipeline_dO>;
163
+
164
+ SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(smem_buf);
165
+
166
+ int const lane_predicate = cute::elect_one_sync();
167
+ int const warp_idx = cutlass::canonical_warp_idx_sync();
168
+
169
+ // Issue Tma Descriptor Prefetch from a single thread
170
+ if (warp_idx == 0 && lane_predicate) {
171
+ CollectiveMainloop::prefetch_tma_descriptors(params.mainloop);
172
+ CollectiveEpilogue::prefetch_tma_descriptors(params.epilogue);
173
+ }
174
+
175
+ // Obtain warp index
176
+ int const warp_group_thread_idx = threadIdx.x % cutlass::NumThreadsPerWarpGroup;
177
+
178
+ PipelineParams pipeline_params;
179
+ pipeline_params.transaction_bytes = CollectiveMainloop::TmaTransactionBytesQ + CollectiveMainloop::TmaTransactionBytesLSE;
180
+ int warp_group_idx = cutlass::canonical_warp_group_idx();
181
+ pipeline_params.role = warp_group_idx == 0
182
+ ? MainloopPipeline::ThreadCategory::Producer
183
+ : MainloopPipeline::ThreadCategory::Consumer;
184
+ pipeline_params.is_leader = warp_group_thread_idx == 0;
185
+ pipeline_params.num_consumers = NumMmaThreads;
186
+
187
+ if (warp_idx == 0 && lane_predicate) {
188
+ shared_storage.pipelines.barrier_KV.init(1 /*numThreads*/);
189
+ }
190
+ // We're counting on pipeline_q to call cutlass::arch::fence_barrier_init();
191
+ MainloopPipeline pipeline_q(shared_storage.pipelines.pipeline_q, pipeline_params, ClusterShape{});
192
+ auto role_dO = warp_group_idx == 0
193
+ ? MainloopPipeline_dO::ThreadCategory::Producer
194
+ : MainloopPipeline_dO::ThreadCategory::Consumer;
195
+ PipelineParams_dO pipeline_params_dO {pipeline_params.transaction_bytes, role_dO, pipeline_params.is_leader, pipeline_params.num_consumers};
196
+ MainloopPipeline_dO pipeline_do(shared_storage.pipelines.pipeline_do, cute::conditional_return<Q_dO_same_stages>(pipeline_params, pipeline_params_dO), ClusterShape{});
197
+
198
+ CollectiveMainloop mainloop;
199
+ CollectiveEpilogue epilogue;
200
+
201
+ // We need this to guarantee that the Pipeline init is visible to all producers and consumer blocks in the Cluster
202
+ if constexpr (size(ClusterShape{}) > 1) {
203
+ cute::cluster_arrive_relaxed();
204
+ cute::cluster_wait();
205
+ } else {
206
+ __syncthreads();
207
+ }
208
+
209
+ TileScheduler scheduler(reinterpret_cast<typename TileScheduler::SharedStorage*>(&shared_storage.pipelines.smem_scheduler));
210
+
211
+ if (warp_group_idx == 0) { // Producer
212
+ cutlass::arch::warpgroup_reg_dealloc<LoadRegisterRequirement>();
213
+
214
+ int warp_idx_in_warpgroup = __shfl_sync(0xffffffff, (threadIdx.x / 32) % 4, 0);
215
+ if (warp_idx_in_warpgroup == 0) { // Load K, V, and do TMA on Q and dO
216
+ PipelineState smem_pipe_write = cutlass::make_producer_start_state<MainloopPipeline>();
217
+ PipelineState_dO smem_pipe_write_do = cutlass::make_producer_start_state<MainloopPipeline_dO>();
218
+ for (auto work_tile_info = scheduler.template get_initial_work</*IsProducerWarp=*/true>(params.scheduler);
219
+ work_tile_info.is_valid(params.scheduler);
220
+ work_tile_info = scheduler.template get_next_work</*IsProducerWarp=*/true>(params.scheduler, work_tile_info)) {
221
+ auto block_coord_ = work_tile_info.get_block_coord(params.scheduler);
222
+ auto [n_block, bidh, bidb, _ /*split_idx*/] = block_coord_;
223
+ cute::tuple<int32_t, int32_t, int32_t> block_coord = {n_block, bidh, bidb};
224
+ auto scheduler_prefetch = [&scheduler, &params, &work_tile_info]() {
225
+ scheduler.prefetch_next_work(params.scheduler, work_tile_info);
226
+ };
227
+ mainloop.load(params.mainloop, pipeline_q, pipeline_do, smem_pipe_write,
228
+ smem_pipe_write_do, shared_storage, scheduler_prefetch, block_coord);
229
+ }
230
+ mainloop.load_tail(pipeline_q, pipeline_do, smem_pipe_write, smem_pipe_write_do);
231
+ } else if (warp_idx_in_warpgroup == 1) {
232
+ for (auto work_tile_info = scheduler.template get_initial_work</*IsProducerWarp=*/false>(params.scheduler);
233
+ work_tile_info.is_valid(params.scheduler);
234
+ work_tile_info = scheduler.template get_next_work</*IsProducerWarp=*/false>(params.scheduler, work_tile_info)) {
235
+ auto block_coord_ = work_tile_info.get_block_coord(params.scheduler);
236
+ auto [n_block, bidh, bidb, _ /*split_idx*/] = block_coord_;
237
+ cute::tuple<int32_t, int32_t, int32_t> block_coord = {n_block, bidh, bidb};
238
+ mainloop.store_dq(params.mainloop, shared_storage, block_coord);
239
+ }
240
+ }
241
+ } else { // Consumer
242
+ cutlass::arch::warpgroup_reg_alloc<MmaRegisterRequirement>();
243
+ // Initialize matmul objects.
244
+ TiledMmadKV tiled_mma_dKV;
245
+
246
+ PipelineState smem_pipe_read;
247
+ PipelineState_dO smem_pipe_read_do;
248
+
249
+ mainloop.mma_init();
250
+ scheduler.init_consumer();
251
+
252
+ int work_idx = 0;
253
+ CUTLASS_PRAGMA_NO_UNROLL
254
+ for (auto work_tile_info = scheduler.template get_initial_work</*IsProducerWarp=*/false>(params.scheduler);
255
+ work_tile_info.is_valid(params.scheduler);
256
+ work_tile_info = scheduler.template get_next_work</*IsProducerWarp=*/false>(params.scheduler, work_tile_info)) {
257
+ auto block_coord_ = work_tile_info.get_block_coord(params.scheduler);
258
+ auto [n_block, bidh, bidb, _ /*split_idx*/] = block_coord_;
259
+ cute::tuple<int32_t, int32_t, int32_t> block_coord = {n_block, bidh, bidb};
260
+
261
+ // dK and dV output accumulator.
262
+ Tensor tdKrdK = partition_fragment_C(tiled_mma_dKV, select<!dKV_swapAB ? 1 : 2, !dKV_swapAB? 2 : 1>(TileShape_MNK{}));
263
+ Tensor tdVrdV = partition_fragment_C(tiled_mma_dKV, select<!dKV_swapAB ? 1 : 2, !dKV_swapAB? 2 : 1>(TileShape_MNK{}));
264
+ bool tile_valid = mainloop.mma(
265
+ params.mainloop, pipeline_q, pipeline_do, smem_pipe_read, smem_pipe_read_do,
266
+ tdKrdK, tdVrdV, threadIdx.x - NumCopyThreads, work_idx, block_coord, shared_storage);
267
+ if (tile_valid) {
268
+ epilogue.store(params.epilogue, tdKrdK, tdVrdV, shared_storage, tiled_mma_dKV,
269
+ threadIdx.x - NumCopyThreads, block_coord);
270
+ } else {
271
+ epilogue.store_zero(params.epilogue, threadIdx.x - NumCopyThreads, block_coord);
272
+ }
273
+
274
+ }
275
+ epilogue.store_tail();
276
+ }
277
+
278
+ }
279
+
280
+ };
281
+
282
+ } // namespace flash
Code/Baselines/flash-attention/hopper/flash_bwd_launch_template.h ADDED
@@ -0,0 +1,390 @@
+ /******************************************************************************
+  * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+  ******************************************************************************/
+
+ #pragma once
+
+ #include "cute/tensor.hpp"
+
+ #include "cutlass/device_kernel.h"  // For device_kernel
+ #include "cutlass/kernel_launch.h"  // For kernel_launch
+ #include "cutlass/cluster_launch.hpp"  // For ClusterLauncher
+
+ #include "static_switch.h"
+ #include "flash.h"
+ #include "flash_bwd_preprocess_kernel.h"
+ #include "flash_bwd_postprocess_kernel.h"
+ #include "tile_scheduler.hpp"
+ #include "mainloop_bwd_sm90_tma_gmma_ws.hpp"
+ #include "mainloop_bwd_sm80.hpp"
+ #include "epilogue_bwd.hpp"
+ #include "flash_bwd_kernel_sm90.h"
+ #include "flash_bwd_kernel_sm80.h"
+
+ using namespace cute;
+
+ template <int Arch, int kHeadDim, int kBlockM, int kBlockN, typename Element,
+           bool Is_causal, bool Is_local, bool Has_softcap, bool Varlen, bool Deterministic, bool GQA,
+           int Stages_dO=2, int Stages_dS_or_QSm80=2,
+           bool SdP_swapAB=true, bool dKV_swapAB=false, bool dQ_swapAB=false,
+           int NumMmaWarpGroups=2, int AtomLayoutMSdP=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=1,
+           bool V_in_regs=false>
+ void run_flash_bwd(Flash_bwd_params &params, cudaStream_t stream) {
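+     // This launcher runs the backward pass as three stages:
+     //   (1) a preprocess kernel that computes the rowwise sum of dO * O (dPsum) and a
+     //       log2-scaled LSE, and clears the fp32 dQ accumulator;
+     //   (2) the main backward kernel, which computes dK/dV and accumulates dQ in fp32;
+     //   (3) a postprocess kernel that converts the fp32 dQ accumulator to the output
+     //       dtype (and, for GQA, likewise converts the dK/dV accumulators).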
33
+ static_assert(!(Is_causal && Is_local), "Is_causal and Is_local cannot be true at the same time.");
34
+ using ElementAccum = float;
35
+ using ArchTag = std::conditional_t<Arch >= 90, cutlass::arch::Sm90, cutlass::arch::Sm80>;
36
+
37
+ int const total_q_padded_rounded = cute::round_up(params.total_q + params.b * kBlockM, kBlockM);
38
+ int const total_k_padded_rounded = cute::round_up(params.total_k + params.b * kBlockN, kBlockN);
39
+ bool const is_varlen_q = params.cu_seqlens_q;
40
+ bool const is_varlen_k = params.cu_seqlens_k;
41
+ int seqlen_q = !is_varlen_q ? params.seqlen_q : params.total_q;
42
+ int seqlen_k = !is_varlen_k ? params.seqlen_k : params.total_k;
43
+ int seqlen_q_rounded = !is_varlen_q ? params.seqlen_q_rounded : total_q_padded_rounded;
44
+ int seqlen_k_rounded = !is_varlen_k ? params.seqlen_k_rounded : total_k_padded_rounded;
45
+ int batch_q = !is_varlen_q ? params.b : 1;
46
+ int batch_k = !is_varlen_k ? params.b : 1;
47
+
48
+ using TileShape_MK = cute::Shape<Int<kBlockM>, Int<kHeadDim>>;
49
+ using PreprocessKernel = flash::FlashAttnBwdPreprocess<TileShape_MK, Element, ElementAccum, ArchTag, /*Clear_dQaccum=*/true, Varlen>;
50
+ typename PreprocessKernel::Arguments preprocess_args {
51
+ static_cast<Element const*>(params.o_ptr),
52
+ {seqlen_q, params.dv, params.h, batch_q}, // shape_O
53
+ {params.o_row_stride, _1{}, params.o_head_stride, !is_varlen_q ? params.o_batch_stride : 0}, // stride_O
54
+ static_cast<Element const*>(params.do_ptr),
55
+ {params.do_row_stride, _1{}, params.do_head_stride, !is_varlen_q ? params.do_batch_stride : 0}, // stride_dO
56
+ static_cast<float*>(params.dsoftmax_sum),
57
+ {seqlen_q_rounded, params.h, batch_q}, // shape_dPsum
58
+ {_1{}, seqlen_q_rounded, !is_varlen_q ? params.h * params.seqlen_q_rounded : 0}, // stride_dPsum
59
+ static_cast<float*>(params.softmax_lse_ptr),
60
+ {_1{}, seqlen_q, !is_varlen_q ? params.h * params.seqlen_q : 0}, // stride_LSE
61
+ static_cast<float*>(params.softmax_lse_log2_ptr),
62
+ {_1{}, seqlen_q_rounded, !is_varlen_q ? params.h * params.seqlen_q_rounded : 0}, // stride_LSE_log2
63
+ static_cast<ElementAccum*>(params.dq_accum_ptr),
64
+ {seqlen_q_rounded * params.d_rounded, params.h, batch_q}, // shape_dQaccum
65
+ {_1{}, seqlen_q_rounded * params.d_rounded, !is_varlen_q ? params.d_rounded * seqlen_q_rounded * params.h : 0}, // stride_dQaccum
66
+ params.b,
67
+ params.dq_semaphore,
68
+ params.cu_seqlens_q,
69
+ params.seqused_q
70
+ };
71
+ typename PreprocessKernel::Params preprocess_params = PreprocessKernel::to_underlying_arguments(preprocess_args);
72
+ int num_m_block = cute::ceil_div(params.seqlen_q, kBlockM);
73
+ dim3 grid_m(num_m_block, params.h, params.b);
74
+ cutlass::kernel_launch<PreprocessKernel>(grid_m, PreprocessKernel::MaxThreadsPerBlock, PreprocessKernel::SharedStorageSize, stream, preprocess_params, false /*launch_with_pdl*/);
75
+ CHECK_CUDA_KERNEL_LAUNCH();
76
+
77
+ using TileShape_MNK = cute::Shape<Int<kBlockM>, Int<kBlockN>, Int<kHeadDim>>;
78
+ using ClusterShape = cute::Shape<_1, Int<1>, _1>; // Currently doesn't not support cluster
79
+ // Stages_dS_or_QSm80 is Stages_dS if Sm90 and Stages if Sm80
80
+ static constexpr int Stages = Arch >= 90 ? 2 : Stages_dS_or_QSm80;
81
+ static constexpr int Stages_dS = Arch >= 90 ? Stages_dS_or_QSm80 : 1;
82
+ using CollectiveMainloop = std::conditional_t<
83
+ Arch >= 90,
84
+ flash::CollectiveMainloopBwdSm90<Stages, Stages_dO, Stages_dS, ClusterShape, TileShape_MNK, Element, ElementAccum, cutlass::arch::Sm90,
85
+ Is_causal, Is_local, Has_softcap, Varlen, Deterministic,
86
+ SdP_swapAB, dKV_swapAB, dQ_swapAB, NumMmaWarpGroups, AtomLayoutMSdP, AtomLayoutNdKV, AtomLayoutMdQ, V_in_regs>,
87
+ flash::CollectiveMainloopBwdSm80<Stages, Stages_dO, TileShape_MNK, Element, ElementAccum, cutlass::arch::Sm80,
88
+ Is_causal, Is_local, Has_softcap, Varlen, Deterministic,
89
+ SdP_swapAB, dKV_swapAB, dQ_swapAB, NumMmaWarpGroups, AtomLayoutMSdP, AtomLayoutNdKV, AtomLayoutMdQ, V_in_regs>
90
+ >;
91
+ using CollectiveEpilogue = std::conditional_t<
92
+ !GQA,
93
+ flash::CollectiveEpilogueBwd<TileShape_MNK, Element, ArchTag, CollectiveMainloop::NumMmaThreads, Varlen, dKV_swapAB, NumMmaWarpGroups * (Arch >= 90 ? 1 : cutlass::NumWarpsPerWarpGroup) / AtomLayoutNdKV>,
94
+ flash::CollectiveEpilogueBwdGQA<TileShape_MNK, ElementAccum, ArchTag, CollectiveMainloop::NumMmaThreads, Varlen, Deterministic>
95
+ >;
96
+ using Scheduler = std::conditional_t<
97
+ Is_causal && !Varlen,
98
+ flash::SingleTileBwdLPTScheduler,
99
+ flash::SingleTileScheduler<Varlen, false /*Split*/, false /*PackGQA*/, kBlockN>
100
+ >;
101
+ using AttnKernel = std::conditional_t<
102
+ Arch >= 90,
103
+ flash::enable_sm90_or_later<flash::FlashAttnBwdSm90<CollectiveMainloop, CollectiveEpilogue, Scheduler>>,
104
+ flash::enable_sm80_to_sm89<flash::FlashAttnBwdSm80<CollectiveMainloop, CollectiveEpilogue, Scheduler>>
105
+ >;
106
+
107
+ typename CollectiveMainloop::Arguments mainloop_args {
108
+ static_cast<Element const*>(params.q_ptr),
109
+ {seqlen_q, params.d, params.h, batch_q}, // shape_Q
110
+ {params.q_row_stride, _1{}, params.q_head_stride, !is_varlen_q ? params.q_batch_stride : 0}, // stride_Q
111
+ static_cast<Element const*>(params.k_ptr),
112
+ {seqlen_k, params.d, params.h_k, batch_k}, // shape_K
113
+ {params.k_row_stride, _1{}, params.k_head_stride, !is_varlen_k ? params.k_batch_stride : 0}, // stride_K
114
+ static_cast<Element const*>(params.v_ptr),
115
+ {seqlen_k, params.dv, params.h_k, batch_k}, // shape_V
116
+ {params.v_row_stride, _1{}, params.v_head_stride, !is_varlen_k ? params.v_batch_stride : 0}, // stride_V
117
+ static_cast<Element const*>(params.do_ptr),
118
+ {seqlen_q, params.dv, params.h, batch_q}, // shape_dO
119
+ {params.do_row_stride, _1{}, params.do_head_stride, !is_varlen_q ? params.do_batch_stride : 0}, // stride_dO
120
+ static_cast<ElementAccum*>(params.dq_accum_ptr),
121
+ {seqlen_q_rounded * params.d_rounded, params.h, batch_q}, // shape_dQaccum
122
+ {_1{}, seqlen_q_rounded * params.d_rounded, !is_varlen_q ? params.d_rounded * params.seqlen_q_rounded * params.h : 0}, // stride_dQaccum
123
+ static_cast<float*>(params.softmax_lse_log2_ptr),
124
+ {seqlen_q_rounded, params.h, batch_q}, // shape_LSE
125
+ {_1{}, seqlen_q_rounded, !is_varlen_q ? params.h * params.seqlen_q_rounded : 0}, // stride_LSE_log2
126
+ static_cast<float*>(params.dsoftmax_sum),
127
+ {_1{}, seqlen_q_rounded, !is_varlen_q ? params.h * params.seqlen_q_rounded : 0}, // stride_dPsum
128
+ params.scale_softmax,
129
+ params.window_size_left, params.window_size_right, 0 /*attention_chunk*/,
130
+ params.softcap,
131
+ params.b,
132
+ params.dq_semaphore,
133
+ params.cu_seqlens_q, params.cu_seqlens_k,
134
+ params.seqused_q, params.seqused_k
135
+ };
136
+ // The case work with GQA is ugly but idk how to fix it.
137
+ typename CollectiveEpilogue::Arguments epilogue_args {
138
+ static_cast<typename CollectiveEpilogue::Element*>(!GQA ? params.dk_ptr : params.dk_accum_ptr),
139
+ [&] {
140
+ if constexpr (!GQA) {
141
+ return typename CollectiveEpilogue::ShapedKV {seqlen_k, params.d, params.h, batch_k}; // shape_dK
142
+ } else {
143
+ return typename CollectiveEpilogue::ShapedKV {seqlen_k_rounded * params.d_rounded, params.h_k, batch_k}; // shape_dKaccum
144
+ }
145
+ }(),
146
+ [&] {
147
+ if constexpr (!GQA) {
148
+ return typename CollectiveEpilogue::StridedKV {params.dk_row_stride, _1{}, params.dk_head_stride, !is_varlen_k ? params.dk_batch_stride : 0}; // stride_dK
149
+ } else {
150
+ return typename CollectiveEpilogue::StridedKV {_1{}, params.d_rounded * seqlen_k_rounded, !is_varlen_k ? params.h_k * params.d_rounded * params.seqlen_k_rounded : 0}; // stride_dKaccum
151
+ }
152
+ }(),
153
+ static_cast<typename CollectiveEpilogue::Element*>(!GQA ? params.dv_ptr : params.dv_accum_ptr),
154
+ [&] {
155
+ if constexpr (!GQA) {
156
+ return typename CollectiveEpilogue::ShapedKV {seqlen_k, params.dv, params.h, batch_k}; // shape_dV
157
+ } else {
158
+ return typename CollectiveEpilogue::ShapedKV {seqlen_k_rounded * params.dv_rounded, params.h_k, batch_k}; // shape_dVaccum
159
+ }
160
+ }(),
161
+ [&] {
162
+ if constexpr (!GQA) {
163
+ return typename CollectiveEpilogue::StridedKV {params.dv_row_stride, _1{}, params.dv_head_stride, !is_varlen_k ? params.dv_batch_stride : 0}; // stride_dV
164
+ } else {
165
+ return typename CollectiveEpilogue::StridedKV {_1{}, params.dv_rounded * seqlen_k_rounded, !is_varlen_k ? params.h_k * params.dv_rounded * params.seqlen_k_rounded : 0}; // stride_dVaccum
166
+ }
167
+ }(),
168
+ params.h,
169
+ params.dk_semaphore,
170
+ params.dv_semaphore,
171
+ params.cu_seqlens_k,
172
+ params.seqused_k,
173
+ };
174
+
175
+ int num_blocks_n = cutlass::ceil_div(params.seqlen_k, get<1>(TileShape_MNK{}));
176
+ num_blocks_n = cutlass::round_up(num_blocks_n, size<1>(ClusterShape{}));
177
+ typename flash::TileSchedulerArguments scheduler_args {
178
+ num_blocks_n, params.h, params.b, 1 /*num_splits*/,
179
+ params.h / params.h_k,
180
+ params.seqlen_k,
181
+ params.seqlen_q, params.d, params.dv, sizeof(Element),
182
+ params.tile_count_semaphore, params.cu_seqlens_k, params.seqused_k
183
+ };
184
+
185
+ int device;
186
+ cudaGetDevice(&device);
187
+ typename AttnKernel::Params kernel_params = AttnKernel::to_underlying_arguments({
188
+ mainloop_args, epilogue_args, {device, params.num_sm}, scheduler_args
189
+ });
190
+
191
+ dim3 grid_dims = AttnKernel::get_grid_shape(kernel_params);
192
+ dim3 block_dims = AttnKernel::get_block_shape();
193
+ int smem_size = AttnKernel::SharedStorageSize;
194
+ // int smem_size_q = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_q));
195
+ // int smem_size_do = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_do));
196
+ // int smem_size_ds = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_ds));
197
+ // int smem_size_dqacc = [&] {
198
+ // if constexpr (Arch >= 90) {
199
+ // return sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_dqacc));
200
+ // } else {
201
+ // return 0;
202
+ // }
203
+ // }();
204
+ // int smem_size_k = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_k));
205
+ // int smem_size_v = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_v));
206
+ // int smem_size_lse = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_lse));
207
+ // int smem_size_dpsum = sizeof(decltype((typename CollectiveMainloop::TensorStorage{}).smem_dpsum));
208
+ // printf("smem_size = %d, q = %d, k = %d, v = %d, do = %d, ds = %d, dqacc = %d, lse = %d, dpsum = %d\n", smem_size, smem_size_q, smem_size_k, smem_size_v, smem_size_do, smem_size_ds, smem_size_dqacc, smem_size_lse, smem_size_dpsum);
209
+ if constexpr (size(ClusterShape{}) > 1) {
210
+ void const* kernel = (void const*) cutlass::device_kernel<AttnKernel>;
211
+ if (smem_size >= 48 * 1024) {
212
+ CHECK_CUDA(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
213
+ }
214
+ dim3 cluster_dims(size<0>(ClusterShape{}), size<1>(ClusterShape{}), size<2>(ClusterShape{}));
215
+ cutlass::ClusterLauncher::launch(
216
+ grid_dims, cluster_dims, block_dims, smem_size, stream, kernel, kernel_params, false /*launch_with_pdl*/);
217
+ } else {
218
+ if (smem_size >= 48 * 1024) {
219
+ CHECK_CUDA(cudaFuncSetAttribute(cutlass::device_kernel<AttnKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size));
220
+ }
221
+ cutlass::kernel_launch<AttnKernel>(grid_dims, block_dims, smem_size, stream, kernel_params, false /*launch_with_pdl*/);
222
+ }
223
+ CHECK_CUDA_KERNEL_LAUNCH();
224
+
225
+ using PostprocessKernel = flash::FlashAttnBwdPostprocessConvertdQ<TileShape_MK, Element, ElementAccum, ArchTag,
226
+ AttnKernel::CollectiveMainloop::NumMmaThreads,
227
+ typename AttnKernel::CollectiveMainloop::TiledMmadQ,
228
+ AttnKernel::CollectiveMainloop::dQ_swapAB
229
+ >;
230
+ typename PostprocessKernel::Arguments postprocess_args {
231
+ static_cast<ElementAccum const*>(params.dq_accum_ptr),
232
+ {seqlen_q_rounded * params.d_rounded, params.h, batch_q}, // shape_dQaccum
233
+ {_1{}, seqlen_q_rounded * params.d_rounded, !is_varlen_q ? params.d_rounded * params.seqlen_q_rounded * params.h : 0}, // stride_dQaccum
234
+ static_cast<Element*>(params.dq_ptr),
235
+ {seqlen_q, params.d, params.h, batch_q}, // shape_dQ
236
+ {params.dq_row_stride, _1{}, params.dq_head_stride, params.dq_batch_stride}, // stride_dQ
237
+ params.scale_softmax,
238
+ params.cu_seqlens_q,
239
+ params.seqused_q
240
+ };
241
+ typename PostprocessKernel::Params postprocess_params = PostprocessKernel::to_underlying_arguments(postprocess_args);
242
+ int num_m_block_postprocess = cute::ceil_div(params.seqlen_q, get<0>(TileShape_MK{}));
243
+ dim3 grid_m_postprocess(num_m_block_postprocess, params.h, params.b);
244
+ int smem_size_postprocess = PostprocessKernel::SharedStorageSize;
245
+ if (smem_size_postprocess >= 48 * 1024) {
246
+ CHECK_CUDA(cudaFuncSetAttribute(cutlass::device_kernel<PostprocessKernel>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size_postprocess));
247
+ }
248
+ cutlass::kernel_launch<PostprocessKernel>(grid_m_postprocess, PostprocessKernel::MaxThreadsPerBlock, smem_size_postprocess, stream, postprocess_params, false /*launch_with_pdl*/);
249
+ CHECK_CUDA_KERNEL_LAUNCH();
250
+
251
+ if constexpr (GQA) {
252
+ using TileShape_NK = cute::Shape<Int<kBlockN>, Int<kHeadDim>>;
253
+ using PostprocessKerneldKV = flash::FlashAttnBwdPostprocessConvertdQ<TileShape_NK, Element, ElementAccum, ArchTag,
254
+ AttnKernel::CollectiveEpilogue::NumEpilogueThreads,
255
+ typename AttnKernel::CollectiveMainloop::TiledMmadKV,
256
+ AttnKernel::CollectiveMainloop::dKV_swapAB
257
+ >;
258
+ typename PostprocessKerneldKV::Arguments postprocess_dK_args {
259
+ static_cast<ElementAccum const*>(params.dk_accum_ptr),
260
+ {seqlen_k_rounded * params.d_rounded, params.h_k, batch_k}, // shape_dKaccum
261
+ {_1{}, seqlen_k_rounded * params.d_rounded, !is_varlen_k ? params.d_rounded * params.seqlen_k_rounded * params.h_k : 0}, // stride_dKaccum
262
+ static_cast<Element*>(params.dk_ptr),
263
+ {seqlen_k, params.d, params.h_k, batch_k}, // shape_dK
264
+ {params.dk_row_stride, _1{}, params.dk_head_stride, params.dk_batch_stride}, // stride_dK
265
+ 1.f,
266
+ params.cu_seqlens_k,
267
+ params.seqused_k
268
+ };
269
+ typename PostprocessKerneldKV::Params postprocess_dK_params = PostprocessKerneldKV::to_underlying_arguments(postprocess_dK_args);
270
+ typename PostprocessKerneldKV::Arguments postprocess_dV_args {
271
+ static_cast<ElementAccum const*>(params.dv_accum_ptr),
272
+ {seqlen_k_rounded * params.dv_rounded, params.h_k, batch_k}, // shape_dVaccum
273
+ {_1{}, seqlen_k_rounded * params.dv_rounded, !is_varlen_k ? params.dv_rounded * params.seqlen_k_rounded * params.h_k : 0}, // stride_dVaccum
274
+ static_cast<Element*>(params.dv_ptr),
275
+ {seqlen_k, params.dv, params.h_k, batch_k}, // shape_dV
276
+ {params.dv_row_stride, _1{}, params.dv_head_stride, params.dv_batch_stride}, // stride_dV
277
+ 1.f,
278
+ params.cu_seqlens_k,
279
+ params.seqused_k
280
+ };
281
+ typename PostprocessKerneldKV::Params postprocess_dV_params = PostprocessKerneldKV::to_underlying_arguments(postprocess_dV_args);
282
+ int num_n_block_postprocess = cute::ceil_div(params.seqlen_k, get<0>(TileShape_NK{}));
283
+ dim3 grid_n_postprocess(num_n_block_postprocess, params.h_k, params.b);
284
+ int smem_size_postprocess = PostprocessKerneldKV::SharedStorageSize;
285
+ if (smem_size_postprocess >= 48 * 1024) {
286
+ CHECK_CUDA(cudaFuncSetAttribute(cutlass::device_kernel<PostprocessKerneldKV>, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size_postprocess));
287
+ }
288
+ cutlass::kernel_launch<PostprocessKerneldKV>(grid_n_postprocess, PostprocessKerneldKV::MaxThreadsPerBlock, smem_size_postprocess, stream, postprocess_dK_params, false /*launch_with_pdl*/);
289
+ CHECK_CUDA_KERNEL_LAUNCH();
+ cutlass::kernel_launch<PostprocessKerneldKV>(grid_n_postprocess, PostprocessKerneldKV::MaxThreadsPerBlock, smem_size_postprocess, stream, postprocess_dV_params, false /*launch_with_pdl*/);
+ CHECK_CUDA_KERNEL_LAUNCH();
+ }
+
+ }
+
+ template<int Arch, typename T, int kBlockM, int kBlockN, int kHeadDim, bool Is_causal, bool Is_local, bool Has_softcap,
+ int Stages_dO=2, int Stages_dS_or_QSm80=2,
+ bool SdP_swapAB=true, bool dKV_swapAB=false, bool dQ_swapAB=false,
+ int NumMmaWarpGroups=2, int AtomLayoutMSdP=1, int AtomLayoutNdKV=2, int AtomLayoutMdQ=1,
+ bool V_in_regs=false>
+ void run_mha_bwd_dispatch(Flash_bwd_params &params, cudaStream_t stream) {
+ VARLEN_SWITCH(params.cu_seqlens_q != nullptr || params.cu_seqlens_k != nullptr, Varlen, [&] {
+ BOOL_SWITCH(params.h != params.h_k, GQA, [&] {
+ // BOOL_SWITCH(params.deterministic, Deterministic, [&] {
+ // run_flash_bwd<kHeadDim, kBlockM, kBlockN, T, Is_causal, Is_local, Has_softcap, Varlen, false, GQA, Stages_dO, Stages_dS_or_QSm80, SdP_swapAB, dKV_swapAB, dQ_swapAB, NumMmaWarpGroups, AtomLayoutMSdP, AtomLayoutNdKV, AtomLayoutMdQ>(params, stream);
+ run_flash_bwd<Arch, kHeadDim, kBlockM, kBlockN, T, Is_causal, Is_local, Has_softcap, Varlen /*Varlen*/, false /*Deterministic*/, GQA, Stages_dO, Stages_dS_or_QSm80, SdP_swapAB, dKV_swapAB, dQ_swapAB, NumMmaWarpGroups, AtomLayoutMSdP, AtomLayoutNdKV, AtomLayoutMdQ, V_in_regs>(params, stream);
+ // });
+ });
+ });
+ }
+
+
+ template<int Arch, typename T, bool Has_softcap>
+ void run_mha_bwd_hdim64(Flash_bwd_params &params, cudaStream_t stream) {
+ CAUSAL_LOCAL_SWITCH(params.is_causal, params.is_local, Is_causal, Is_local, [&] {
+ if constexpr (Arch >= 90) {
+ if constexpr (Is_causal && Has_softcap) {
+ // register spill with 128 x 128
+ run_mha_bwd_dispatch<Arch, T, 96, 128, 64, Is_causal, Is_local, Has_softcap, 2, 2, true, false, true, 2, 1, 2, 2, false>(params, stream);
+ } else {
+ // With ShuffleStats we no longer have register spilling when Has_softcap and using 128 x 128 block.
+ run_mha_bwd_dispatch<Arch, T, 128, 128, 64, Is_causal, Is_local, Has_softcap, 2, 2, true, false, false, 2, 1, 2, 2, false>(params, stream);
+ }
+ } else if constexpr (Arch == 86 || Arch == 89) {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 64, Is_causal, Is_local, Has_softcap, 2, 2, false, false, false, 2, 2, 4, 2, true>(params, stream);
+ // run_mha_bwd_dispatch<Arch, T, 96, 96, 64, Is_causal, Is_local, Has_softcap, 1, 2, false, true, true, 2, 2, 4, 4, false>(params, stream);
+ // run_mha_bwd_dispatch<Arch, T, 80, 128, 64, Is_causal, Is_local, Has_softcap, 1, 2, true, false, true, 2, 2, 4, 2, true>(params, stream);
+ // run_mha_bwd_dispatch<Arch, T, 96, 128, 64, Is_causal, Is_local, Has_softcap, 1, 2, true, false, true, 2, 1, 8, 4, false>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 128, 128, 64, Is_causal, Is_local, Has_softcap, 2, 2, false, false, false, 2, 4, 4, 4, false>(params, stream);
+ }
+ });
+ }
+
+ template<int Arch, typename T, bool Has_softcap>
+ void run_mha_bwd_hdim96(Flash_bwd_params &params, cudaStream_t stream) {
+ CAUSAL_LOCAL_SWITCH(params.is_causal, params.is_local, Is_causal, Is_local, [&] {
+ if constexpr (Arch >= 90) {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 96, Is_causal, Is_local, Has_softcap, 2, 2, true, false, false, 2, 1, 2, 1, true>(params, stream);
+ } else if constexpr (Arch == 86 || Arch == 89) {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 96, Is_causal, Is_local, Has_softcap, 1, 2, false, false, false, 2, 2, 4, 2, true>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 96, Is_causal, Is_local, Has_softcap, 2, 2, false, false, false, 2, 2, 4, 2, false>(params, stream);
+ }
+ });
+ }
+
+ template<int Arch, typename T, bool Has_softcap>
+ void run_mha_bwd_hdim128(Flash_bwd_params &params, cudaStream_t stream) {
+ CAUSAL_LOCAL_SWITCH(params.is_causal, params.is_local, Is_causal, Is_local, [&] {
+ if constexpr (Arch >= 90) {
+ if constexpr (Is_causal || Is_local || Has_softcap) {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 128, Is_causal, Is_local, Has_softcap, 2, 2, true, false, false, 2, 1, 2, 1, false>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 80, 128, 128, Is_causal, Is_local, Has_softcap, 2, 2, true, false, true, 2, 1, 2, 1, false>(params, stream);
+ }
+ } else if constexpr (Arch == 86 || Arch == 89) {
+ run_mha_bwd_dispatch<Arch, T, 64, 96, 128, Is_causal, Is_local, Has_softcap, 1, 2, false, false, false, 2, 2, 2, 2, true>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 64, 128, 128, Is_causal, Is_local, Has_softcap, 2, 2, false, false, false, 2, 2, 2, 2, false>(params, stream);
+ }
+ });
+ }
+
+ template<int Arch, typename T, bool Has_softcap>
+ void run_mha_bwd_hdim192(Flash_bwd_params &params, cudaStream_t stream) {
+ CAUSAL_LOCAL_SWITCH(params.is_causal, params.is_local, Is_causal, Is_local, [&] {
+ if constexpr (Arch >= 90) {
+ run_mha_bwd_dispatch<Arch, T, 64, 96, 192, Is_causal, Is_local, Has_softcap, 1, 1, false, true, false, 3, 1, 1, 1, false>(params, stream);
+ } else if constexpr (Arch == 86 || Arch == 89) {
+ run_mha_bwd_dispatch<Arch, T, 64, 64, 192, Is_causal, Is_local, Has_softcap, 1, 1, false, false, false, 2, 2, 2, 2, true>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 64, 80, 192, Is_causal, Is_local, Has_softcap, 1, 2, false, true, false, 2, 4, 2, 2, false>(params, stream);
+ }
+ });
+ }
+
+ template<int Arch, typename T, bool Has_softcap>
+ void run_mha_bwd_hdim256(Flash_bwd_params &params, cudaStream_t stream) {
+ CAUSAL_LOCAL_SWITCH(params.is_causal, params.is_local, Is_causal, Is_local, [&] {
+ if constexpr (Arch >= 90) {
+ run_mha_bwd_dispatch<Arch, T, 64, 80, 256, Is_causal, Is_local, Has_softcap, 1, 1, false, true, true, 2, 1, 1, 1, false>(params, stream);
+ } else if constexpr (Arch == 86 || Arch == 89) {
+ run_mha_bwd_dispatch<Arch, T, 32, 64, 256, Is_causal, Is_local, Has_softcap, 1, 1, false, false, false, 2, 2, 2, 1, true>(params, stream);
+ // run_mha_bwd_dispatch<Arch, T, 64, 32, 256, Is_causal, Is_local, Has_softcap, 1, 1, false, false, false, 2, 4, 1, 2, true>(params, stream);
+ } else {
+ run_mha_bwd_dispatch<Arch, T, 64, 64, 256, Is_causal, Is_local, Has_softcap, 1, 1, false, false, false, 2, 4, 2, 2, false>(params, stream);
+ }
+ });
+ }
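
The dispatch chain above (VARLEN_SWITCH / BOOL_SWITCH / CAUSAL_LOCAL_SWITCH) turns runtime flags into compile-time template parameters, so each combination of arch, head dim, masking mode, and tile shape gets its own fully specialized kernel. A minimal, self-contained sketch of that switch pattern; the names `EXAMPLE_BOOL_SWITCH` and `run_bwd_example` are illustrative, not the repo's, though the repo's switch macros follow the same shape:

```cpp
#include <cstdio>

// Stand-in for run_flash_bwd<...>: templated on flags that the dispatch
// fixes at compile time. Purely illustrative.
template <bool Varlen, bool GQA>
void run_bwd_example() { std::printf("Varlen=%d GQA=%d\n", int(Varlen), int(GQA)); }

// BOOL_SWITCH-style macro: bind a runtime bool to a constexpr name, then
// invoke the continuation lambda in both branches so every combination of
// flags is instantiated at compile time.
#define EXAMPLE_BOOL_SWITCH(COND, NAME, ...)        \
    [&] {                                           \
        if (COND) {                                 \
            static constexpr bool NAME = true;      \
            return __VA_ARGS__();                   \
        } else {                                    \
            static constexpr bool NAME = false;     \
            return __VA_ARGS__();                   \
        }                                           \
    }()

int main() {
    bool is_varlen = true;   // e.g. params.cu_seqlens_q != nullptr
    bool is_gqa = false;     // e.g. params.h != params.h_k
    EXAMPLE_BOOL_SWITCH(is_varlen, Varlen, [&] {
        EXAMPLE_BOOL_SWITCH(is_gqa, GQA, [&] {
            run_bwd_example<Varlen, GQA>();
        });
    });
}
```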
Code/Baselines/flash-attention/hopper/flash_bwd_preprocess_kernel.h ADDED
@@ -0,0 +1,252 @@
+ /******************************************************************************
+ * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+ ******************************************************************************/
+
+ #pragma once
+
+ #include "cute/tensor.hpp"
+
+ #include <cutlass/cutlass.h>
+ #include <cutlass/array.h>
+ #include <cutlass/numeric_types.h>
+ #include <cutlass/numeric_conversion.h>
+
+ #include "seqlen.h"
+ #include "utils.h"
+
+ namespace flash {
+
+ using namespace cute;
+
+ template <class TileShape_MK_, class Element, class ElementAccum, class ArchTag_, bool Clear_dQaccum, bool Varlen>
+ class FlashAttnBwdPreprocess {
+
+ public:
+
+ // Type Aliases
+ using TileShape_MK = TileShape_MK_;
+ using ArchTag = ArchTag_;
+
+ static_assert(std::is_same_v<Element, cutlass::half_t> && ArchTag::kMinComputeCapability >= 75 ||
+ std::is_same_v<Element, cutlass::bfloat16_t> && ArchTag::kMinComputeCapability >= 80 ||
+ std::is_same_v<Element, cutlass::float_e4m3_t> && ArchTag::kMinComputeCapability >= 89);
+
+ static constexpr uint32_t MaxThreadsPerBlock = 256;
+ static constexpr uint32_t MinBlocksPerMultiprocessor = 2;
+ static constexpr int SharedStorageSize = 0;
+
+ static constexpr int kGmemElemsPerLoad = sizeof(cute::uint128_t) / sizeof(Element);
+ static_assert(get<1>(TileShape_MK{}) % kGmemElemsPerLoad == 0, "Headdim must be a multiple of kGmemElemsPerLoad");
+ static constexpr int kBlockM = get<0>(TileShape_MK{});
+ static constexpr int kHeadDim = get<1>(TileShape_MK{});
+ // We want kBlockKGmem to be a power of 2 so that when we do the summing,
+ // it's just between threads in the same warp
+ static constexpr int kBlockKGmem = kHeadDim % 128 == 0 ? 128 : (kHeadDim % 64 == 0 ? 64 : 32);
+ static constexpr int kGmemThreadsPerRow = kBlockKGmem / kGmemElemsPerLoad;
+ static_assert(MaxThreadsPerBlock % kGmemThreadsPerRow == 0, "MaxThreadsPerBlock must be a multiple of kGmemThreadsPerRow");
+ using GmemLayoutAtom = Layout<Shape <Int<MaxThreadsPerBlock / kGmemThreadsPerRow>, Int<kGmemThreadsPerRow>>,
+ Stride<Int<kGmemThreadsPerRow>, _1>>;
+ using GmemTiledCopy = decltype(
+ make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, Element>{},
+ GmemLayoutAtom{},
+ Layout<Shape<_1, Int<kGmemElemsPerLoad>>>{})); // Val layout, 8 or 16 vals per load
+
+ static constexpr int kGmemElemsPerLoadAccum = sizeof(cute::uint128_t) / sizeof(ElementAccum);
+ static_assert((kBlockM * kHeadDim / kGmemElemsPerLoadAccum) % MaxThreadsPerBlock == 0, "MaxThreadsPerBlock must divide kBlockM * kHeadDim / kGmemElemsPerLoadAccum");
+ using GmemLayoutAtomAccum = Layout<Shape<Int<MaxThreadsPerBlock>>>;
+ using GmemTiledCopyAccum = decltype(
+ make_tiled_copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{},
+ GmemLayoutAtomAccum{},
+ Layout<Shape<Int<kGmemElemsPerLoadAccum>>>{})); // Val layout, 4 vals per store
+
+ using ShapeO = cute::Shape<int32_t, int32_t, int32_t, int32_t>; // (seqlen_q, d, head, batch)
+ using StrideO = cute::Stride<int64_t, _1, int64_t, int64_t>;
+ using ShapedPsum = cute::Shape<int32_t, int32_t, int32_t>; // (seqlen_q, head, batch)
+ using StridedPsum = cute::Stride<_1, int64_t, int64_t>;
+ using ShapedQaccum = cute::Shape<int32_t, int32_t, int32_t>; // (seqlen_q * d, head, batch)
+ using StridedQaccum = cute::Stride<_1, int64_t, int64_t>;
+
+ // Device side arguments
+ struct Arguments {
+ Element const* ptr_O;
+ ShapeO const shape_O;
+ StrideO const stride_O;
+ Element const* ptr_dO;
+ StrideO const stride_dO;
+ float* ptr_dPsum;
+ ShapedPsum const shape_dPsum;
+ StridedPsum const stride_dPsum;
+ float const* ptr_LSE;
+ StridedPsum const stride_LSE;
+ float* ptr_LSE_log2;
+ StridedPsum const stride_LSE_log2;
+ ElementAccum* ptr_dQaccum;
+ ShapedQaccum const shape_dQaccum;
+ StridedQaccum const stride_dQaccum;
+ int num_batch; // We need this to know the size of dq_semaphore in case of varlen
+ int* dq_semaphore;
+ int const* cu_seqlens = nullptr;
+ int const* seqused = nullptr;
+ };
+
+ // Kernel entry point API
+ struct Params {
+ Element const* ptr_O;
+ ShapeO const shape_O;
+ StrideO const stride_O;
+ Element const* ptr_dO;
+ StrideO const stride_dO;
+ float* ptr_dPsum;
+ ShapedPsum const shape_dPsum;
+ StridedPsum const stride_dPsum;
+ float const* ptr_LSE;
+ StridedPsum const stride_LSE;
+ float* ptr_LSE_log2;
+ StridedPsum const stride_LSE_log2;
+ ElementAccum* ptr_dQaccum;
+ ShapedQaccum const shape_dQaccum;
+ StridedQaccum const stride_dQaccum;
+ int num_batch;
+ int* dq_semaphore;
+ int const* cu_seqlens = nullptr;
+ int const* seqused = nullptr;
+ };
+
+ // Convert to underlying arguments. In this case, a simple copy for the aliased type.
+ static
+ Params
+ to_underlying_arguments(Arguments const& args) {
+ return {
+ args.ptr_O,
+ args.shape_O,
+ args.stride_O,
+ args.ptr_dO,
+ args.stride_dO,
+ args.ptr_dPsum,
+ args.shape_dPsum,
+ args.stride_dPsum,
+ args.ptr_LSE,
+ args.stride_LSE,
+ args.ptr_LSE_log2,
+ args.stride_LSE_log2,
+ args.ptr_dQaccum,
+ args.shape_dQaccum,
+ args.stride_dQaccum,
+ args.num_batch,
+ args.dq_semaphore,
+ args.cu_seqlens,
+ args.seqused
+ };
+ }
+
+ CUTLASS_DEVICE
+ void
+ operator()(Params const& params, [[maybe_unused]] char* smem_buf) {
+
+ static constexpr int kBlockM = get<0>(TileShape_MK{});
+
+ int const thread_idx = threadIdx.x;
+ int const m_block = blockIdx.x;
+ int const bidh = blockIdx.y;
+ int const bidb = blockIdx.z;
+
+ flash::SeqlenInfo<Varlen, kBlockM> seqlen_info(bidb, size<0>(params.shape_O), params.cu_seqlens, params.seqused);
+ bool const is_varlen = Varlen && params.cu_seqlens;
+ int const seqlen_o = seqlen_info.seqlen;
+ if (is_varlen && m_block * kBlockM >= seqlen_o) { return; }
+
+ Tensor mO = make_tensor(make_gmem_ptr(params.ptr_O), params.shape_O, params.stride_O)(_, _, bidh, !is_varlen ? bidb : 0);
+ Tensor gO = local_tile(cute::domain_offset(make_coord(seqlen_info.offset, _0{}), mO), TileShape_MK{}, make_coord(m_block, _0{}));  // (M, K)
+ Tensor mdO = make_tensor(make_gmem_ptr(params.ptr_dO), params.shape_O, params.stride_dO)(_, _, bidh, !is_varlen ? bidb : 0);
+ Tensor gdO = local_tile(cute::domain_offset(make_coord(seqlen_info.offset, _0{}), mdO), TileShape_MK{}, make_coord(m_block, _0{}));  // (M, K)
+
+ auto shape_LSE = select<0, 2, 3>(params.shape_O);
+ Tensor mLSE = make_tensor(make_gmem_ptr(params.ptr_LSE), shape_LSE, params.stride_LSE)(_, bidh, !is_varlen ? bidb : 0);
+ Tensor gLSE = local_tile(cute::domain_offset(make_coord(seqlen_info.offset), mLSE), Shape<Int<kBlockM>>{}, make_coord(m_block));
+ static_assert(kBlockM <= MaxThreadsPerBlock);
+ float lse = thread_idx < seqlen_o - m_block * kBlockM && thread_idx < kBlockM ? gLSE(thread_idx) : INFINITY;
+
+ GmemTiledCopy gmem_tiled_copy_O;
+ auto gmem_thr_copy_O = gmem_tiled_copy_O.get_thread_slice(thread_idx);
+
+ Tensor tOgO = gmem_thr_copy_O.partition_S(gO);
+ Tensor tOgdO = gmem_thr_copy_O.partition_S(gdO);
+ // Construct identity layout for gO
+ Tensor cO = cute::make_identity_tensor(TileShape_MK{});  // (BLK_M,BLK_K) -> (blk_m,blk_k)
+ // Repeat the partitioning with identity layouts
+ Tensor tOcO = gmem_thr_copy_O.partition_D(cO);
+ Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgO)));
+ #pragma unroll
+ for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(_0{}, _0{}, k)) < get<1>(params.shape_O); }
+
+ // (8, kBlockM / 32, kHeadDim / 64) or (8, kBlockM / 16, kHeadDim / 128)
+ Tensor tOrO = make_fragment_like(tOgO);
+ Tensor tOrdO = make_fragment_like(tOgdO);
+ flash::copy</*Is_even_MN=*/false, /*Is_even_K=*/false, /*Clear_OOB_MN=*/true, /*Clear_OOB_K=*/true>(
+ gmem_tiled_copy_O, tOgO, tOrO, tOcO, tOpO, seqlen_o - m_block * kBlockM
+ );
+ flash::copy</*Is_even_MN=*/false, /*Is_even_K=*/false, /*Clear_OOB_MN=*/true, /*Clear_OOB_K=*/true>(
+ gmem_tiled_copy_O, tOgdO, tOrdO, tOcO, tOpO, seqlen_o - m_block * kBlockM
+ );
+ // if (threadIdx.x == 222) { printf("bidx = %d, bidy = %d, bidz = %d, seqlen_o = %d, m_block = %d, seqlen_o - m_block * kBlockM = %d, tOgO addr = %p\n", blockIdx.x, blockIdx.y, blockIdx.z, seqlen_o, m_block, seqlen_o - m_block * kBlockM, &tOgO(0));}
+
+ // Reshape from e.g. (8, kBlockM / 32, kHeadDim / 64) to (kBlockM / 32, (8, kHeadDim / 64))
+ Layout l = make_layout(get<1>(tOrO.layout()), make_layout(get<0>(tOrO.layout()), get<2>(tOrO.layout())));
+ Tensor tOrO_l = make_tensor(tOrO.data(), l);
+ Tensor o_fp32 = make_tensor_like<float>(tOrO_l);
+ flash::convert_type_out(tOrO_l, o_fp32);
+ Tensor tOrdO_l = make_tensor(tOrdO.data(), l);
+ Tensor do_fp32 = make_tensor_like<float>(tOrdO_l);
+ flash::convert_type_out(tOrdO_l, do_fp32);
+ // Sum across the last dimension
+ Tensor dP_sum = make_tensor<float>(make_shape(size<0>(o_fp32)));
+ #pragma unroll
+ for (int mi = 0; mi < size<0>(o_fp32); ++mi) {
+ float dP_sum_cur = do_fp32(mi, 0) * o_fp32(mi, 0);
+ #pragma unroll
+ for (int ni = 1; ni < size<1>(o_fp32); ni++) {
+ dP_sum_cur += do_fp32(mi, ni) * o_fp32(mi, ni);
+ }
+ flash::SumOp<float> sum_op;
+ dP_sum(mi) = flash::Allreduce<kGmemThreadsPerRow>::run(dP_sum_cur, sum_op);
+ }
+
+ Tensor mdPsum = make_tensor(make_gmem_ptr(params.ptr_dPsum), params.shape_dPsum, params.stride_dPsum)(_, bidh, !is_varlen ? bidb : 0);
+ Tensor gdPsum = local_tile(cute::domain_offset(make_coord(seqlen_info.offset_padded), mdPsum), Shape<Int<kBlockM>>{}, make_coord(m_block));
+ if (get<1>(tOcO(_0{}, _0{}, _0{})) == 0) {
+ #pragma unroll
+ for (int mi = 0; mi < size(dP_sum); ++mi) {
+ int const row = get<0>(tOcO(_0{}, mi, _0{}));
+ gdPsum(row) = row < seqlen_o - m_block * kBlockM ? dP_sum(mi) : 0;
+ }
+ }
+
+ int const seqlen_rounded = cute::round_up(seqlen_o, kBlockM);
+ Tensor mLSElog2 = make_tensor(make_gmem_ptr(params.ptr_LSE_log2), params.shape_dPsum, params.stride_LSE_log2)(_, bidh, !is_varlen ? bidb : 0);
+ Tensor gLSElog2 = local_tile(cute::domain_offset(make_coord(seqlen_info.offset_padded), mLSElog2), Shape<Int<kBlockM>>{}, make_coord(m_block));
+ if (thread_idx < seqlen_rounded - m_block * kBlockM && thread_idx < kBlockM) {
+ gLSElog2(thread_idx) = lse == -INFINITY ? 0.f : lse * float(M_LOG2E);
+ }
+
+ if constexpr (Clear_dQaccum) {
+ Tensor mdQaccum = make_tensor(make_gmem_ptr(params.ptr_dQaccum), params.shape_dQaccum, params.stride_dQaccum)(_, bidh, !is_varlen ? bidb : 0);
+ Tensor gdQaccum = local_tile(cute::domain_offset(make_coord(seqlen_info.offset_padded * kHeadDim), mdQaccum), Shape<Int<kBlockM * kHeadDim>>{}, make_coord(m_block));
+ GmemTiledCopyAccum gmem_tiled_copy_dQaccum;
+ auto gmem_thr_copy_dQaccum = gmem_tiled_copy_dQaccum.get_thread_slice(thread_idx);
+ Tensor tdQgdQaccum = gmem_thr_copy_dQaccum.partition_D(gdQaccum);
+ Tensor zero = make_fragment_like(tdQgdQaccum);
+ clear(zero);
+ cute::copy(Copy_Atom<AutoVectorizingCopyWithAssumedAlignment<128>, ElementAccum>{}, zero, tdQgdQaccum);
+ }
+
+ if (params.dq_semaphore != nullptr && thread_idx == 0) {
+ int const num_batch = params.num_batch;
+ int const num_head = get<2>(params.shape_O);
+ params.dq_semaphore[bidh + bidb * num_head + m_block * num_head * num_batch] = 0;
+ }
+
+ }
+
+ };
+
+ } // namespace flash
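
In scalar terms, the preprocess kernel above computes, for every query row i, dPsum[i] = Σ_d dO[i][d]·O[i][d] in fp32, rewrites LSE in base 2 (LSE·log2(e), zeroed for fully masked rows), and optionally clears the dQ accumulator. A host-side reference sketch of that math; shapes and names are illustrative, not the kernel's tensors:

```cpp
#include <cmath>
#include <vector>

// Reference for the per-row quantities FlashAttnBwdPreprocess writes out.
// O, dO: [seqlen][headdim] (already in fp32); LSE: [seqlen].
void bwd_preprocess_reference(std::vector<std::vector<float>> const& O,
                              std::vector<std::vector<float>> const& dO,
                              std::vector<float> const& LSE,
                              std::vector<float>& dPsum,
                              std::vector<float>& LSE_log2) {
    float const kLog2e = 1.4426950408889634f;  // log2(e), i.e. M_LOG2E
    for (size_t i = 0; i < O.size(); ++i) {
        float s = 0.f;  // dPsum[i] = sum_d dO[i][d] * O[i][d]
        for (size_t d = 0; d < O[i].size(); ++d) { s += dO[i][d] * O[i][d]; }
        dPsum[i] = s;
        // Fully masked rows carry LSE = -inf; store 0 for them.
        LSE_log2[i] = (LSE[i] == -INFINITY) ? 0.f : LSE[i] * kLog2e;
    }
}
```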
Code/Baselines/flash-attention/hopper/flash_prepare_scheduler.cu ADDED
@@ -0,0 +1,124 @@
+ /******************************************************************************
+ * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+ ******************************************************************************/
+
+ #include "cutlass/fast_math.h"
+ #include "cutlass/barrier.h"
+ #include "cutlass/arch/barrier.h"
+
+ #include "cutlass/arch/grid_dependency_control.h"
+
+ #include "flash.h"
+
+ namespace flash {
+
+ __global__ void prepare_varlen_num_blocks_kernel(
+ int seqlen_q_static, int seqlen_k_static, int seqlen_k_new_static,
+ int const* const cu_seqlens_q, int const* const cu_seqlens_k, int const* const cu_seqlens_k_new,
+ int const* const seqused_q, int const* const seqused_k, int const* const leftpad_k_ptr,
+ int num_batch, int num_head, int qhead_per_khead, int num_sm, int num_splits_static,
+ cutlass::FastDivmod blockm_divmod, cutlass::FastDivmod blockn_divmod,
+ int* const tile_count_semaphore,
+ // int* const num_m_blocks_ptr,
+ int* const num_splits_dynamic_ptr,
+ bool enable_pdl) {
+
+ static constexpr int kNumBatchPerWarp = cutlass::NumThreadsPerWarp - 1;
+ static constexpr int kSmemSize = 1;
+ // Assume that there's only one block in the grid
+ __shared__ int total_blocks_smem[kSmemSize];
+
+ // There's only 1 block in the grid, so might as well start launching the main attn kernel
+ if (enable_pdl) { cutlass::arch::launch_dependent_grids(); }
+
+ if (threadIdx.x < kSmemSize) { total_blocks_smem[threadIdx.x] = 0; }
+ __syncthreads();
+
+ if (threadIdx.x == 0 && tile_count_semaphore) { *tile_count_semaphore = 0; }
+
+ int lane = threadIdx.x % cutlass::NumThreadsPerWarp;
+
+ auto get_num_m_blocks = [&](int bidb_start) {
+ int batch_idx = lane + bidb_start;
+ int seqlen;
+ if (seqused_q) {
+ seqlen = batch_idx < num_batch ? seqused_q[batch_idx] : 0;
+ } else if (cu_seqlens_q) {
+ int cur_cu_seqlen = batch_idx <= num_batch ? cu_seqlens_q[batch_idx] : 0;
+ int next_cu_seqlen = __shfl_down_sync(0xffffffff, cur_cu_seqlen, 1);
+ seqlen = next_cu_seqlen - cur_cu_seqlen;
+ } else {
+ seqlen = seqlen_q_static;
+ }
+ seqlen *= qhead_per_khead;
+ return batch_idx < num_batch && lane < kNumBatchPerWarp
+ ? blockm_divmod.div(seqlen + blockm_divmod.divisor - 1) : 0;
+ };
+
+ auto get_num_n_blocks = [&](int bidb_start) {
+ int batch_idx = lane + bidb_start;
+ int leftpad_k = batch_idx < num_batch && leftpad_k_ptr != nullptr ? leftpad_k_ptr[batch_idx] : 0;
+ int seqlen;
+ if (seqused_k) {
+ seqlen = batch_idx < num_batch ? seqused_k[batch_idx] : 0;
+ } else if (cu_seqlens_k) {
+ int cur_cu_seqlen = batch_idx <= num_batch ? cu_seqlens_k[batch_idx] : 0;
+ int next_cu_seqlen = __shfl_down_sync(0xffffffff, cur_cu_seqlen, 1);
+ seqlen = next_cu_seqlen - cur_cu_seqlen;
+ } else {
+ seqlen = seqlen_k_static;
+ }
+ int seqlen_new;
+ if (cu_seqlens_k_new) {
+ int cur_cu_seqlen_new = batch_idx <= num_batch ? cu_seqlens_k_new[batch_idx] : 0;
+ int next_cu_seqlen_new = __shfl_down_sync(0xffffffff, cur_cu_seqlen_new, 1);
+ seqlen_new = next_cu_seqlen_new - cur_cu_seqlen_new;
+ } else {
+ seqlen_new = seqlen_k_new_static;
+ }
+ // if (threadIdx.x == 0) { printf("seqlen = %d, seqlen_new = %d, leftpad_k = %d\n", seqlen, seqlen_new, leftpad_k); }
+ seqlen = seqlen - leftpad_k + seqlen_new;
+ return batch_idx < num_batch && lane < kNumBatchPerWarp
+ ? blockn_divmod.div(seqlen + blockn_divmod.divisor - 1) : 0;
+ };
+
+ int warp_idx = threadIdx.x / cutlass::NumThreadsPerWarp;
+ int bidb_start = kNumBatchPerWarp * warp_idx;
+ int num_m_blocks = get_num_m_blocks(bidb_start);
+ int num_n_blocks = get_num_n_blocks(bidb_start);
+
+ int total_blocks = num_m_blocks * num_n_blocks;
+ // Warp sum
+ #pragma unroll
+ for (int i = cutlass::NumThreadsPerWarp / 2; i >= 1; i /= 2) {
+ total_blocks += __shfl_down_sync(0xffffffff, total_blocks, i);
+ }
+ if (lane == 0) { atomicAdd(total_blocks_smem, total_blocks); }
+ __syncthreads();
+ total_blocks = total_blocks_smem[0];
+ // 10% margin
+ int blocks_per_sm = static_cast<int>(ceilf(float(total_blocks) * 1.1f * float(num_head) / float(num_sm)));
+ // blocks_per_sm = std::max(1, blocks_per_sm); // 1 is the minimum number of blocks per SM
+ int num_splits_dynamic = std::max(std::min((num_n_blocks + blocks_per_sm - 1) / blocks_per_sm, num_splits_static), 1);
+ if (bidb_start + lane < num_batch && lane < kNumBatchPerWarp) {
+ num_splits_dynamic_ptr[bidb_start + lane] = num_splits_dynamic;
+ // printf("idx = %d, num_m_blocks = %d, num_n_blocks = %d, num_split_static = %d, num_splits_dynamic = %d\n", bidb_start + lane, num_m_blocks_ptr[bidb_start + lane], num_n_blocks, num_splits_static, num_splits_dynamic);
+ }
+ }
+
+ } // flash
+
+ void prepare_varlen_num_blocks(Flash_fwd_params &params, cudaStream_t stream, bool packgqa,
+ int blockM, int blockN, bool enable_pdl) {
+ // Only support batch <= 992 (32 warps, each with 31 batches)
+ int qhead_per_khead = !packgqa ? 1 : cutlass::ceil_div(params.h, params.h_k);
+ flash::prepare_varlen_num_blocks_kernel<<<1 /*grid*/, 1024 /*block*/, 0, stream>>>(
+ params.seqlen_q, params.seqlen_k, params.seqlen_knew,
+ params.cu_seqlens_q, params.cu_seqlens_k, params.cu_seqlens_knew,
+ params.seqused_q, params.seqused_k, params.leftpad_k,
+ params.b, !packgqa ? params.h : params.h_k, qhead_per_khead, params.num_sm, params.num_splits,
+ cutlass::FastDivmod(blockM), cutlass::FastDivmod(blockN),
+ params.tile_count_semaphore,
+ // params.num_m_blocks_ptr,
+ params.num_splits_dynamic_ptr, enable_pdl);
+ }
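
The per-batch split heuristic in the kernel above can be restated on the host as a scalar function: estimate how many n-blocks each SM must absorb (with a 10% margin), then cap the split count so every split still covers at least that many blocks. A minimal sketch under those assumptions:

```cpp
#include <algorithm>
#include <cmath>

// Scalar restatement of the heuristic in prepare_varlen_num_blocks_kernel.
// total_blocks: sum of m_blocks * n_blocks over the whole batch;
// num_n_blocks: n-block count for this particular sequence.
int num_splits_dynamic(int total_blocks, int num_n_blocks,
                       int num_head, int num_sm, int num_splits_static) {
    // Work each SM must absorb, padded by a 10% margin.
    int blocks_per_sm = static_cast<int>(
        std::ceil(float(total_blocks) * 1.1f * float(num_head) / float(num_sm)));
    // Split only as far as each split still gets blocks_per_sm n-blocks,
    // clamped to [1, num_splits_static].
    return std::max(std::min((num_n_blocks + blocks_per_sm - 1) / blocks_per_sm,
                             num_splits_static), 1);
}
```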
Code/Baselines/flash-attention/hopper/mainloop_fwd_sm90_tma_gmma_ws.hpp ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/flash-attention/hopper/named_barrier.hpp ADDED
@@ -0,0 +1,72 @@
+ /******************************************************************************
+ * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+ ******************************************************************************/
+
+ #pragma once
+
+ #include "cutlass/arch/barrier.h"
+
+ namespace flash {
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // cutlass::arch::NamedBarrier::sync/arrive are only enabled on Sm90 even though they work
+ // for Sm80 as well. We reimplement them here, enabled for both Sm90 and Sm80.
+
+ CUTLASS_DEVICE
+ static void named_barrier_sync(uint32_t num_threads, uint32_t barrier_id_) {
+ static constexpr uint32_t ReservedNamedBarrierCount = static_cast<uint32_t>(cutlass::arch::ReservedNamedBarriers::FirstUserBarrier);
+ uint32_t barrier_id = barrier_id_ + ReservedNamedBarrierCount;
+ asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(num_threads));
+ cutlass::arch::synclog_emit_named_barrier_arrive_and_wait(__LINE__, num_threads, barrier_id);
+ }
+
+ CUTLASS_DEVICE
+ static void named_barrier_sync(uint32_t num_threads, cutlass::arch::ReservedNamedBarriers reserved_named_barriers) {
+ uint32_t barrier_id = static_cast<uint32_t>(reserved_named_barriers);
+ asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(num_threads));
+ cutlass::arch::synclog_emit_named_barrier_arrive_and_wait(__LINE__, num_threads, barrier_id);
+ }
+
+ CUTLASS_DEVICE
+ static void named_barrier_arrive(uint32_t num_threads, uint32_t barrier_id_) {
+ static constexpr uint32_t ReservedNamedBarrierCount = static_cast<uint32_t>(cutlass::arch::ReservedNamedBarriers::FirstUserBarrier);
+ uint32_t barrier_id = barrier_id_ + ReservedNamedBarrierCount;
+ cutlass::arch::synclog_emit_named_barrier_arrive(__LINE__, num_threads, barrier_id);
+ asm volatile("bar.arrive %0, %1;" : : "r"(barrier_id), "r"(num_threads));
+ }
+
+ CUTLASS_DEVICE
+ static void named_barrier_arrive(uint32_t num_threads, cutlass::arch::ReservedNamedBarriers reserved_named_barriers) {
+ uint32_t barrier_id = static_cast<uint32_t>(reserved_named_barriers);
+ cutlass::arch::synclog_emit_named_barrier_arrive(__LINE__, num_threads, barrier_id);
+ asm volatile("bar.arrive %0, %1;" : : "r"(barrier_id), "r"(num_threads));
+ }
+
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Enumerates the reserved named barriers to avoid potential conflicts
+
+ enum class FwdNamedBarriers {
+ QueryEmpty = 0,
+ WarpSchedulerWG1 = 1,
+ WarpSchedulerWG2 = 2,
+ WarpSchedulerWG3 = 3,
+ AppendKV = 4,
+ QueryRotated = 5,
+ PFull = 6,
+ PEmpty = 7,
+ };
+
+ enum class BwdNamedBarriers {
+ KVEmpty = 0,
+ PdS = 1,
+ dQEmptyWG1 = 2,
+ dQEmptyWG2 = 3,
+ dQEmptyWG3 = 4,
+ dQFullWG1 = 5,
+ dQFullWG2 = 6,
+ dQFullWG3 = 7,
+ };
+
+ } // flash
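
A sketch of how these helpers pair up in a warp-specialized kernel: the producer side uses the non-blocking `named_barrier_arrive`, the consumer side uses `named_barrier_sync` (which both arrives and waits), and both name the same `FwdNamedBarriers` id; the user-facing id is offset past CUTLASS's reserved barriers inside the helpers. Illustrative device code, assuming the header above is included and a 256-thread block:

```cpp
#include <cstdint>
#include "named_barrier.hpp"  // the header above

// Handshake between two warpgroups of a 256-thread block. Producers
// arrive (non-blocking); consumers sync (arrive + wait). The barrier
// releases once all 256 participating threads have arrived.
__global__ void handshake_example() {
    constexpr uint32_t kThreads = 256;
    uint32_t barrier_id = static_cast<uint32_t>(flash::FwdNamedBarriers::PFull);
    bool const is_producer = (threadIdx.x / 128) == 0;  // first warpgroup
    if (is_producer) {
        // ... write P to shared memory ...
        flash::named_barrier_arrive(kThreads, barrier_id);  // signal, keep going
    } else {
        flash::named_barrier_sync(kThreads, barrier_id);    // wait for producers
        // ... read P from shared memory ...
    }
}
```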
Code/Baselines/flash-attention/hopper/softmax.h ADDED
@@ -0,0 +1,170 @@
+ /******************************************************************************
+ * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
+ ******************************************************************************/
+
+ #pragma once
+
+ #include <cmath>
+
+ #include <cute/tensor.hpp>
+
+ #include <cutlass/numeric_types.h>
+
+ #include "utils.h"
+
+ namespace flash {
+
+ using namespace cute;
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
+ __device__ __forceinline__ void thread_reduce_(Tensor<Engine0, Layout0> const &tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
+ static_assert(Layout1::rank == 1, "Only support 1D Tensor");
+ CUTE_STATIC_ASSERT_V(size<0>(summary) == size<0>(tensor));
+ #pragma unroll
+ for (int ni = 0; ni < size<1>(tensor); ni++) {
+ #pragma unroll
+ for (int mi = 0; mi < size<0>(tensor); mi++) {
+ summary(mi) = zero_init && ni == 0 ? tensor(mi, ni) : op(summary(mi), tensor(mi, ni));
+ }
+ }
+ }
+
+ template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
+ __device__ __forceinline__ void quad_allreduce_(Tensor<Engine0, Layout0> &dst, Tensor<Engine1, Layout1> &src, Operator &op) {
+ CUTE_STATIC_ASSERT_V(size(dst) == size(src));
+ #pragma unroll
+ for (int i = 0; i < size(dst); i++) {
+ dst(i) = Allreduce<4>::run(src(i), op);
+ }
+ }
+
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator>
+ __device__ __forceinline__ void reduce_(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &summary, Operator &op) {
+ thread_reduce_<zero_init>(tensor, summary, op);
+ quad_allreduce_(summary, summary, op);
+ }
+
+ template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
+ __device__ __forceinline__ void reduce_max(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &max){
+ MaxOp<float> max_op;
+ reduce_<zero_init>(tensor, max, max_op);
+ }
+
+ template<bool zero_init=true, bool warp_reduce=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1>
+ __device__ __forceinline__ void reduce_sum(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &sum){
+ SumOp<float> sum_op;
+ thread_reduce_<zero_init>(tensor, sum, sum_op);
+ if constexpr (warp_reduce) { quad_allreduce_(sum, sum, sum_op); }
+ }
+
+ // Apply the exp to all the elements.
+ template <bool Scale_max=true, bool Check_inf=true, int Max_offset=0,
+ typename Engine0, typename Layout0, typename Engine1, typename Layout1>
+ __forceinline__ __device__ void scale_apply_exp2(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &max, const float scale) {
+ // For FP8, we can subtract max by 8.0 so that the value after exp2 is in the range of [0, 256].
+ // This lets us use more of the FP8 range (instead of just [0, 1]) to reduce underflow.
+ static constexpr float max_offset = float(Max_offset);  // We can only template on int, not float
+ static_assert(Layout0::rank == 2, "Only support 2D Tensor");
+ static_assert(Layout1::rank == 1, "Only support 1D Tensor");
+ CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor));
+ #pragma unroll
+ for (int mi = 0; mi < size<0>(tensor); ++mi) {
+ // If max is -inf, then all elements must have been -inf (possibly due to masking).
+ // We don't want (-inf - (-inf)) since that would give NaN.
+ const float max_scaled = Check_inf
+ ? (max(mi) == -INFINITY ? 0.f : (!Scale_max ? max(mi) : max(mi) * scale) - max_offset)
+ : (!Scale_max ? max(mi) : max(mi) * scale) - max_offset;
+ #pragma unroll
+ for (int ni = 0; ni < size<1>(tensor); ++ni) {
+ // Instead of computing exp(x - max), we compute exp2(x * log_2(e) -
+ // max * log_2(e)). This allows the compiler to use the ffma
+ // instruction instead of fadd and fmul separately.
+ tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled);
+ }
+ }
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ template <int kNRows, int Max_offset=0>
+ struct Softmax {
+
+ using TensorT = decltype(make_tensor<float>(Shape<Int<kNRows>>{}));
+ TensorT row_max, row_sum;
+ float const softmax_scale_log2;
+
+ CUTLASS_DEVICE Softmax(float const softmax_scale_log2_) : softmax_scale_log2(softmax_scale_log2_) {};
+
+ template<bool Is_first, bool Check_inf=false, typename Tensor0>
+ __forceinline__ __device__ TensorT max_get_scale(Tensor0 &acc_s) {
+ // Reshape acc_s from ((2, 2, V), MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, V, MMA_N))
+ Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
+ static_assert(CUTE_STATIC_V(size<0>(scores)) == kNRows);
+ TensorT scores_scale;
+ if constexpr (Is_first) {
+ flash::template reduce_max</*zero_init=*/true>(scores, row_max);
+ cute::fill(scores_scale, 1.f);
+ } else {
+ Tensor scores_max_prev = make_fragment_like(row_max);
+ cute::copy(row_max, scores_max_prev);
+ flash::template reduce_max</*zero_init=*/false>(scores, row_max);
+ #pragma unroll
+ for (int mi = 0; mi < size(row_max); ++mi) {
+ float scores_max_cur = !Check_inf
+ ? row_max(mi)
+ : (row_max(mi) == -INFINITY ? 0.0f : row_max(mi));
+ scores_scale(mi) = exp2f((scores_max_prev(mi) - scores_max_cur) * softmax_scale_log2);
+ row_sum(mi) *= scores_scale(mi);
+ }
+ }
+ return scores_scale;
+ };
+
+ template<bool Is_first, bool Check_inf=false, typename Tensor0>
+ __forceinline__ __device__ void online_softmax(Tensor0 &acc_s) {
+ // Reshape acc_s from ((2, 2, V), MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, V, MMA_N))
+ Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout()));
+ static_assert(CUTE_STATIC_V(size<0>(scores)) == kNRows);
+ flash::template scale_apply_exp2</*Scale_max=*/true, Check_inf, Max_offset>(scores, row_max, softmax_scale_log2);
+ // We don't do the reduce across threads here since we don't need to use the row_sum.
+ // We do that reduce at the end when we need to normalize the softmax.
+ flash::reduce_sum</*zero_init=*/Is_first, /*warp_reduce=*/false>(scores, row_sum);
+ };
+
+ __forceinline__ __device__ TensorT finalize(float const final_scale=1.f) {
+ SumOp<float> sum_op;
+ quad_allreduce_(row_sum, row_sum, sum_op);
+ TensorT scores_scale;
+ #pragma unroll
+ for (int mi = 0; mi < size(row_sum); ++mi) {
+ float sum = row_sum(mi);
+ float inv_sum = (sum == 0.f || sum != sum) ? 0.f : 1.f / sum;
+ scores_scale(mi) = inv_sum * final_scale;
+ // For FP8, we might have scaled the output of exp by 2**8 so we need to divide sum by that amount.
+ if constexpr (Max_offset != 0) {
+ static constexpr float sum_scale = 1.f / float(1 << Max_offset);
+ sum *= sum_scale;
+ }
+ row_sum(mi) = (sum == 0.f || sum != sum) ? -INFINITY : row_max(mi) * (softmax_scale_log2 * float(M_LN2)) + __logf(sum);
+ }
+ return scores_scale;
+ };
+
+ template<typename Tensor1>
+ __forceinline__ __device__ void rescale_o(Tensor1 &acc_o, TensorT const &scores_scale) {
+ // Reshape acc_o from (MMA=4, MMA_M, MMA_K) to (nrow=(2, MMA_M), ncol=(2, MMA_K))
+ Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout()));
+ static_assert(CUTE_STATIC_V(size<0>(acc_o_rowcol)) == kNRows);
+ #pragma unroll
+ for (int mi = 0; mi < size<0>(acc_o_rowcol); ++mi) {
+ #pragma unroll
+ for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scores_scale(mi); }
+ }
+ };
+
+ };
+
+ } // namespace flash
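
The comment in scale_apply_exp2 is worth spelling out: because exp(x) = exp2(x·log2(e)), folding log2(e) into the softmax scale lets the inner loop compute x·scale − max_scaled with a single ffma feeding exp2. A host-side sketch of the identity; the values are arbitrary:

```cpp
#include <cmath>
#include <cstdio>

// Check of the identity behind scale_apply_exp2:
//   exp((s - m) * scale_raw) == exp2(s * scale_log2 - m * scale_log2),
// where scale_log2 = scale_raw * log2(e). On the GPU the right-hand form
// maps to one ffma plus the exp2 instruction.
int main() {
    const float log2e = 1.4426950408889634f;
    float scale_raw = 0.125f;            // e.g. 1/sqrt(headdim) for headdim = 64
    float scale_log2 = scale_raw * log2e;
    float s = 3.7f, m = 5.2f;            // a score and its row max
    float a = std::exp((s - m) * scale_raw);
    float b = std::exp2(s * scale_log2 - m * scale_log2);
    std::printf("%.9f %.9f\n", a, b);    // agree to float precision
}
```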
Code/Baselines/flash-attention/hopper/test_kvcache.py ADDED
@@ -0,0 +1,234 @@
+ import torch
+ #from flash_attn_interface import flash_attn_func, flash_attn_varlen_func, flash_attn_with_kvcache
+ import flash_attn_interface as fa3
+ import flash_attn as fa2
+ import torch.utils.benchmark as benchmark
+ import time
+
+ import argparse
+ import math
+
+ parser = argparse.ArgumentParser(description='Benchmark and validate FA3 vs FA2 flash_attn_with_kvcache.')
+ parser.add_argument('--causal', action='store_true')
+ parser.add_argument('--splits', type=int, default=1)
+ parser.add_argument('--repeats', type=int, default=10)
+ parser.add_argument('--validate', action='store_true')
+ parser.add_argument('--gqa', action='store_true')
+
+ args = parser.parse_args()
+
+ def benchmark_fa_kv_old(fn, repeats=10, desc='', verbose=True, **kwinputs):
+     """Use Pytorch Benchmark on the forward pass of an arbitrary function."""
+     if verbose:
+         print(desc, '- Forward pass')
+     t = benchmark.Timer(
+         stmt='fn(**kwinputs)',
+         globals={'fn': fn, 'kwinputs': kwinputs},
+         num_threads=torch.get_num_threads(),
+     )
+     m = t.timeit(repeats)
+     if verbose:
+         print(desc, m)
+     return t, m
+
+ def benchmark_fa_kv(fn, repeats=10, *args, **kwargs):
+     # warmup
+     for _ in range(5):
+         fn(*args, **kwargs)
+     niters = repeats
+     torch.cuda.synchronize()
+     start = time.time()
+     for _ in range(niters):
+         fn(*args, **kwargs)
+     torch.cuda.synchronize()
+     end = time.time()
+     return (end - start) / niters
+
+ def main():
+     # *SAMPLE CONFIG*
+     # Model arch params:
+     nheads_q = 64
+     nheads_kv = 8
+     headdim = 128
+     #dtype = torch.bfloat16
+     dtype = torch.float16
+
+     # Cache settings:
+     num_caches = 8
+     cache_seqlen = 1024 * 16
+
+     # Batching settings
+     ntokens = 1024
+     max_queries_per_batch = 4
+     small_request_ntokens = 16
+
+     # Input settings
+     query_seqlens = [900, 12, 1]
+     num_queries = len(query_seqlens)
+     # Need to add empty queries to fill out `max_queries_per_batch`
+     num_padding_queries = max_queries_per_batch - num_queries
+     context_seqlens = [4096, 5120*2, 6145*2]
+     #context_seqlens = [4096, 5120*2, 6152*2]
+
+     # Validation
+     assert sum(query_seqlens) <= ntokens
+     assert all(s < small_request_ntokens for s in query_seqlens[1:])
+     assert num_queries <= max_queries_per_batch
+     assert all(s < cache_seqlen for s in context_seqlens)
+
+     torch.manual_seed(5434)
+
+     # Allocate some tensors
+     k_cache = torch.randn(
+         (num_caches, cache_seqlen, nheads_kv, headdim), device="cuda", dtype=dtype
+     )
+     v_cache = torch.randn(
+         (num_caches, cache_seqlen, nheads_kv, headdim), device="cuda", dtype=dtype
+     )
+
+     q_buf_large = torch.randn(
+         (1, ntokens, nheads_q, headdim), device="cuda", dtype=dtype
+     )
+     cache_seqlen_large = torch.tensor(
+         [context_seqlens[0]], dtype=torch.int32, device="cuda"
+     )
+     cache_idx_large = torch.tensor([1], dtype=torch.int32, device="cuda")
+
+     q_buf_small = torch.randn(
+         (max_queries_per_batch - 1, small_request_ntokens, nheads_q, headdim),
+         device="cuda",
+         dtype=dtype,
+     )
+     cache_seqlens_small = torch.tensor(
+         context_seqlens[1:] + [0] * num_padding_queries, dtype=torch.int32, device="cuda"
+     )
+     cache_idxs_small = torch.randperm(num_caches, dtype=torch.int32, device="cuda")[
+         : max_queries_per_batch - 1
+     ]
+
+     if args.validate:
+         # Call flash attn
+         # First for the single full-sized query
+         out0, lse0 = fa3.flash_attn_with_kvcache(
+             q=q_buf_large,
+             k_cache=k_cache,
+             v_cache=v_cache,
+             cache_seqlens=cache_seqlen_large,
+             cache_batch_idx=cache_idx_large,
+             causal=bool(args.causal),
+             num_splits=args.splits,
+             return_softmax_lse=True,
+             #num_splits=1
+         )
+
+         # Then for the n-1 small queries, with num_splits=1 as a reference
+         out1_split1, lse1_split1 = fa3.flash_attn_with_kvcache(
+             q=q_buf_small,
+             k_cache=k_cache,
+             v_cache=v_cache,
+             cache_seqlens=cache_seqlens_small,
+             cache_batch_idx=cache_idxs_small,
+             causal=bool(args.causal),
+             num_splits=1,
+             gqa_decoding=bool(args.gqa),
+             return_softmax_lse=True,
+         )
+
+         # And again for the n-1 small queries, with the requested num_splits
+         out1, lse1 = fa3.flash_attn_with_kvcache(
+             q=q_buf_small,
+             k_cache=k_cache,
+             v_cache=v_cache,
+             cache_seqlens=cache_seqlens_small,
+             cache_batch_idx=cache_idxs_small,
+             causal=bool(args.causal),
+             num_splits=args.splits,
+             gqa_decoding=bool(args.gqa),
+             return_softmax_lse=True,
+         )
+
+         # Call flash attn
+         # First for the single full-sized query
+         out2 = fa2.flash_attn_with_kvcache(
+             q=q_buf_large,
+             k_cache=k_cache,
+             v_cache=v_cache,
+             cache_seqlens=cache_seqlen_large,
+             cache_batch_idx=cache_idx_large,
+             causal=bool(args.causal),
+             num_splits=args.splits,
+         )
+
+         print ('big')
+         print ('diff-max', (out0 - out2).abs().max().item(), cache_seqlens_small)
+         print ('diff-mean', (out0 - out2).abs().mean().item())
+
+
+         # FA2 reference for the n-1 small queries
+         out3, lse_fa2 = fa2.flash_attn_with_kvcache(
+             q=q_buf_small,
+             k_cache=k_cache,
+             v_cache=v_cache,
+             cache_seqlens=cache_seqlens_small,
+             cache_batch_idx=cache_idxs_small,
+             causal=bool(args.causal),
+             num_splits=args.splits,
+             return_softmax_lse=True,
+             #num_splits=1
+         )
+
+         print ('small') #, out1)
+         print ('lse', lse1, lse_fa2, (lse1 - lse_fa2).abs(), out1.shape)
+         print ('lse-dif-max', (lse1 - lse_fa2).abs().max().item())
+         print ('diff-max', (out1 - out3).abs().max().item())
+         print ('diff-mean', (out1 - out3).abs().mean().item())
+
+
+     print ('fa3', args.repeats)
+     time_fa3_big = benchmark_fa_kv(fa3.flash_attn_with_kvcache, repeats=args.repeats,
+         q=q_buf_large,
+         k_cache=k_cache,
+         v_cache=v_cache,
+         cache_seqlens=cache_seqlen_large,
+         cache_batch_idx=cache_idx_large,
+         causal=bool(args.causal),
+         num_splits=args.splits,
+     )
+
+     time_fa3_small = benchmark_fa_kv(fa3.flash_attn_with_kvcache, repeats=args.repeats,
+         q=q_buf_small,
+         k_cache=k_cache,
+         v_cache=v_cache,
+         cache_seqlens=cache_seqlens_small,
+         cache_batch_idx=cache_idxs_small,
+         causal=bool(args.causal),
+         num_splits=args.splits,
+     )
+
+     print ('fa2 ')
+
+     time_fa2_big = benchmark_fa_kv(fa2.flash_attn_with_kvcache, repeats=args.repeats,
+         q=q_buf_large,
+         k_cache=k_cache,
+         v_cache=v_cache,
+         cache_seqlens=cache_seqlen_large,
+         cache_batch_idx=cache_idx_large,
+         causal=bool(args.causal),
+         num_splits=args.splits
+     )
+
+     time_fa2_small = benchmark_fa_kv(fa2.flash_attn_with_kvcache, repeats=args.repeats,
+         q=q_buf_small,
+         k_cache=k_cache,
+         v_cache=v_cache,
+         cache_seqlens=cache_seqlens_small,
+         cache_batch_idx=cache_idxs_small,
+         causal=bool(args.causal),
+         num_splits=args.splits
+     )
+
+     print ('big (split, fa3, fa2, ratio):', args.splits, time_fa3_big * 1000000, time_fa2_big * 1000000, time_fa3_big / time_fa2_big)
+     print ('small (split, fa3, fa2, ratio):', args.splits, time_fa3_small * 1000000, time_fa2_small * 1000000, time_fa3_small / time_fa2_small)
+
+ if __name__ == "__main__":
+     main()
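
A typical invocation of this script would be along the lines of `python test_kvcache.py --validate --causal --splits 2 --repeats 20`: with `--validate` it first checks the FA3 outputs and LSE against FA2 on both the large and the small query batches, then times both implementations at the requested split count.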
Code/Baselines/flash-attention/hopper/tile_scheduler.hpp ADDED
@@ -0,0 +1,709 @@
1
+ /******************************************************************************
2
+ * Copyright (c) 2024, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
3
+ ******************************************************************************/
4
+
5
+ #pragma once
6
+
7
+ #include "cutlass/fast_math.h"
8
+ #include "cutlass/arch/barrier.h"
9
+
10
+ #include "named_barrier.hpp"
11
+ #include "utils.h"
12
+
13
+ namespace flash {
14
+
15
+ ///////////////////////////////////////////////////////////////////////////////
16
+
17
+ // Host side kernel arguments
18
+ struct TileSchedulerArguments {
19
+ // num_head is num_head_q if not PackGQA, else num_head_k
20
+ int const num_blocks, num_head, num_batch, num_splits;
21
+ int const qhead_per_khead;
22
+ int const seqlen; // Only used if Varlen and cu_seqlens == nullptr and seqused == nullptr
23
+ int const seqlen_k, headdim, headdim_v, element_size; // Used to calculate L2 swizzling
24
+ int* const tile_count_semaphore = nullptr;
25
+ int const* const cu_seqlens = nullptr;
26
+ int const* const seqused = nullptr;
27
+ // int const* const num_m_blocks_ptr = nullptr;
28
+ int const* const num_splits_dynamic_ptr = nullptr;
29
+ };
30
+
31
+ ///////////////////////////////////////////////////////////////////////////////
32
+
33
+ template<bool Varlen=false, bool Split=false, bool PackGQA=false, int kBlock=128>
34
+ class SingleTileScheduler {
35
+
36
+ public:
37
+
38
+ using SharedStorage = int;
39
+
40
+ // Device side kernel params
41
+ struct Params {
42
+ int const num_blocks, num_head, num_batch, num_splits;
43
+ int const qhead_per_khead;
44
+ int const seqlen;
45
+ cutlass::FastDivmod nsplits_divmod;
46
+ int const* const cu_seqlens;
47
+ int const* const seqused;
48
+ int const* const num_splits_dynamic_ptr = nullptr;
49
+ };
50
+
51
+ static Params
52
+ to_underlying_arguments(TileSchedulerArguments const& args) {
53
+ assert(!Split || !Varlen || args.num_splits_dynamic_ptr != nullptr);
54
+ assert(!Split || !Varlen || args.num_splits < (1 << 16)); // We use the top 16 bits to store num_splits
55
+ return {args.num_blocks, args.num_head, args.num_batch, !Split ? 1 : args.num_splits,
56
+ args.qhead_per_khead, args.seqlen,
57
+ cutlass::FastDivmod(!Split ? 1 : args.num_splits),
58
+ !Varlen ? nullptr : args.cu_seqlens, !Varlen ? nullptr : args.seqused,
59
+ args.num_splits_dynamic_ptr};
60
+ }
61
+
62
+ static dim3
63
+ get_grid_shape(Params const& params, int num_sm) {
64
+ return {uint32_t(params.num_blocks), uint32_t((!Split ? 1 : params.num_splits) * params.num_head), uint32_t(params.num_batch)};
65
+ }
66
+
67
+ struct WorkTileInfo {
68
+ int block_idx = 0;
69
+ int bidh = 0;
70
+ int bidb = 0;
71
+ int split_idx = 0;
72
+
73
+ CUTLASS_DEVICE
74
+ bool
75
+ is_valid(Params const& params) const {
76
+ return bidb >= 0;
77
+ }
78
+
79
+ CUTLASS_DEVICE
80
+ cute::tuple<int32_t, int32_t, int32_t, int32_t>
81
+ get_block_coord(Params const& params) const {
82
+ return {block_idx, bidh, bidb, !Split ? 0 : split_idx};
83
+ }
84
+
85
+ };
86
+
87
+ CUTLASS_DEVICE
88
+ SingleTileScheduler(SharedStorage* const smem_scheduler) { }
89
+
90
+ template<bool IsProducerWarp=false>
91
+ CUTLASS_DEVICE
92
+ WorkTileInfo
93
+ get_initial_work(Params const& params) const {
94
+ WorkTileInfo work_info {int(blockIdx.x), int(blockIdx.y), int(blockIdx.z), 0};
95
+ if constexpr (Split) {
96
+ int split_idx;
97
+ work_info.bidh = params.nsplits_divmod.divmod(split_idx, work_info.bidh);
98
+ work_info.split_idx = split_idx;
99
+ }
100
+ bool is_valid_tile = true;
101
+ if constexpr (Varlen) {
102
+ int seqlen = params.seqused
103
+ ? params.seqused[work_info.bidb]
104
+ : (params.cu_seqlens ? params.cu_seqlens[work_info.bidb + 1] - params.cu_seqlens[work_info.bidb] : params.seqlen);
105
+ if constexpr (PackGQA) { seqlen *= params.qhead_per_khead; }
106
+ is_valid_tile = work_info.block_idx * kBlock < seqlen;
107
+ }
108
+ if constexpr (Varlen && Split) {
109
+ int num_splits_dynamic = params.num_splits_dynamic_ptr ? params.num_splits_dynamic_ptr[work_info.bidb] : params.num_splits;
110
+ is_valid_tile &= work_info.split_idx < num_splits_dynamic;
111
+ // Use the top 16 bits to store num_splits
112
+ work_info.split_idx |= (num_splits_dynamic << 16);
113
+ }
114
+ work_info.bidb = is_valid_tile ? work_info.bidb : -1;
115
+ return work_info;
116
+ }
117
+
118
+ CUTLASS_DEVICE
119
+ void
120
+ init_consumer() const {}
121
+
122
+ CUTLASS_DEVICE
123
+ void
124
+ prefetch_next_work(Params const& params, WorkTileInfo& current_work) const {}
125
+
126
+ template<bool IsProducerWarp=false>
127
+ CUTLASS_DEVICE
128
+ WorkTileInfo
129
+ get_next_work(Params const& params, WorkTileInfo const& current_work) const {
130
+ return {0, 0, -1, 0};
131
+ }
132
+
133
+ };
134
+
135
+ ///////////////////////////////////////////////////////////////////////////////
136
+
137
+ template<bool Split=false>
138
+ class StaticPersistentTileScheduler {
139
+
140
+ public:
141
+
142
+ using SharedStorage = int;
143
+
144
+ // Device side kernel params
145
+ struct Params {
146
+ int total_blocks;
147
+ cutlass::FastDivmod m_block_divmod, head_divmod;
148
+ cutlass::FastDivmod nsplits_divmod;
149
+ };
150
+
151
+ static Params
152
+ to_underlying_arguments(TileSchedulerArguments const& args) {
153
+ return {args.num_blocks * args.num_head * args.num_batch * (!Split ? 1 : args.num_splits),
154
+ cutlass::FastDivmod(args.num_blocks), cutlass::FastDivmod(args.num_head * (!Split ? 1 : args.num_splits)),
155
+ cutlass::FastDivmod(!Split ? 1 : args.num_splits)};
156
+ }
157
+
158
+ static dim3
159
+ get_grid_shape(Params const& params, int num_sm) {
160
+ return {uint32_t(num_sm)};
161
+ }
162
+
163
+ struct WorkTileInfo {
164
+ int tile_idx;
165
+
166
+ CUTLASS_DEVICE
167
+ bool
168
+ is_valid(Params const& params) const {
169
+ return tile_idx < params.total_blocks;
170
+ }
171
+
172
+ CUTLASS_DEVICE
173
+ cute::tuple<int32_t, int32_t, int32_t, int32_t>
174
+ get_block_coord(Params const& params) const {
175
+ int block, bidh, bidb;
176
+ bidb = params.head_divmod.divmod(bidh, params.m_block_divmod.divmod(block, tile_idx));
177
+ int split_idx = 0;
178
+ if constexpr (Split) {
179
+ bidh = params.nsplits_divmod.divmod(split_idx, bidh);
180
+ }
181
+ return {block, bidh, bidb, split_idx};
182
+ }
183
+
184
+ };
185
+
186
+ CUTLASS_DEVICE
187
+ StaticPersistentTileScheduler(SharedStorage* const smem_scheduler) {};
188
+
189
+ template<bool IsProducerWarp=false>
190
+ CUTLASS_DEVICE
191
+ WorkTileInfo
192
+ get_initial_work(Params const& params) const {
193
+ return {int(blockIdx.x)};
194
+ }
195
+
196
+ CUTLASS_DEVICE
197
+ void
198
+ init_consumer() const {}
199
+
200
+ CUTLASS_DEVICE
201
+ void
202
+ prefetch_next_work(Params const& params, WorkTileInfo& current_work) const {}
203
+
204
+ template<bool IsProducerWarp=false>
205
+ CUTLASS_DEVICE
206
+ WorkTileInfo
207
+ get_next_work(Params const& params, WorkTileInfo const& current_work) const {
208
+ return {current_work.tile_idx + int(gridDim.x)};
209
+ }
210
+
211
+ };
212
+
213
+ ///////////////////////////////////////////////////////////////////////////////
214
+
215
+ template<int NumMmaThreads=2 * cutlass::NumThreadsPerWarpGroup, int NumProducerThreads=cutlass::NumThreadsPerWarp,
216
+ bool Split=false, bool PackGQA=false, bool WarpSpecialized=true>
217
+ class DynamicPersistentTileScheduler {
218
+
219
+ // This scheduler targets the causal (or local) case where each tile takes different
220
+ // amount of time. We use longest-processing-time-first scheduling:
221
+ // the longest remaining tile is assigned to the first SM that's free.
222
+ // SM indicates they are free by incrementing a semaphore.
223
+ // However, we have to make sure K & V still fit into L2 cache, so we perform scheduling
224
+ // on "sections" of the head & batch dimension, each section consisting of e.g. 8 heads.
225
+ // This is the L2 swizzling part. The size of each section is precomputed based on the
226
+ // size of K & V and the L2 cache size.
227
+
228
+ static_assert(WarpSpecialized || NumProducerThreads == NumMmaThreads);
229
+ static constexpr int NumThreads = WarpSpecialized ? NumMmaThreads + NumProducerThreads : NumMmaThreads;
230
+
231
+ public:
232
+ using SharedStorage = int;
233
+
234
+ protected:
235
+ SharedStorage* const tile_count_smem;
236
+
237
+ public:
238
+
239
+ // Device side kernel params
240
+ struct Params {
241
+ int const total_blocks;
242
+ cutlass::FastDivmod const m_block_divmod, head_divmod;
243
+ cutlass::FastDivmod const l2_minor_divmod, l2_major_divmod;
244
+ cutlass::FastDivmod const l2_minor_residual_divmod;
245
+ int const num_hb_quotient;
246
+ int* const tile_count_semaphore;
247
+ };
248
+
249
+ static Params
250
+ to_underlying_arguments(TileSchedulerArguments const& args) {
251
+ int const size_one_kv_head = args.seqlen_k * (args.headdim + args.headdim_v) * args.element_size;
252
+ int const size_l2 = 32 * 1024 * 1024; // 32 MB for K & V
253
+ // Swizzle is the size of each "section". Round swizzle to a power of 2
254
+ // If not PackGQA already, the size of each section can increase by qhead_per_khead
255
+ // Need to be careful about the case where only one head will fit
256
+ auto find_log2_floor = [&](int n) { return 31 - cutlass::clz(n); };
257
+ // Seems faster if swizzle if a power of 2
258
+ int const swizzle = (size_l2 < size_one_kv_head ? 1 : (1 << find_log2_floor(size_l2 / size_one_kv_head))) * (PackGQA ? 1 : args.qhead_per_khead);
259
+ // If we're in the last section (called residual), we don't want to divide by
260
+ // swizzle. Instead we want to divide by the remainder.
261
+ int const num_hb_remainder = (args.num_head * args.num_batch) % swizzle;
262
+ int const num_split_blocks = args.num_blocks * (!Split ? 1 : args.num_splits);
263
+ // printf("num_split_blocks = %d, num_head = %d, num_batch = %d, swizzle = %d, PackGQA = %d, qhead_per_khead = %d, num_hb_remainder = %d\n", num_split_blocks, args.num_head, args.num_batch, swizzle, int(PackGQA), args.qhead_per_khead, num_hb_remainder);
264
+ assert(args.tile_count_semaphore != nullptr);
265
+ return {num_split_blocks * args.num_head * args.num_batch,
266
+ cutlass::FastDivmod(args.num_blocks), cutlass::FastDivmod(args.num_head),
267
+ cutlass::FastDivmod(swizzle), cutlass::FastDivmod(swizzle * num_split_blocks),
268
+ // don't divide by 0
269
+ cutlass::FastDivmod(num_hb_remainder > 0 ? num_hb_remainder : 1),
270
+ (args.num_head * args.num_batch) / swizzle,
271
+ args.tile_count_semaphore};
272
+ }
273
+
274
+ static dim3
275
+ get_grid_shape(Params const& params, int num_sm) {
276
+ return {uint32_t(num_sm)};
277
+ }
278
+
279
+ struct WorkTileInfo {
280
+ int tile_idx;
281
+
282
+ CUTLASS_DEVICE
283
+ bool
284
+ is_valid(Params const& params) const {
285
+ return tile_idx < params.total_blocks;
286
+ }
287
+
288
+ CUTLASS_DEVICE
289
+ cute::tuple<int32_t, int32_t, int32_t, int32_t>
290
+ get_block_coord(Params const& params) const {
291
+ int block, bidh, bidb;
292
+ int l2_mod, bidhb, bidhb_residual;
293
+ bidhb = params.l2_major_divmod.divmod(l2_mod, tile_idx);
294
+ // If we're in the last section (called residual), we don't want to divide by
295
+ // swizzle. Instead we want to divide by the remainder.
296
+ if (bidhb < params.num_hb_quotient) {
297
+ block = params.l2_minor_divmod.divmod(bidhb_residual, l2_mod);
298
+ } else {
299
+ block = params.l2_minor_residual_divmod.divmod(bidhb_residual, l2_mod);
300
+ }
301
+ bidb = params.head_divmod.divmod(bidh, bidhb * params.l2_minor_divmod.divisor + bidhb_residual);
302
+ int split_idx = 0;
303
+ if constexpr (Split) {
304
+ split_idx = params.m_block_divmod.divmod(block, block);
305
+ }
306
+ // Longest-processing-time-first
307
+ block = params.m_block_divmod.divisor - 1 - block;
308
+ return {block, bidh, bidb, split_idx};
309
+ }
310
+
311
+ };
312
+
313
+ CUTLASS_DEVICE
314
+ DynamicPersistentTileScheduler(SharedStorage* const smem_scheduler) : tile_count_smem(smem_scheduler) {};
315
+
316
+ template<bool IsProducerWarp=false>
317
+ CUTLASS_DEVICE
318
+ WorkTileInfo
319
+ get_initial_work(Params const& params) const {
320
+ return {int(blockIdx.x)};
321
+ }
322
+
323
+ CUTLASS_DEVICE
324
+ void
325
+ init_consumer() const {
326
+ if (WarpSpecialized || cutlass::canonical_warp_idx_sync() > 0) {
327
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier0 /*id*/); // TileCountSmemEmpty
328
+ }
329
+ }
330
+
331
+ CUTLASS_DEVICE
332
+ void
333
+ prefetch_next_work(Params const& params, WorkTileInfo& current_work) const {
334
+ if (threadIdx.x % NumProducerThreads == 0) {
335
+ current_work.tile_idx = atomicAdd(params.tile_count_semaphore, 1) + int(gridDim.x);
336
+ }
337
+ }
338
+
339
+ template<bool IsProducerWarp=false>
340
+ CUTLASS_DEVICE
341
+ WorkTileInfo
342
+ get_next_work(Params const& params, WorkTileInfo const& current_work) const {
343
+ if constexpr (IsProducerWarp) {
344
+ // thread 0 already has the right tile_idx, just need to broadcast to the rest of warp 0
345
+ int new_tile_idx = __shfl_sync(0xffffffff, current_work.tile_idx, 0 /*lane*/);
346
+ flash::named_barrier_sync(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier0 /*id*/); // TileCountSmemEmpty
347
+ if (threadIdx.x % NumProducerThreads == 0) {
348
+ *tile_count_smem = current_work.tile_idx;
349
+ }
350
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier1 /*id*/); // TileCountSmemFull
351
+ return {new_tile_idx};
352
+ } else {
353
+ flash::named_barrier_sync(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier1 /*id*/); // TileCountSmemFull
354
+ int tile_idx = *tile_count_smem;
355
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier0 /*id*/); // TileCountSmemEmpty
356
+ return {tile_idx};
357
+ }
358
+ }
359
+
360
+ };
361
+
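The get_block_coord math above packs three divmods into one pass, so here is a minimal Python model of the same decomposition for reading along (plain divmod standing in for cutlass::FastDivmod; a sketch, not library code):

    def get_block_coord(tile_idx, num_blocks, num_head, num_batch, swizzle, num_splits=1):
        # Mirrors DynamicPersistentTileScheduler::WorkTileInfo::get_block_coord.
        num_split_blocks = num_blocks * num_splits
        num_hb_quotient, num_hb_remainder = divmod(num_head * num_batch, swizzle)
        bidhb, l2_mod = divmod(tile_idx, swizzle * num_split_blocks)
        # Full sections hold `swizzle` (head, batch) pairs; the trailing
        # "residual" section holds only num_hb_remainder of them.
        if bidhb < num_hb_quotient:
            block, bidhb_residual = divmod(l2_mod, swizzle)
        else:
            block, bidhb_residual = divmod(l2_mod, num_hb_remainder)
        bidb, bidh = divmod(bidhb * swizzle + bidhb_residual, num_head)
        split_idx, block = divmod(block, num_blocks) if num_splits > 1 else (0, block)
        block = num_blocks - 1 - block  # longest-processing-time-first ordering
        return block, bidh, bidb, split_idx

    # Sanity check: the mapping is a bijection over the scheduled tile range.
    coords = {get_block_coord(i, 3, 2, 5, swizzle=4) for i in range(3 * 2 * 5)}
    assert len(coords) == 3 * 2 * 5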
362
+ ///////////////////////////////////////////////////////////////////////////////
363
+
364
+ class SingleTileBwdLPTScheduler {
365
+
366
+ public:
367
+
368
+ using SharedStorage = int;
369
+
370
+ // Device side kernel params
371
+ struct Params {
372
+ int const total_blocks;
373
+ cutlass::FastDivmod const m_block_divmod, head_divmod;
374
+ cutlass::FastDivmod const l2_minor_divmod, l2_major_divmod;
375
+ cutlass::FastDivmod const l2_minor_residual_divmod;
376
+ int const num_hb_quotient;
377
+ };
378
+
379
+ static Params
380
+ to_underlying_arguments(TileSchedulerArguments const& args) {
381
+ // Since it's the bwd pass, seqlen_k gets passed to args.seqlen and seqlen_q is passed to args.seqlen_k
382
+ int const size_one_qdo_head = args.seqlen_k * (args.headdim + args.headdim_v) * args.element_size;
383
+ int const size_one_dqaccum_head = args.seqlen_k * args.headdim * sizeof(float);
384
+ int const size_one_head = size_one_qdo_head + size_one_dqaccum_head;
385
+ int const size_l2 = 40 * 1024 * 1024; // 40 MB for Q, dO, and dQaccum
386
+ // Swizzle is the size of each "section". Round swizzle to a power of 2
387
+ // Need to be careful about the case where only one head will fit
388
+ auto find_log2_floor = [&](int n) { return 31 - cutlass::clz(n); };
389
+ // Seems faster if swizzle is a power of 2
390
+ int const swizzle = size_l2 < size_one_head ? 1 : (1 << find_log2_floor(size_l2 / size_one_head));
391
+ // If we're in the last section (called residual), we don't want to divide by
392
+ // swizzle. Instead we want to divide by the remainder.
393
+ int const num_hb_remainder = (args.num_head * args.num_batch) % swizzle;
394
+ // printf("num_blocks = %d, num_head = %d, num_batch = %d, size_one_head = %d, ratio = %d, swizzle = %d, num_hb_remainder = %d\n", args.num_blocks, args.num_head, args.num_batch, size_one_head, size_l2 / size_one_head, swizzle, num_hb_remainder);
395
+ assert(args.tile_count_semaphore != nullptr);
396
+ return {args.num_blocks * args.num_head * args.num_batch,
397
+ cutlass::FastDivmod(args.num_blocks), cutlass::FastDivmod(args.num_head),
398
+ cutlass::FastDivmod(swizzle), cutlass::FastDivmod(swizzle * args.num_blocks),
399
+ // don't divide by 0
400
+ cutlass::FastDivmod(num_hb_remainder > 0 ? num_hb_remainder : 1),
401
+ (args.num_head * args.num_batch) / swizzle};
402
+ }
403
+
404
+ static dim3
405
+ get_grid_shape(Params const& params, int num_sm) {
406
+ return {uint32_t(params.total_blocks)};
407
+ }
408
+
409
+ struct WorkTileInfo {
410
+ int tile_idx;
411
+
412
+ CUTLASS_DEVICE
413
+ bool
414
+ is_valid(Params const& params) const {
415
+ return tile_idx < params.total_blocks;
416
+ }
417
+
418
+ CUTLASS_DEVICE
419
+ cute::tuple<int32_t, int32_t, int32_t, int32_t>
420
+ get_block_coord(Params const& params) const {
421
+ int block, bidh, bidb;
422
+ int l2_mod, bidhb, bidhb_residual;
423
+ bidhb = params.l2_major_divmod.divmod(l2_mod, tile_idx);
424
+ // If we're in the last section (called residual), we don't want to divide by
425
+ // swizzle. Instead we want to divide by the remainder.
426
+ if (bidhb < params.num_hb_quotient) {
427
+ block = params.l2_minor_divmod.divmod(bidhb_residual, l2_mod);
428
+ } else {
429
+ block = params.l2_minor_residual_divmod.divmod(bidhb_residual, l2_mod);
430
+ }
431
+ bidb = params.head_divmod.divmod(bidh, bidhb * params.l2_minor_divmod.divisor + bidhb_residual);
432
+ return {block, bidh, bidb, 0 /*split_idx*/};
433
+ }
434
+
435
+ };
436
+
437
+ CUTLASS_DEVICE
438
+ SingleTileBwdLPTScheduler(SharedStorage* const smem_scheduler) { }
439
+
440
+ template<bool IsProducerWarp=false>
441
+ CUTLASS_DEVICE
442
+ WorkTileInfo
443
+ get_initial_work(Params const& params) const {
444
+ return {int(blockIdx.x)};
445
+ }
446
+
447
+ CUTLASS_DEVICE
448
+ void
449
+ init_consumer() const {}
450
+
451
+ CUTLASS_DEVICE
452
+ void
453
+ prefetch_next_work(Params const& params, WorkTileInfo& current_work) const {}
454
+
455
+ template<bool IsProducerWarp=false>
456
+ CUTLASS_DEVICE
457
+ WorkTileInfo
458
+ get_next_work(Params const& params, WorkTileInfo const& current_work) const {
459
+ return {params.total_blocks};
460
+ }
461
+
462
+ };
463
+
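The swizzle sizing in to_underlying_arguments above estimates how many (batch, head) working sets fit in a 40 MB slice of L2 and floors the result to a power of two. A Python restatement of that heuristic (the 40 MB figure is the hard-coded constant from the code above, not a queried device property):

    def pick_swizzle(seqlen, headdim, headdim_v, element_size,
                     l2_bytes=40 * 1024 * 1024):
        # Bytes touched per (batch, head): Q and dO in the compute dtype,
        # plus the float32 dQaccum buffer.
        size_one_head = (seqlen * (headdim + headdim_v) * element_size
                         + seqlen * headdim * 4)
        if l2_bytes < size_one_head:
            return 1
        ratio = l2_bytes // size_one_head
        return 1 << (ratio.bit_length() - 1)  # floor to a power of two

    # 4 MiB per head -> 10 heads fit in 40 MB -> swizzle of 8
    assert pick_swizzle(4096, 128, 128, 2) == 8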
464
+ ///////////////////////////////////////////////////////////////////////////////
465
+
466
+ template<int kBlock, int NumMmaThreads=2 * cutlass::NumThreadsPerWarpGroup, int NumProducerThreads=cutlass::NumThreadsPerWarp, bool Split=false, bool PackGQA=false, bool WarpSpecialized=true>
467
+ class VarlenDynamicPersistentTileScheduler {
468
+
469
+ static_assert(WarpSpecialized || NumProducerThreads == NumMmaThreads);
470
+ static constexpr int NumThreads = WarpSpecialized ? NumMmaThreads + NumProducerThreads : NumMmaThreads;
471
+
472
+ public:
473
+ using SharedStorage = int4;
474
+
475
+ protected:
476
+ SharedStorage* const work_info_smem;
477
+
478
+ public:
479
+
480
+ // Device side kernel params
481
+ struct Params {
482
+ int num_head, num_batch;
483
+ int const qhead_per_khead;
484
+ int const seqlen;
485
+ cutlass::FastDivmod head_divmod;
486
+ cutlass::FastDivmod nsplits_divmod;
487
+ int* const tile_count_semaphore;
488
+ int const* const cu_seqlens;
489
+ int const* const seqused;
490
+ // int* const num_m_blocks_ptr;
491
+ int const* const num_splits_dynamic_ptr;
492
+ };
493
+
494
+ static Params
495
+ to_underlying_arguments(TileSchedulerArguments const& args) {
496
+ // If Split, for the purpose of scheduling, we pretend that instead there are
497
+ // (args.num_splits * args.num_head) heads.
498
+ assert(args.tile_count_semaphore != nullptr);
499
+ assert(args.num_head < (1 << 16)); // We use the top 16 bits to store num_splits & split_idx
500
+ assert(!Split || args.num_splits < (1 << 8)); // We use the top 8 bits to store num_splits
501
+ return {args.num_head, args.num_batch,
502
+ args.qhead_per_khead, args.seqlen,
503
+ cutlass::FastDivmod(args.num_head),
504
+ cutlass::FastDivmod(!Split ? 1 : args.num_splits),
505
+ args.tile_count_semaphore, args.cu_seqlens, args.seqused,
506
+ // args.num_m_blocks_ptr,
507
+ args.num_splits_dynamic_ptr};
508
+ }
509
+
510
+ static dim3
511
+ get_grid_shape(Params const& params, int num_sm) {
512
+ return {uint32_t(num_sm)};
513
+ }
514
+
515
+ struct WorkTileInfo {
516
+ int tile_idx, block, bidh, bidb;
517
+
518
+ CUTLASS_DEVICE
519
+ bool
520
+ is_valid(Params const& params) const {
521
+ // if (blockIdx.x >= 0 && (threadIdx.x == 128 || threadIdx.x == 0)) { printf("blockIdx.x = %d, threadIdx.x = %d, checking valid, bidb = %d, params.num_batch = %d\n", blockIdx.x, threadIdx.x, bidb, params.num_batch); }
522
+ return bidb < params.num_batch;
523
+ }
524
+
525
+ CUTLASS_DEVICE
526
+ cute::tuple<int32_t, int32_t, int32_t, int32_t>
527
+ get_block_coord(Params const& params) const {
528
+ if constexpr (!Split) {
529
+ return {block, bidh, bidb, 0 /*split_idx*/};
530
+ } else {
531
+ // the top 8 bits of bidh store num_splits and the next 8 bits store split_idx
532
+ // reinterpret_cast to uint32_t to make sure we're not doing sign extension when we shift
533
+ uint32_t bidh_packed = reinterpret_cast<uint32_t const&>(bidh);
534
+ uint32_t bidh_actual_u = bidh_packed & 0x0000FFFF;
535
+ int bidh_actual = reinterpret_cast<int&>(bidh_actual_u);
536
+ // Use the top 16 bits of split_idx to store num_splits and the next 16 bits to store split_idx
537
+ uint32_t split_idx_u = ((bidh_packed & 0x00FF0000) >> 16) + ((bidh_packed & 0xFF000000) >> 8);
538
+ int split_idx = reinterpret_cast<int&>(split_idx_u);
539
+ // int bidh_actual = params.nsplits_divmod.divmod(split_idx, bidh);
540
+ // if (threadIdx.x == 128) {
541
+ // printf("blockIdx.x = %d, bidb = %d, bidh = %d, bidh_actual = %d, split_idx = %d\n", blockIdx.x, bidb, bidh, bidh_actual, split_idx);
542
+ // }
543
+ return {block, bidh_actual, bidb, split_idx};
544
+ }
545
+ }
546
+ };
547
+
548
+ CUTLASS_DEVICE
549
+ VarlenDynamicPersistentTileScheduler(SharedStorage* const smem_scheduler) : work_info_smem(smem_scheduler) {};
550
+
551
+ CUTLASS_DEVICE
552
+ WorkTileInfo
553
+ tile_idx_to_work_tile(Params const& params, int next_tile_idx, WorkTileInfo const& current_work) const {
554
+ int lane = threadIdx.x % cutlass::NumThreadsPerWarp;
555
+ auto get_num_m_blocks = [&] (int bidb_start) {
556
+ int batch_idx = lane + bidb_start;
557
+ int seqlen = params.seqlen * (!PackGQA ? 1 : params.qhead_per_khead);
558
+ if (seqlen > kBlock) {
559
+ if (params.seqused) {
560
+ seqlen = batch_idx < params.num_batch ? params.seqused[batch_idx] : 0;
561
+ } else if (params.cu_seqlens) {
562
+ int cur_cu_seqlen = batch_idx <= params.num_batch ? params.cu_seqlens[batch_idx] : 0;
563
+ int next_cu_seqlen = __shfl_down_sync(0xffffffff, cur_cu_seqlen, 1);
564
+ seqlen = next_cu_seqlen - cur_cu_seqlen;
565
+ } else {
566
+ seqlen = params.seqlen;
567
+ }
568
+ if constexpr (PackGQA) { seqlen *= params.qhead_per_khead; }
569
+ }
570
+ return batch_idx < params.num_batch && lane < cutlass::NumThreadsPerWarp - 1
571
+ ? cute::ceil_div(seqlen, kBlock) : 0;
572
+ // ? params.num_m_blocks_ptr[batch_idx] : 0;
573
+ };
574
+
575
+ auto get_num_splits = [&] (int bidb_start) {
576
+ int batch_idx = lane + bidb_start;
577
+ return batch_idx < params.num_batch && lane < cutlass::NumThreadsPerWarp - 1
578
+ ? (!Split ? 1 : (params.num_splits_dynamic_ptr
579
+ ? params.num_splits_dynamic_ptr[batch_idx]
580
+ : params.nsplits_divmod.divisor))
581
+ : 0;
582
+ };
583
+
584
+ int num_m_blocks = get_num_m_blocks(current_work.bidb); // Different for each lane
585
+ int num_splits = get_num_splits(current_work.bidb);
586
+ int num_split_m_blocks = !Split ? num_m_blocks : num_m_blocks * num_splits;
587
+ // Cumulative number of blocks for the next 31 batches
588
+ int num_m_blocks_cumulative = warp_prefix_sum(num_split_m_blocks);
589
+ // Total number of blocks for the next 31 batches
590
+ int m_blocks_in_group = __shfl_sync(0xffffffff, num_m_blocks_cumulative, cutlass::NumThreadsPerWarp - 1);
591
+ // Only the lower 16 bits are the actual bidh
592
+ int current_bidh = !Split ? current_work.bidh : (current_work.bidh & 0x0000FFFF);
593
+ int group_end_tile = current_work.tile_idx - current_work.block - current_bidh * __shfl_sync(0xffffffff, num_split_m_blocks, 0 /*lane*/) + m_blocks_in_group * params.num_head; // Same for all lanes
594
+ if constexpr (Split) {
595
+ int current_split_idx = (current_work.bidh & 0x00FF0000) >> 16;
596
+ group_end_tile -= current_split_idx * __shfl_sync(0xffffffff, num_m_blocks, 0 /*lane*/);
597
+ }
598
+ int bidb = current_work.bidb;
599
+ // if (blockIdx.x <= 9 && threadIdx.x == 0) {
600
+ // printf("Before while, blockIdx.x = %d, threadIdx.x = %d, bidb = %d, num_m_blocks = %d, next_tile_idx = %d, cur tile_idx = %d, cur block = %d, cur bidh = %d, num_split_m_blocks = %d, group_end_tile = %d, m_blocks_in_group = %d\n", blockIdx.x, threadIdx.x, current_work.bidb, num_m_blocks, next_tile_idx, current_work.tile_idx, current_work.block, current_bidh, num_split_m_blocks, group_end_tile, m_blocks_in_group);
601
+ // }
602
+ // if (threadIdx.x == 0 && blockIdx.x == 0) { printf("tile_idx = %d, group_end_tile = %d, num_m_blocks_cumulative = %d, m_blocks_in_group = %d\n", current_work.tile_idx, group_end_tile, num_m_blocks_cumulative, m_blocks_in_group); }
603
+ while (group_end_tile <= next_tile_idx) {
604
+ bidb += cutlass::NumThreadsPerWarp - 1;
605
+ if (bidb >= params.num_batch) {
606
+ // if (blockIdx.x <= 9 && threadIdx.x == 0) {
607
+ // printf("Returning early, blockIdx.x = %d, threadIdx.x = %d, bidb = %d, num_m_blocks = %d, next_tile_idx = %d, group_end_tile = %d, m_blocks_in_group = %d\n", blockIdx.x, threadIdx.x, bidb, num_m_blocks, next_tile_idx, group_end_tile, m_blocks_in_group);
608
+ // }
609
+ return {next_tile_idx, 0, 0, params.num_batch};
610
+ }
611
+ num_m_blocks = get_num_m_blocks(bidb);
612
+ num_splits = get_num_splits(bidb);
613
+ num_split_m_blocks = !Split ? num_m_blocks : num_m_blocks * num_splits;
614
+ num_m_blocks_cumulative = warp_prefix_sum(num_split_m_blocks);
615
+ m_blocks_in_group = __shfl_sync(0xffffffff, num_m_blocks_cumulative, cutlass::NumThreadsPerWarp - 1);
616
+ group_end_tile += m_blocks_in_group * params.num_head;
617
+ // if (blockIdx.x <= 9 && threadIdx.x == 0) {
618
+ // printf("Bottom of while, blockIdx.x = %d, threadIdx.x = %d, bidb = %d, num_m_blocks = %d, next_tile_idx = %d, group_end_tile = %d, m_blocks_in_group = %d\n", blockIdx.x, threadIdx.x, bidb, num_m_blocks, next_tile_idx, group_end_tile, m_blocks_in_group);
619
+ // }
620
+ }
621
+ int group_start_tile = group_end_tile - m_blocks_in_group * params.num_head;
622
+ // The next problem to process is the first one whose ending tile position
623
+ // is strictly greater than the tile index.
624
+ int batch_idx_in_group = __popc(__ballot_sync(0xffffffff, group_start_tile + num_m_blocks_cumulative * params.num_head <= next_tile_idx));
625
+ // if (threadIdx.x == 31 || threadIdx.x == 0) { printf("blockIdx.x = %d, tidx %d, group_start_tile = %d, num_m_blocks_cumulative = %d, num_head = %d, next_tile_idx = %d, ballot = %x, batch_idx_in_group = %d\n", blockIdx.x, threadIdx.x, group_start_tile, num_m_blocks_cumulative, params.num_head, next_tile_idx, tmp, batch_idx_in_group); }
626
+ bidb += batch_idx_in_group;
627
+ num_m_blocks = __shfl_sync(0xffffffff, num_m_blocks, batch_idx_in_group);
628
+ if constexpr (Split) { num_splits = __shfl_sync(0xffffffff, num_splits, batch_idx_in_group); }
629
+ int mh_block = next_tile_idx - group_start_tile - (batch_idx_in_group == 0 ? 0 : __shfl_sync(0xffffffff, num_m_blocks_cumulative, batch_idx_in_group - 1)) * params.num_head;
630
+ int bidh = mh_block / num_m_blocks;
631
+ int block = mh_block - bidh * num_m_blocks;
632
+ if constexpr (Split) {
633
+ int bidh_actual = bidh / num_splits;
634
+ int split_idx = bidh - bidh_actual * num_splits;
635
+ // TODO: idk why this gives wrong answer nondeterministically
636
+ // int bidh_actual, split_idx;
637
+ // split_idx = params.head_divmod.divmod(bidh_actual, bidh);
638
+ // Use the top 8 bits to store num_splits and the next 8 bits to store split_idx
639
+ // reinterpret_cast to uint32_t to make sure we're not doing sign extension when we shift
640
+ uint32_t bidh_packed = reinterpret_cast<uint32_t&>(bidh_actual) + (reinterpret_cast<uint32_t&>(split_idx) << 16) + (reinterpret_cast<uint32_t&>(num_splits) << 24);
641
+ // if (threadIdx.x == 0) {
642
+ // printf("blockIdx.x = %d, group_start_tiled = %d, bidb = %d, batch_idx_in_group = %d, mh_block = %d, num_m_blocks = %d, bidh = %d, bidh_actual = %d, split_idx = %d, num_splits = %d, bidh_packed = %d\n", blockIdx.x, group_start_tile, bidb, batch_idx_in_group, mh_block, num_m_blocks, bidh, bidh_actual, split_idx, num_splits, bidh_packed);
643
+ // }
644
+ bidh = reinterpret_cast<int&>(bidh_packed);
645
+ }
646
+ // if (blockIdx.x <= 9 && threadIdx.x == 0) {
647
+ // printf("Before returning, blockIdx.x = %d, threadIdx.x = %d, group_start_tile = %d, batch_idx_in_group = %d, bidb = %d, num_m_blocks = %d, next_tile_idx = %d, group_end_tile = %d, m_blocks_in_group = %d, mh_block = %d, bidh = %d, block = %d\n", blockIdx.x, threadIdx.x, group_start_tile, batch_idx_in_group, bidb, num_m_blocks, next_tile_idx, group_end_tile, m_blocks_in_group, mh_block, bidh, block);
648
+ // }
649
+ return {next_tile_idx, block, bidh, bidb};
650
+ }
651
+
652
+ template<bool IsProducerWarp=false>
653
+ CUTLASS_DEVICE
654
+ WorkTileInfo
655
+ get_initial_work(Params const& params) const {
656
+ if constexpr (IsProducerWarp) {
657
+ WorkTileInfo work_info = tile_idx_to_work_tile(params, int(blockIdx.x), {0, 0, 0, 0});
658
+ if (threadIdx.x % cutlass::NumThreadsPerWarp == 0) {
659
+ *work_info_smem = make_int4(work_info.tile_idx, work_info.block, work_info.bidh, work_info.bidb);
660
+ }
661
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier1 /*id*/); // TileCountSmemFull
662
+ return work_info;
663
+ } else {
664
+ return get_next_work<false>(params, {0, 0, 0, 0});
665
+ }
666
+ }
667
+
668
+ CUTLASS_DEVICE
669
+ void
670
+ init_consumer() const {
671
+ // Don't arrive at the TileCountSmemEmpty barrier here, because get_initial_work will do that
672
+ }
673
+
674
+ CUTLASS_DEVICE
675
+ void
676
+ prefetch_next_work(Params const& params, WorkTileInfo& current_work) const {
677
+ if (threadIdx.x % NumProducerThreads == 0) {
678
+ current_work.tile_idx = atomicAdd(params.tile_count_semaphore, 1) + int(gridDim.x);
679
+ }
680
+ }
681
+
682
+ template<bool IsProducerWarp=false>
683
+ CUTLASS_DEVICE
684
+ WorkTileInfo
685
+ get_next_work(Params const& params, WorkTileInfo const& current_work) const {
686
+ if constexpr (IsProducerWarp) {
687
+ // thread 0 has the next tile_idx, just need to broadcast to the rest of warp 0
688
+ int new_tile_idx = __shfl_sync(0xffffffff, current_work.tile_idx, 0 /*lane*/);
689
+ WorkTileInfo work_info = {__shfl_sync(0xffffffff, current_work.tile_idx, 1 /*lane*/), current_work.block, current_work.bidh, current_work.bidb};
690
+ work_info = tile_idx_to_work_tile(params, new_tile_idx, work_info);
691
+ flash::named_barrier_sync(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier0 /*id*/); // TileCountSmemEmpty
692
+ if (threadIdx.x % cutlass::NumThreadsPerWarp == 0) {
693
+ *work_info_smem = make_int4(work_info.tile_idx, work_info.block, work_info.bidh, work_info.bidb);
694
+ }
695
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier1 /*id*/); // TileCountSmemFull
696
+ return work_info;
697
+ } else {
698
+ flash::named_barrier_sync(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier1 /*id*/); // TileCountSmemFull
699
+ int4 work_info = *work_info_smem;
700
+ flash::named_barrier_arrive(NumThreads, cutlass::arch::ReservedNamedBarriers::StreamkBarrier0 /*id*/); // TileCountSmemEmpty
701
+ return WorkTileInfo{work_info.x, work_info.y, work_info.z, work_info.w};
702
+ }
703
+ }
704
+
705
+ };
706
+
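When Split is enabled, the scheduler above threads num_splits and split_idx through the kernel inside the upper bytes of bidh (hence the asserts that num_head fits in 16 bits and num_splits in 8). A small Python sketch of the pack/unpack pair, matching the shifts in tile_idx_to_work_tile and get_block_coord:

    def pack_bidh(bidh_actual, split_idx, num_splits):
        # bits 0-15: real head index, bits 16-23: split_idx, bits 24-31: num_splits
        assert bidh_actual < (1 << 16) and split_idx < (1 << 8) and num_splits < (1 << 8)
        return bidh_actual | (split_idx << 16) | (num_splits << 24)

    def unpack_bidh(bidh_packed):
        # get_block_coord returns split_idx with num_splits re-packed into its
        # own upper 16 bits, which downstream code decodes the same way.
        bidh_actual = bidh_packed & 0x0000FFFF
        split_idx = ((bidh_packed & 0x00FF0000) >> 16) + ((bidh_packed & 0xFF000000) >> 8)
        return bidh_actual, split_idx

    h, s = unpack_bidh(pack_bidh(5, 3, 4))
    assert h == 5 and s == 3 + (4 << 16)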
707
+ ///////////////////////////////////////////////////////////////////////////////
708
+
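tile_idx_to_work_tile searches its layout with a warp-wide prefix sum over 31 batches at a time; stripped of the warp intrinsics and Split packing, the layout itself is just batch-major, then head-major, then m-block. A scalar Python model of the mapping it computes:

    import math

    def varlen_tile_to_coord(tile_idx, seqlens, kblock, num_head):
        # Each batch b owns ceil(seqlens[b] / kblock) * num_head consecutive tiles.
        for bidb, seqlen in enumerate(seqlens):
            num_m_blocks = math.ceil(seqlen / kblock)
            if tile_idx < num_m_blocks * num_head:
                bidh, block = divmod(tile_idx, num_m_blocks)
                return block, bidh, bidb
            tile_idx -= num_m_blocks * num_head
        return None  # past the last batch; the kernel signals this via bidb == num_batch

    # Batch 0 owns tiles 0-1 (1 m-block x 2 heads), batch 1 owns tiles 2-7.
    assert varlen_tile_to_coord(0, [100, 300], kblock=128, num_head=2) == (0, 0, 0)
    assert varlen_tile_to_coord(5, [100, 300], kblock=128, num_head=2) == (0, 1, 1)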
709
+ } // flash
Code/Baselines/flash-attention/tests/pyproject.toml ADDED
@@ -0,0 +1,3 @@
1
+ [tool.black]
2
+ line-length = 100
3
+ target-version = ['py38']
Code/Baselines/flash-attention/tests/test_flash_attn.py ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/flash-attention/tests/test_flash_attn_triton_amd.py ADDED
The diff for this file is too large to render. See raw diff
 
Code/Baselines/flash-attention/tests/test_rotary.py ADDED
@@ -0,0 +1,321 @@
1
+ import math
2
+ import random
3
+
4
+ import pytest
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from einops import rearrange
8
+
9
+ import triton
10
+
11
+ from flash_attn.layers.rotary import apply_rotary_emb, apply_rotary_emb_torch
12
+ from flash_attn.layers.rotary import apply_rotary_emb_qkv_, apply_rotary_emb_kv_
13
+ from flash_attn.bert_padding import pad_input, unpad_input
14
+
15
+ is_sm8x = torch.cuda.get_device_capability("cuda") >= (8, 0)
16
+
17
+
18
+ def generate_cos_sin(seqlen, rotary_dim, device, dtype):
19
+ assert rotary_dim % 2 == 0
20
+ angle = torch.rand(seqlen * 2, rotary_dim // 2, device=device) * 2 * math.pi
21
+ cos = torch.cos(angle).to(dtype=dtype)
22
+ sin = torch.sin(angle).to(dtype=dtype)
23
+ return cos, sin
24
+
25
+
26
+ def generate_seqlen_offsets(seqlen_offsets_type, batch_size, seqlen, device):
27
+ if seqlen_offsets_type == 0:
28
+ return 0
29
+ elif seqlen_offsets_type is int:
30
+ return torch.randint(0, seqlen + 1, (1,)).item()
31
+ elif seqlen_offsets_type is torch.Tensor:
32
+ return torch.randint(0, seqlen + 1, (batch_size,), dtype=torch.int32, device=device)
33
+
34
+
35
+ def index_cos_sin(cos, sin, seqlen_offsets, seqlen):
36
+ if isinstance(seqlen_offsets, torch.Tensor):
37
+ batch_size = seqlen_offsets.shape[0]
38
+ arange = rearrange(torch.arange(seqlen, device=cos.device), "s -> 1 s")
39
+ idx = rearrange(seqlen_offsets, "b -> b 1") + arange
40
+ cos_pt = rearrange(cos[idx.flatten()], "(b s) d -> b s d", b=batch_size)
41
+ sin_pt = rearrange(sin[idx.flatten()], "(b s) d -> b s d", b=batch_size)
42
+ else:
43
+ cos_pt = cos[seqlen_offsets : seqlen_offsets + seqlen]
44
+ sin_pt = sin[seqlen_offsets : seqlen_offsets + seqlen]
45
+ return cos_pt, sin_pt
46
+
47
+
48
+ @pytest.mark.parametrize(
49
+ "dtype", ([torch.float16] if not is_sm8x else [torch.float16, torch.bfloat16])
50
+ )
51
+ # @pytest.mark.parametrize('dtype', ([torch.bfloat16]))
52
+ @pytest.mark.parametrize("seqlen_offsets_type", [0, int, torch.Tensor])
53
+ # @pytest.mark.parametrize("seqlen_offsets_type", [0])
54
+ @pytest.mark.parametrize("rotary_fraction", [1.0, 0.5])
55
+ # @pytest.mark.parametrize('rotary_fraction', [1.0])
56
+ @pytest.mark.parametrize("interleaved", [False, True])
57
+ # @pytest.mark.parametrize('interleaved', [True])
58
+ @pytest.mark.parametrize("inplace", [False, True])
59
+ # @pytest.mark.parametrize('inplace', [False])
60
+ def test_rotary_emb_func(inplace, interleaved, rotary_fraction, seqlen_offsets_type, dtype):
61
+ rtol = 1e-3
62
+ batch_size = 32
63
+ nheads = 4
64
+ seqlen = 217
65
+ headdim = 128
66
+ device = "cuda"
67
+ rotary_dim = int(rotary_fraction * headdim)
68
+ torch.manual_seed(42)
69
+ x = torch.randn(
70
+ batch_size, seqlen, nheads, headdim, dtype=dtype, device=device, requires_grad=True
71
+ )
72
+ x_pt = x.detach().clone().requires_grad_()
73
+ cos, sin = generate_cos_sin(seqlen, rotary_dim, device, dtype)
74
+ seqlen_offsets = generate_seqlen_offsets(seqlen_offsets_type, batch_size, seqlen, device)
75
+ out = apply_rotary_emb(
76
+ x, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=inplace
77
+ )
78
+ cos_pt, sin_pt = index_cos_sin(cos, sin, seqlen_offsets, seqlen)
79
+ out_pt = apply_rotary_emb_torch(
80
+ x_pt.float(), cos_pt.float(), sin_pt.float(), interleaved=interleaved
81
+ ).to(dtype=dtype)
82
+ print(f"Output max diff: {(out - out_pt).abs().max().item()}")
83
+
84
+ g = torch.randn_like(out)
85
+ g_pt = g.clone() # If inplace=True, we might modify the gradient inplace
86
+ out.backward(g)
87
+ out_pt.backward(g_pt)
88
+ print(f"Grad max diff: {(x.grad - x_pt.grad).abs().max().item()}")
89
+
90
+ if not inplace:
91
+ assert torch.equal(x, x_pt)
92
+ # Numerical error if we just do any arithmetic
93
+ atol = ((out_pt + 0.3 - 0.3) - out_pt).abs().max().item()
94
+ assert torch.allclose(out, out_pt, rtol=rtol, atol=2 * atol)
95
+ atol = ((x_pt.grad + 0.3 - 0.3) - x_pt.grad).abs().max().item()
96
+ assert torch.allclose(x.grad, x_pt.grad, rtol=rtol, atol=2 * atol)
97
+
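A note on the tolerance trick in the asserts above: `(out_pt + 0.3 - 0.3) - out_pt` forces a round trip through the working dtype's rounding, so its max deviation estimates the representable precision at this magnitude, and the test doubles that into an absolute tolerance. A standalone illustration (the values are arbitrary):

    import torch

    t = torch.linspace(-3, 3, 1001, dtype=torch.float16)
    roundoff = ((t + 0.3 - 0.3) - t).abs().max().item()
    print(roundoff)  # on the order of one fp16 ULP near |t| ~ 3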
98
+
99
+ @pytest.mark.parametrize(
100
+ "dtype", ([torch.float16] if not is_sm8x else [torch.float16, torch.bfloat16])
101
+ )
102
+ # @pytest.mark.parametrize('dtype', ([torch.float16]))
103
+ @pytest.mark.parametrize("compiled", [False, True])
104
+ # @pytest.mark.parametrize("compiled", [True])
105
+ @pytest.mark.parametrize("gqa", [False, True])
106
+ # @pytest.mark.parametrize("gqa", [False])
107
+ @pytest.mark.parametrize("seqlen_offsets_type", [0, int, torch.Tensor])
108
+ # @pytest.mark.parametrize("seqlen_offsets_type", [0])
109
+ @pytest.mark.parametrize("rotary_fraction", [1.0, 0.5])
110
+ # @pytest.mark.parametrize('rotary_fraction', [1.0])
111
+ @pytest.mark.parametrize("interleaved", [False, True])
112
+ # @pytest.mark.parametrize('interleaved', [False])
113
+ def test_rotary_emb_qkv(interleaved, rotary_fraction, seqlen_offsets_type, gqa, compiled, dtype):
114
+ if compiled: # Don't fall back to eager just because of recompilation
115
+ torch._dynamo.config.recompile_limit = 2 ** 31
116
+ rtol = 1e-3
117
+ batch_size = 32
118
+ nheads = 4
119
+ seqlen = 512
120
+ headdim = 128
121
+ device = "cuda"
122
+ rotary_dim = int(rotary_fraction * headdim)
123
+ torch.manual_seed(42)
124
+ if not gqa:
125
+ qkv = torch.randn(
126
+ batch_size, seqlen, 3, nheads, headdim, dtype=dtype, device=device, requires_grad=True
127
+ )
128
+ else:
129
+ nheads_k = nheads // 2
130
+ qkv = torch.randn(
131
+ batch_size, seqlen, nheads + nheads_k * 2, headdim, dtype=dtype, device=device, requires_grad=True
132
+ )
133
+ qkv_pt = qkv.detach().clone().requires_grad_()
134
+ cos, sin = generate_cos_sin(seqlen, rotary_dim, device, dtype)
135
+ seqlen_offsets = generate_seqlen_offsets(seqlen_offsets_type, batch_size, seqlen, device)
136
+ fn = apply_rotary_emb_qkv_ if not compiled else torch.compile(apply_rotary_emb_qkv_)
137
+ out = fn(
138
+ qkv, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved,
139
+ num_heads_q=None if not gqa else nheads
140
+ )
141
+ cos_pt, sin_pt = index_cos_sin(cos, sin, seqlen_offsets, seqlen)
142
+ if not gqa:
143
+ q_pt, k_pt, v_pt = qkv_pt.unbind(2)
144
+ else:
145
+ q_pt, k_pt, v_pt = qkv_pt.split([nheads, nheads_k, nheads_k], dim=2)
146
+ q_pt = apply_rotary_emb_torch(
147
+ q_pt.float(), cos_pt.float(), sin_pt.float(), interleaved=interleaved
148
+ ).to(dtype=dtype)
149
+ k_pt = apply_rotary_emb_torch(
150
+ k_pt.float(), cos_pt.float(), sin_pt.float(), interleaved=interleaved
151
+ ).to(dtype=dtype)
152
+ if not gqa:
153
+ out_pt = torch.stack([q_pt, k_pt, v_pt], dim=2)
154
+ else:
155
+ out_pt = torch.cat([q_pt, k_pt, v_pt], dim=2)
156
+ print(f"Output max diff: {(out - out_pt).abs().max().item()}")
157
+
158
+ g = torch.randn_like(out)
159
+ g_pt = g.clone() # Since inplace=True, we modify the gradient inplace
160
+ out.backward(g)
161
+ out_pt.backward(g_pt)
162
+ print(f"Grad max diff: {(qkv.grad - qkv_pt.grad).abs().max().item()}")
163
+
164
+ # Numerical error if we just do any arithmetic
165
+ atol = ((out_pt + 0.3 - 0.3) - out_pt).abs().max().item()
166
+ assert torch.allclose(out, out_pt, rtol=rtol, atol=2 * atol)
167
+ atol = ((qkv_pt.grad + 0.3 - 0.3) - qkv_pt.grad).abs().max().item()
168
+ assert torch.allclose(qkv.grad, qkv_pt.grad, rtol=rtol, atol=2 * atol)
169
+
170
+
171
+ @pytest.mark.parametrize(
172
+ "dtype", ([torch.float16] if not is_sm8x else [torch.float16, torch.bfloat16])
173
+ )
174
+ # @pytest.mark.parametrize('dtype', ([torch.float16]))
175
+ @pytest.mark.parametrize("seqlen_offsets_type", [0, int, torch.Tensor])
176
+ # @pytest.mark.parametrize("seqlen_offsets_type", [0])
177
+ @pytest.mark.parametrize("rotary_fraction", [1.0, 0.5])
178
+ # @pytest.mark.parametrize('rotary_fraction', [1.0])
179
+ @pytest.mark.parametrize("interleaved", [False, True])
180
+ # @pytest.mark.parametrize('interleaved', [False])
181
+ def test_rotary_emb_kv(interleaved, rotary_fraction, seqlen_offsets_type, dtype):
182
+ rtol = 1e-3
183
+ batch_size = 32
184
+ nheads = 4
185
+ seqlen = 781
186
+ headdim = 64
187
+ device = "cuda"
188
+ rotary_dim = int(rotary_fraction * headdim)
189
+ torch.manual_seed(42)
190
+ kv = torch.randn(
191
+ batch_size, seqlen, 2, nheads, headdim, dtype=dtype, device=device, requires_grad=True
192
+ )
193
+ kv_pt = kv.detach().clone().requires_grad_()
194
+ cos, sin = generate_cos_sin(seqlen, rotary_dim, device, dtype)
195
+ seqlen_offsets = generate_seqlen_offsets(seqlen_offsets_type, batch_size, seqlen, device)
196
+ out = apply_rotary_emb_kv_(kv, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved)
197
+ cos_pt, sin_pt = index_cos_sin(cos, sin, seqlen_offsets, seqlen)
198
+ k_pt = apply_rotary_emb_torch(
199
+ kv_pt[:, :, 0].float(), cos_pt.float(), sin_pt.float(), interleaved=interleaved
200
+ ).to(dtype=dtype)
201
+ out_pt = torch.stack([k_pt, kv_pt[:, :, 1]], dim=2)
202
+ print(f"Output max diff: {(out - out_pt).abs().max().item()}")
203
+
204
+ g = torch.randn_like(out)
205
+ g_pt = g.clone() # Since inplace=True, we modify the gradient inplace
206
+ out.backward(g)
207
+ out_pt.backward(g_pt)
208
+ print(f"Grad max diff: {(kv.grad - kv_pt.grad).abs().max().item()}")
209
+
210
+ # Numerical error if we just do any arithmetic
211
+ atol = ((out_pt + 0.3 - 0.3) - out_pt).abs().max().item()
212
+ assert torch.allclose(out, out_pt, rtol=rtol, atol=2 * atol)
213
+ atol = ((kv_pt.grad + 0.3 - 0.3) - kv_pt.grad).abs().max().item()
214
+ assert torch.allclose(kv.grad, kv_pt.grad, rtol=rtol, atol=2 * atol)
215
+
216
+
217
+ @pytest.mark.parametrize(
218
+ "dtype", ([torch.float16] if not is_sm8x else [torch.float16, torch.bfloat16])
219
+ )
220
+ # @pytest.mark.parametrize("dtype", ([torch.float16]))
221
+ @pytest.mark.parametrize("seqlen_offsets_type", [0, int, torch.Tensor])
222
+ # @pytest.mark.parametrize("seqlen_offsets_type", [0])
223
+ @pytest.mark.parametrize("rotary_fraction", [1.0, 0.5])
224
+ # @pytest.mark.parametrize("rotary_fraction", [1.0])
225
+ @pytest.mark.parametrize("interleaved", [False, True])
226
+ # @pytest.mark.parametrize("interleaved", [True])
227
+ @pytest.mark.parametrize("inplace", [False, True])
228
+ # @pytest.mark.parametrize("inplace", [False])
229
+ def test_rotary_emb_varlen_func(inplace, interleaved, rotary_fraction, seqlen_offsets_type, dtype):
230
+ rtol = 1e-3
231
+ batch_size = 32
232
+ nheads = 4
233
+ seqlen = 217
234
+ headdim = 128
235
+ device = "cuda"
236
+ rotary_dim = int(rotary_fraction * headdim)
237
+ torch.manual_seed(42)
238
+ x = torch.randn(batch_size, seqlen, nheads, headdim, dtype=dtype, device=device)
239
+ x_pt = x.detach().clone().requires_grad_()
240
+ lengths = torch.randint(max(1, seqlen - 20), seqlen + 1, (batch_size, 1), device=device)
241
+ padding_mask = rearrange(torch.arange(seqlen, device=device), "s -> 1 s") < lengths
242
+ x_unpad, indices, cu_seqlens, max_seqlen, _ = unpad_input(x, padding_mask)
243
+ x_unpad_clone = x_unpad.clone()
244
+ x_unpad = x_unpad.requires_grad_()
245
+ cos, sin = generate_cos_sin(seqlen, rotary_dim, device, dtype)
246
+ seqlen_offsets = generate_seqlen_offsets(seqlen_offsets_type, batch_size, seqlen, device)
247
+ out_unpad = apply_rotary_emb(
248
+ x_unpad,
249
+ cos,
250
+ sin,
251
+ seqlen_offsets=seqlen_offsets,
252
+ interleaved=interleaved,
253
+ inplace=inplace,
254
+ cu_seqlens=cu_seqlens,
255
+ max_seqlen=max_seqlen,
256
+ )
257
+ out = pad_input(out_unpad, indices, batch_size, seqlen)
258
+ cos_pt, sin_pt = index_cos_sin(cos, sin, seqlen_offsets, seqlen)
259
+ out_pt = apply_rotary_emb_torch(
260
+ x_pt.float(), cos_pt.float(), sin_pt.float(), interleaved=interleaved
261
+ ).to(dtype=dtype)
262
+ out_pt = out_pt.masked_fill(rearrange(~padding_mask, "b s -> b s 1 1"), 0.0)
263
+ print(f"Output max diff: {(out - out_pt).abs().max().item()}")
264
+
265
+ g = torch.randn_like(out)
266
+ g_pt = g.clone() # If inplace=True, we might modify the gradient inplace
267
+ out.backward(g)
268
+ out_pt.backward(g_pt)
269
+ x_grad = pad_input(x_unpad.grad, indices, batch_size, seqlen)
270
+ print(f"Grad max diff: {(x_grad - x_pt.grad).abs().max().item()}")
271
+
272
+ if not inplace:
273
+ assert torch.equal(x_unpad, x_unpad_clone)
274
+ # Numerical error if we just do any arithmetic
275
+ atol = ((out_pt + 0.3 - 0.3) - out_pt).abs().max().item()
276
+ assert torch.allclose(out, out_pt, rtol=rtol, atol=2 * atol)
277
+ atol = ((x_pt.grad + 0.3 - 0.3) - x_pt.grad).abs().max().item()
278
+ assert torch.allclose(x_grad, x_pt.grad, rtol=rtol, atol=2 * atol)
279
+
280
+
281
+ def test_compilation_count():
282
+ nheads = 4
283
+ headdim = 128
284
+ device = "cuda"
285
+ dtype = torch.float16
286
+ torch.manual_seed(42)
287
+
288
+ from triton.runtime.jit import JITFunction
289
+ from flash_attn.ops.triton.rotary import rotary_kernel
290
+ compilation_count = 0
291
+
292
+ def count_compilations(*args, **kwargs):
293
+ nonlocal compilation_count
294
+ compilation_count += 1
295
+
296
+ old_cache_func = JITFunction.cache_hook
297
+
298
+ try:
299
+ if hasattr(rotary_kernel, "cache"):
300
+ rotary_kernel.cache.clear()
301
+ else: # Triton 3.3 replaces cache with per-device device_caches
302
+ device = triton.runtime.driver.active.get_current_device()
303
+ # device_caches[device] returns a 4-tuple: (kernel_cache, target, backend, binder)
304
+ rotary_kernel.device_caches[device][0].clear()
305
+
306
+ JITFunction.cache_hook = count_compilations
307
+
308
+ for seqlen in (128, 256):
309
+ for batch_size in (4, 32):
310
+ x = torch.randn(batch_size, seqlen, nheads, headdim, dtype=dtype, device=device)
311
+ x.requires_grad_()
312
+ cos, sin = generate_cos_sin(seqlen, headdim, device, dtype)
313
+ out = apply_rotary_emb(x, cos, sin)
314
+ out.backward(torch.randn_like(out))
315
+
316
+ # Only two kernels are expected to be compiled:
317
+ # * for the forward pass (conjugate=False)
318
+ # * for the backward pass (conjugate=True)
319
+ assert compilation_count == 2
320
+ finally:
321
+ JITFunction.cache_hook = old_cache_func
Code/Baselines/flash-attention/tests/test_util.py ADDED
@@ -0,0 +1,274 @@
1
+ import math
2
+
3
+ import torch
4
+ from einops import rearrange, repeat
5
+ from flash_attn.bert_padding import pad_input, unpad_input
6
+
7
+
8
+ def generate_random_padding_mask(max_seqlen, batch_size, device, mode="random", zero_lengths=False):
9
+ assert mode in ["full", "random", "third"]
10
+ if mode == "full":
11
+ lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
12
+ elif mode == "random":
13
+ lengths = torch.randint(
14
+ max(0 if zero_lengths else 1, max_seqlen - 20), max_seqlen + 1, (batch_size, 1), device=device
15
+ )
16
+ elif mode == "third":
17
+ lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device)
18
+
19
+ if zero_lengths:
20
+ # Zero out the length of every 5th batch and of the last batch.
21
+ for i in range(batch_size):
22
+ if i % 5 == 0:
23
+ lengths[i] = 0
24
+ lengths[-1] = 0
25
+ padding_mask = (
26
+ repeat(torch.arange(max_seqlen, device=device), "s -> b s", b=batch_size) < lengths
27
+ )
28
+ return padding_mask
29
+
30
+
31
+ def generate_qkv(
32
+ q, k, v, query_padding_mask=None, key_padding_mask=None,
33
+ kvpacked=False, qkvpacked=False, add_unused_qkv=False,
34
+ query_unused_mask=None, key_unused_mask=None,
35
+ ):
36
+ """
37
+ Arguments:
38
+ q: (batch_size, seqlen_q, nheads, d)
39
+ k: (batch_size, seqlen_k, nheads_k, d)
40
+ v: (batch_size, seqlen_k, nheads_k, d)
41
+ query_padding_mask: (batch_size, seqlen), bool
42
+ key_padding_mask: (batch_size, seqlen), bool
43
+ """
44
+ assert not (kvpacked and qkvpacked)
45
+ batch_size, seqlen_q, nheads, d = q.shape
46
+ _, seqlen_k, nheads_k, _ = k.shape
47
+ assert k.shape == (batch_size, seqlen_k, nheads_k, d)
48
+ assert v.shape == (batch_size, seqlen_k, nheads_k, d)
49
+ if query_unused_mask is not None or key_unused_mask is not None:
50
+ assert not kvpacked
51
+ assert not qkvpacked
52
+
53
+ if query_padding_mask is not None:
54
+ q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, seqused_q = unpad_input(
55
+ q, query_padding_mask, query_unused_mask,
56
+ )
57
+ output_pad_fn = lambda output_unpad: pad_input(
58
+ output_unpad, indices_q, batch_size, seqlen_q
59
+ )
60
+ else:
61
+ q_unpad = rearrange(q, "b s h d -> (b s) h d")
62
+ cu_seqlens_q = torch.arange(
63
+ 0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q_unpad.device
64
+ )
65
+ seqused_q = None
66
+ max_seqlen_q = seqlen_q
67
+ output_pad_fn = lambda output_unpad: rearrange(
68
+ output_unpad, "(b s) h d -> b s h d", b=batch_size
69
+ )
70
+
71
+ if key_padding_mask is not None:
72
+ k_unpad, indices_k, cu_seqlens_k, max_seqlen_k, seqused_k = unpad_input(k, key_padding_mask, key_unused_mask)
73
+ v_unpad, _, _, _, _ = unpad_input(v, key_padding_mask, key_unused_mask)
74
+ else:
75
+ k_unpad = rearrange(k, "b s h d -> (b s) h d")
76
+ v_unpad = rearrange(v, "b s h d -> (b s) h d")
77
+ cu_seqlens_k = torch.arange(
78
+ 0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=k_unpad.device
79
+ )
80
+ seqused_k = None
81
+ max_seqlen_k = seqlen_k
82
+
83
+ if qkvpacked:
84
+ assert (query_padding_mask == key_padding_mask).all()
85
+ assert nheads == nheads_k
86
+ qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
87
+ qkv = torch.stack([q, k, v], dim=2)
88
+ if query_padding_mask is not None:
89
+ dqkv_pad_fn = lambda dqkv_unpad: pad_input(dqkv_unpad, indices_q, batch_size, seqlen_q)
90
+ else:
91
+ dqkv_pad_fn = lambda dqkv_unpad: rearrange(
92
+ dqkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
93
+ )
94
+ return (
95
+ qkv_unpad.detach().requires_grad_(),
96
+ cu_seqlens_q,
97
+ max_seqlen_q,
98
+ qkv.detach().requires_grad_(),
99
+ output_pad_fn,
100
+ dqkv_pad_fn,
101
+ )
102
+ elif kvpacked:
103
+ kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
104
+ kv = torch.stack([k, v], dim=2)
105
+ dq_pad_fn = output_pad_fn
106
+ if key_padding_mask is not None:
107
+ dkv_pad_fn = lambda dkv_unpad: pad_input(dkv_unpad, indices_k, batch_size, seqlen_k)
108
+ else:
109
+ dkv_pad_fn = lambda dkv_unpad: rearrange(
110
+ dkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
111
+ )
112
+ return (
113
+ q_unpad.detach().requires_grad_(),
114
+ kv_unpad.detach().requires_grad_(),
115
+ cu_seqlens_q,
116
+ cu_seqlens_k,
117
+ max_seqlen_q,
118
+ max_seqlen_k,
119
+ q.detach().requires_grad_(),
120
+ kv.detach().requires_grad_(),
121
+ output_pad_fn,
122
+ dq_pad_fn,
123
+ dkv_pad_fn,
124
+ )
125
+ else:
126
+ dq_pad_fn = output_pad_fn
127
+ if key_padding_mask is not None:
128
+ dk_pad_fn = lambda dk_unpad: pad_input(dk_unpad, indices_k, batch_size, seqlen_k)
129
+ else:
130
+ dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, "(b s) h d -> b s h d", b=batch_size)
131
+ return (
132
+ q_unpad.detach().requires_grad_(),
133
+ k_unpad.detach().requires_grad_(),
134
+ v_unpad.detach().requires_grad_(),
135
+ cu_seqlens_q,
136
+ cu_seqlens_k,
137
+ seqused_q,
138
+ seqused_k,
139
+ max_seqlen_q,
140
+ max_seqlen_k,
141
+ q.detach().requires_grad_(),
142
+ k.detach().requires_grad_(),
143
+ v.detach().requires_grad_(),
144
+ output_pad_fn,
145
+ dq_pad_fn,
146
+ dk_pad_fn,
147
+ )
148
+
149
+
150
+ def construct_local_mask(
151
+ seqlen_q,
152
+ seqlen_k,
153
+ window_size=(-1, -1), # -1 means infinite window size
154
+ query_padding_mask=None,
155
+ key_padding_mask=None,
156
+ device=None,
157
+ key_leftpad=None,
158
+ ):
159
+ row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
160
+ col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
161
+ if key_leftpad is not None:
162
+ key_leftpad = rearrange(key_leftpad, "b -> b 1 1 1")
163
+ col_idx = repeat(col_idx, "s -> b 1 1 s", b=key_leftpad.shape[0])
164
+ col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
165
+ sk = (
166
+ seqlen_k
167
+ if key_padding_mask is None
168
+ else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
169
+ )
170
+ sq = (
171
+ seqlen_q
172
+ if query_padding_mask is None
173
+ else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
174
+ )
175
+ if window_size[0] < 0:
176
+ return col_idx > row_idx + sk - sq + window_size[1]
177
+ else:
178
+ sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
179
+ return torch.logical_or(
180
+ col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
181
+ col_idx < row_idx + sk - sq - window_size[0],
182
+ )
183
+
184
+
185
+ def attention_ref(
186
+ q,
187
+ k,
188
+ v,
189
+ query_padding_mask=None,
190
+ key_padding_mask=None,
191
+ attn_bias=None,
192
+ dropout_p=0.0,
193
+ dropout_mask=None,
194
+ causal=False,
195
+ window_size=(-1, -1), # -1 means infinite window size
196
+ softcap=0.0,
197
+ upcast=True,
198
+ reorder_ops=False,
199
+ key_leftpad=None,
200
+ ):
201
+ """
202
+ Arguments:
203
+ q: (batch_size, seqlen_q, nheads, head_dim)
204
+ k: (batch_size, seqlen_k, nheads_k, head_dim)
205
+ v: (batch_size, seqlen_k, nheads_k, head_dim)
206
+ query_padding_mask: (batch_size, seqlen_q)
207
+ key_padding_mask: (batch_size, seqlen_k)
208
+ attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
209
+ dropout_p: float
210
+ dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k)
211
+ causal: whether to apply causal masking
212
+ window_size: (int, int), left and right window size
213
+ upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast
214
+ output back to fp16/bf16.
215
+ reorder_ops: whether to change the order of operations (scaling k instead of scaling q, etc.)
216
+ without changing the math. This is to estimate the numerical error from operation
217
+ reordering.
218
+ Output:
219
+ output: (batch_size, seqlen_q, nheads, head_dim)
220
+ attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout
221
+ """
222
+ if causal:
223
+ window_size = (window_size[0], 0)
224
+ dtype_og = q.dtype
225
+ if upcast:
226
+ q, k, v = q.float(), k.float(), v.float()
227
+ seqlen_q, seqlen_k = q.shape[1], k.shape[1]
228
+ k = repeat(k, "b s h d -> b s (h g) d", g=q.shape[2] // k.shape[2])
229
+ v = repeat(v, "b s h d -> b s (h g) d", g=q.shape[2] // v.shape[2])
230
+ d = q.shape[-1]
231
+ if not reorder_ops:
232
+ scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(d), k)
233
+ else:
234
+ scores = torch.einsum("bthd,bshd->bhts", q, k / math.sqrt(d))
235
+ if softcap > 0:
236
+ scores /= softcap
237
+ scores = scores.tanh()
238
+ scores *= softcap
239
+ if key_padding_mask is not None:
240
+ scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
241
+ if window_size[0] >= 0 or window_size[1] >= 0:
242
+ local_mask = construct_local_mask(
243
+ seqlen_q,
244
+ seqlen_k,
245
+ window_size,
246
+ query_padding_mask,
247
+ key_padding_mask,
248
+ q.device,
249
+ key_leftpad=key_leftpad,
250
+ )
251
+ scores.masked_fill_(local_mask, float("-inf"))
252
+ if attn_bias is not None:
253
+ scores = scores + attn_bias
254
+ attention = torch.softmax(scores, dim=-1).to(v.dtype)
255
+ # Some rows might be completely masked out so we fill them with zero instead of NaN
256
+ if window_size[0] >= 0 or window_size[1] >= 0:
257
+ attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0)
258
+ # We want to mask here so that the attention matrix doesn't have any NaNs
259
+ # Otherwise we'll get NaN in dV
260
+ if query_padding_mask is not None:
261
+ attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
262
+ dropout_scaling = 1.0 / (1 - dropout_p)
263
+ # attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling
264
+ # output = torch.einsum('bhts,bshd->bthd', attention_drop , v)
265
+ if dropout_mask is not None:
266
+ attention_drop = attention.masked_fill(~dropout_mask, 0.0)
267
+ else:
268
+ attention_drop = attention
269
+ output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling)
270
+ if query_padding_mask is not None:
271
+ output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
272
+ if key_padding_mask is not None:
273
+ output.masked_fill_(rearrange(torch.logical_not(torch.any(key_padding_mask, 1)), "b -> b 1 1 1"), 0.0)
274
+ return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
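Finally, a minimal call of the reference above (illustrative shapes; since upcast=True runs the math in fp32, this sketch does not need a GPU):

    import torch

    q = torch.randn(2, 128, 8, 64, dtype=torch.float16)
    k = torch.randn(2, 256, 2, 64, dtype=torch.float16)  # GQA: 8 q-heads over 2 kv-heads
    v = torch.randn_like(k)
    out, attn = attention_ref(q, k, v, causal=True, window_size=(64, 0))
    assert out.shape == (2, 128, 8, 64)
    assert attn.shape == (2, 8, 128, 256)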