ADAPT-Chase committed on
Commit
f111f61
·
verified ·
1 Parent(s): 16da1d3

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/__pycache__/__init__.cpython-312.pyc +0 -0
  2. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__init__.py +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/cpu_attn.py +920 -0
  4. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flash_attn.py +802 -0
  5. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flashinfer.py +1003 -0
  6. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flex_attention.py +526 -0
  7. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/linear_attn.py +67 -0
  8. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba1_attn.py +83 -0
  9. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba2_attn.py +205 -0
  10. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba_selectors.py +18 -0
  11. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__init__.py +0 -0
  12. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/common.py +1206 -0
  13. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/cutlass_mla.py +289 -0
  14. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/flashmla.py +199 -0
  15. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/pallas.py +406 -0
  16. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/rocm_aiter_fa.py +546 -0
  17. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/tree_attn.py +447 -0
  18. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/triton_attn.py +417 -0
  19. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/utils.py +715 -0
  20. tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/xformers.py +430 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (187 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/cpu_attn.py ADDED
@@ -0,0 +1,920 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from dataclasses import dataclass
4
+ from typing import Optional
5
+
6
+ import numpy as np
7
+ import torch
8
+ from torch.nn.functional import scaled_dot_product_attention
9
+
10
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
11
+ AttentionLayer,
12
+ AttentionMetadata, AttentionType,
13
+ is_quantized_kv_cache)
14
+ from vllm.attention.backends.utils import CommonAttentionState
15
+ from vllm.config import VllmConfig
16
+ from vllm.logger import init_logger
17
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
18
+ CommonAttentionMetadata)
19
+ from vllm.v1.core.sched.output import SchedulerOutput
20
+ from vllm.v1.kv_cache_interface import AttentionSpec
21
+ from vllm.v1.worker.gpu_input_batch import InputBatch
22
+
23
# Optional Intel Extension for PyTorch (IPEX) support. When importable,
# `_use_ipex` enables IPEX-backed paths (e.g. the chunked-prefill
# flash_attn_varlen_func call in TorchSDPABackendImpl.forward below).
try:
    import intel_extension_for_pytorch.llm.modules as ipex_modules
    _use_ipex = True
    # AttributeError is to handle a bug in ipex
    # https://github.com/intel/intel-extension-for-pytorch/pull/813
except (ImportError, AttributeError):
    _use_ipex = False

from vllm import _custom_ops as ops

# Module-level logger, per vLLM convention.
logger = init_logger(__name__)
34
+
35
+
36
class TorchSDPABackend(AttentionBackend):
    """Attention backend built on torch scaled_dot_product_attention
    with a paged KV cache (see the paged-attention impl classes below).
    """

    accept_output_buffer: bool = False

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16, torch.float32]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError if the paged-attention impl rejects head_size."""
        impl = _get_paged_attn_impl()
        is_valid, supported_head_sizes = impl.validate_head_size(head_size)
        if is_valid:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported_head_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_name() -> str:
        return "TORCH_SDPA_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["TorchSDPABackendImpl"]:
        return TorchSDPABackendImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return TorchSDPAMetadata

    @staticmethod
    def get_state_cls() -> type["CommonAttentionState"]:
        return CommonAttentionState

    @staticmethod
    def get_builder_cls() -> type["TorchSDPAMetadataBuilderV1"]:
        return TorchSDPAMetadataBuilderV1

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        # Delegate so the cache layout always matches the kernels of the
        # selected paged-attention implementation.
        impl = _get_paged_attn_impl()
        return impl.get_kv_cache_shape(num_blocks, block_size, num_kv_heads,
                                       head_size)

    @staticmethod
    def use_cascade_attention(*args, **kwargs) -> bool:
        return False
89
+
90
+
91
@dataclass
class TorchSDPAMetadata(AttentionMetadata):
    """Metadata for PagedAttention."""
    # (batch_size,). The length of sequences (entire tokens seen so far) per
    # sequence. In this backend, only the decode slice of the batch
    # (see TorchSDPAMetadataBuilderV1.build, which passes
    # seq_lens_cpu[num_prompt_req:num_reqs]).
    seq_lens_tensor: Optional[torch.Tensor]
    # Maximum sequence length in the batch. 0 if it is prefill-only batch.
    max_decode_seq_len: int
    # (batch_size, max_blocks_per_seq).
    # Block addresses per sequence. (Seq id -> list of physical block)
    # E.g., [0, 1, 2] means tokens are stored in 0th, 1st, and 2nd blocks
    # in the kv cache. Each block can contain up to block_size tokens.
    # 2nd dimensions are padded up to max_blocks_per_seq if it is cuda-graph
    # captured.
    block_tables: Optional[torch.Tensor]
    """Metadata for TorchSDPABackend.
    """
    # Currently, input sequences can only contain all prompts
    # or all decoding. True if all sequences are prompts.
    chunked_prefill: bool
    seq_lens: Optional[list[int]] = None  # For non-chunked prefill

    # For chunked prefill only
    max_query_len: Optional[int] = None
    max_kv_len: Optional[int] = None
    prefill_query_start_loc: Optional[torch.Tensor] = None
    kv_start_loc: Optional[torch.Tensor] = None
    prefill_block_tables: Optional[torch.Tensor] = None

    # For V1 logits index only
    query_start_loc: Optional[torch.Tensor] = None

    # Begin encoder attn & enc/dec cross-attn fields...
    # Encoder sequence lengths representation
    encoder_seq_lens: Optional[list[int]] = None
    encoder_seq_lens_tensor: Optional[torch.Tensor] = None

    # Maximum sequence length among encoder sequences
    max_encoder_seq_len: Optional[int] = None

    # Number of tokens input to encoder
    num_encoder_tokens: Optional[int] = None

    # Cross-attention memory-mapping data structures: slot mapping
    # and block tables
    cross_slot_mapping: Optional[torch.Tensor] = None
    cross_block_tables: Optional[torch.Tensor] = None

    def __post_init__(self):
        # Set during the execution of the first attention op.
        # It is a list because it is needed to set per prompt
        # when alibi slopes is used. It is because of the limitation
        # from xformer API.
        # will not appear in the __repr__ and __init__
        self.attn_bias: Optional[list[torch.Tensor]] = None
        self.encoder_attn_bias: Optional[list[torch.Tensor]] = None
        self.cross_attn_bias: Optional[list[torch.Tensor]] = None

    @property
    def is_all_encoder_attn_metadata_set(self):
        '''
        All attention metadata required for encoder attention is set.
        '''
        return ((self.encoder_seq_lens is not None)
                and (self.encoder_seq_lens_tensor is not None)
                and (self.max_encoder_seq_len is not None))

    @property
    def is_all_cross_attn_metadata_set(self):
        '''
        All attention metadata required for enc/dec cross-attention is set.

        Superset of encoder attention required metadata.
        '''
        return (self.is_all_encoder_attn_metadata_set
                and (self.cross_slot_mapping is not None)
                and (self.cross_block_tables is not None))

    @property
    def prefill_metadata(self) -> Optional["TorchSDPAMetadata"]:
        # Returns self (not a sliced view) when any prefill tokens exist;
        # callers are expected to use the prefill-specific fields only.
        if self.num_prefill_tokens == 0:
            return None
        return self

    @property
    def decode_metadata(self) -> Optional["TorchSDPAMetadata"]:
        # Returns self when any decode tokens exist (see prefill_metadata).
        if self.num_decode_tokens == 0:
            return None
        return self

    def get_seq_lens(
        self,
        attn_type: str,
    ):
        '''
        Extract appropriate sequence lengths from attention metadata
        according to attention type.

        Arguments:

        * attn_metadata: Attention metadata structure associated with attention
        * attn_type: encoder attention, decoder self-attention,
                     encoder/decoder cross-attention

        Returns:
        * Appropriate sequence lengths tensor for query
        * Appropriate sequence lengths tensor for key & value
        '''

        if (attn_type == AttentionType.DECODER
                or attn_type == AttentionType.ENCODER_ONLY):
            seq_lens_q = self.seq_lens
            seq_lens_kv = self.seq_lens
        elif attn_type == AttentionType.ENCODER:
            seq_lens_q = self.encoder_seq_lens
            seq_lens_kv = self.encoder_seq_lens
        elif attn_type == AttentionType.ENCODER_DECODER:
            # Cross-attention: queries come from the decoder, keys/values
            # from the encoder.
            seq_lens_q = self.seq_lens
            seq_lens_kv = self.encoder_seq_lens
        else:
            raise AttributeError(f"Invalid attention type {str(attn_type)}")
        return seq_lens_q, seq_lens_kv

    def get_attn_bias(
        self,
        attn_type: str,
    ) -> Optional[list[torch.Tensor]]:
        '''
        Extract appropriate attention bias from attention metadata
        according to attention type.

        Arguments:

        * attn_metadata: Attention metadata structure associated with attention
        * attn_type: encoder attention, decoder self-attention,
                     encoder/decoder cross-attention

        Returns:
        * Appropriate attention bias value given the attention type
        '''

        if (attn_type == AttentionType.DECODER
                or attn_type == AttentionType.ENCODER_ONLY):
            return self.attn_bias
        elif attn_type == AttentionType.ENCODER:
            return self.encoder_attn_bias
        elif attn_type == AttentionType.ENCODER_DECODER:
            return self.cross_attn_bias
        else:
            raise AttributeError(f"Invalid attention type {str(attn_type)}")

    def set_attn_bias(
        self,
        attn_bias: list[torch.Tensor],
        attn_type: str,
    ) -> None:
        '''
        Update appropriate attention bias field of attention metadata,
        according to attention type.

        Arguments:

        * attn_metadata: Attention metadata structure associated with attention
        * attn_bias: The desired attention bias value
        * attn_type: encoder attention, decoder self-attention,
                     encoder/decoder cross-attention
        '''

        if (attn_type == AttentionType.DECODER
                or attn_type == AttentionType.ENCODER_ONLY):
            self.attn_bias = attn_bias
        elif attn_type == AttentionType.ENCODER:
            self.encoder_attn_bias = attn_bias
        elif attn_type == AttentionType.ENCODER_DECODER:
            self.cross_attn_bias = attn_bias
        else:
            raise AttributeError(f"Invalid attention type {str(attn_type)}")

    def get_seq_len_block_table_args(
        self,
        attn_type: str,
    ) -> tuple:
        '''
        The particular choice of sequence-length- and block-table-related
        attributes which should be extracted from attn_metadata is dependent
        on the type of attention operation.

        Decoder attn -> select entirely decoder self-attention-related fields
        Encoder/decoder cross-attn -> select encoder sequence lengths &
                                      cross-attn block-tables fields
        Encoder attn -> select encoder sequence lengths fields & no block tables

        Arguments:

        * attn_metadata: Attention metadata structure associated with attention
        * is_prompt: True if prefill, False otherwise
        * attn_type: encoder attention, decoder self-attention,
                     encoder/decoder cross-attention

        Returns:

        * Appropriate sequence-lengths tensor
        * Appropriate max sequence-length scalar
        * Appropriate block tables (or None)
        '''

        if (attn_type == AttentionType.DECODER
                or attn_type == AttentionType.ENCODER_ONLY):
            # Decoder self-attention
            # Choose max_seq_len based on whether we are in prompt_run
            return (self.seq_lens_tensor, self.max_decode_seq_len,
                    self.block_tables)
        elif attn_type == AttentionType.ENCODER_DECODER:
            # Enc/dec cross-attention KVs match encoder sequence length;
            # cross-attention utilizes special "cross" block tables
            return (self.encoder_seq_lens_tensor, self.max_encoder_seq_len,
                    self.cross_block_tables)
        elif attn_type == AttentionType.ENCODER:
            # No block tables associated with encoder attention
            return (self.encoder_seq_lens_tensor, self.max_encoder_seq_len,
                    None)
        else:
            raise AttributeError(f"Invalid attention type {str(attn_type)}")
314
+
315
+
316
class TorchSDPAMetadataBuilderV1(AttentionMetadataBuilder[TorchSDPAMetadata]):
    """Builds TorchSDPAMetadata once per scheduling step.

    Pre-allocates scratch buffers sized to max_num_seqs so that
    reorder_batch() and build() avoid per-step allocations for them.
    """

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device) -> None:
        self.kv_cache_spec = kv_cache_spec
        self.vllm_config = vllm_config
        self.scheduler_config = vllm_config.scheduler_config

        # For reorder: scratch index arrays for requests in the prompt
        # (prefill) stage vs. the decode stage.
        self.reorder_prompt_req_index_list = np.empty(
            vllm_config.scheduler_config.max_num_seqs, dtype=np.int64)
        self.reorder_decode_req_index_list = np.empty(
            vllm_config.scheduler_config.max_num_seqs, dtype=np.int64)
        # Prompt-stage request count from the last reorder_batch() call;
        # build() uses it to split the batch into prefill/decode halves.
        self.num_prompt_req: int = 0

        # Cumulative KV-sequence start offsets; seq_start_loc_np is a numpy
        # view sharing storage with the CPU tensor.
        self.seq_start_loc_cpu = torch.zeros(
            vllm_config.scheduler_config.max_num_seqs + 1,
            dtype=torch.int32,
            device="cpu",
        )
        self.seq_start_loc_np = self.seq_start_loc_cpu.numpy()

    def reorder_batch(self, input_batch: InputBatch,
                      scheduler_output: SchedulerOutput) -> bool:
        """Reorder the batch so prompt-stage requests precede decode-stage
        requests. Returns True iff any swap was performed."""
        prompt_list_idx = 0
        decode_list_idx = 0
        # Classify every request: still consuming its prompt -> prefill,
        # otherwise decode. Both index lists are filled in ascending order.
        for req_index in range(input_batch.num_reqs):
            if input_batch.num_computed_tokens_cpu[
                    req_index] < input_batch.num_prompt_tokens[req_index]:
                # prompt stage
                self.reorder_prompt_req_index_list[prompt_list_idx] = req_index
                prompt_list_idx += 1
            else:
                # decode stage
                self.reorder_decode_req_index_list[decode_list_idx] = req_index
                decode_list_idx += 1
        assert decode_list_idx + prompt_list_idx == input_batch.num_reqs

        # Update prompt requests number
        self.num_prompt_req = prompt_list_idx

        # Count decode requests currently sitting inside the first
        # prompt_list_idx batch slots; the decode index list is ascending,
        # so stop at the first index beyond the prompt region.
        reorder_req_num = 0
        for req_index in range(decode_list_idx):
            if self.reorder_decode_req_index_list[req_index] < prompt_list_idx:
                reorder_req_num += 1
            else:
                break

        if reorder_req_num == 0:
            return False

        # Swap the last reorder_req_num prompt requests with the first
        # reorder_req_num decode requests so prompts occupy the front.
        reorder_prompt_list = (
            self.reorder_prompt_req_index_list[:prompt_list_idx]
            [-reorder_req_num:])
        reorder_decode_list = (
            self.reorder_decode_req_index_list[:decode_list_idx]
            [:reorder_req_num])
        assert reorder_decode_list.size == reorder_prompt_list.size

        for idx in range(reorder_req_num):
            prompt_req_index = reorder_prompt_list[idx].item()
            decode_req_index = reorder_decode_list[idx].item()
            input_batch.swap_states(prompt_req_index, decode_req_index)

        return True

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> TorchSDPAMetadata:
        """Assemble per-step metadata. Relies on reorder_batch() having
        placed the self.num_prompt_req prefill requests first."""
        num_reqs = common_attn_metadata.num_reqs
        max_query_len = common_attn_metadata.max_query_len

        seq_lens_cpu = common_attn_metadata.seq_lens_cpu
        seq_lens_np = seq_lens_cpu.numpy()
        num_prompt_req = self.num_prompt_req
        # Max lengths of the prefill and decode portions; 0 when a portion
        # is empty.
        max_prefill_seq_len = seq_lens_np[:num_prompt_req].max().item(
        ) if num_prompt_req > 0 else 0
        max_decode_seq_len = seq_lens_np[num_prompt_req:num_reqs].max().item(
        ) if num_prompt_req < num_reqs else 0
        # Cumulative KV start offsets over all requests (prefix sums).
        self.seq_start_loc_np[0] = 0
        np.cumsum(seq_lens_np, out=self.seq_start_loc_np[1:num_reqs + 1])

        # Token counts: prefill tokens are the first num_prompt_req query
        # spans, the rest are decode tokens.
        query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
        num_prefill_tokens = int(query_start_loc_cpu[num_prompt_req].item())
        num_decode_tokens = int(query_start_loc_cpu[num_reqs].item() -
                                num_prefill_tokens)

        slot_mapping = common_attn_metadata.slot_mapping.long()
        block_table_tensor = common_attn_metadata.block_table_tensor

        attn_metadata = TorchSDPAMetadata(
            num_prefills=num_prompt_req,
            num_prefill_tokens=num_prefill_tokens,
            num_decode_tokens=num_decode_tokens,
            slot_mapping=slot_mapping,
            # to ensure inference when chunked_prefill is disabled
            seq_lens=seq_lens_cpu.tolist(),
            seq_lens_tensor=seq_lens_cpu[num_prompt_req:num_reqs],  # decode
            max_decode_seq_len=max_decode_seq_len,  # decode
            block_tables=block_table_tensor[num_prompt_req:num_reqs],  # decode
            chunked_prefill=self.scheduler_config.chunked_prefill_enabled,
            max_query_len=max_query_len,
            max_kv_len=max_prefill_seq_len,
            prefill_query_start_loc=query_start_loc_cpu[:num_prompt_req +
                                                        1],  # prefill
            kv_start_loc=self.seq_start_loc_cpu[:num_prompt_req +
                                                1],  # prefill
            prefill_block_tables=block_table_tensor[:
                                                    num_prompt_req],  # prefill
            query_start_loc=query_start_loc_cpu[:num_reqs +
                                                1],  # for logits index
            multi_modal_placeholder_index_maps=None,
            enable_kv_scales_calculation=False,
        )

        return attn_metadata
433
+
434
+
435
class TorchSDPABackendImpl(AttentionImpl[TorchSDPAMetadata]):
    """Attention implementation using torch SDPA for (non-chunked) prefill,
    an IPEX varlen kernel for chunked prefill, and a paged-attention kernel
    for decode."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: str = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
    ) -> None:
        if kv_sharing_target_layer_name is not None:
            # NOTE(review): message says "V0" but this is the V1 backend
            # path — wording only, the rejection itself is intended.
            raise NotImplementedError("KV sharing is not supported in V0.")
        if logits_soft_cap is not None:
            # Soft cap is silently ignored rather than rejected.
            logger.warning_once("Torch SPDA does not support logits soft cap. "
                                "Outputs may be slightly off.")
        self.paged_attn_impl = _get_paged_attn_impl()
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        self.sliding_window = sliding_window
        self.kv_cache_dtype = kv_cache_dtype

        # GQA/MQA: number of query heads served by each KV head.
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        # An explicit attention-bias mask is only required for ALiBi or
        # sliding-window; otherwise SDPA's is_causal flag is sufficient.
        self.need_mask = (self.alibi_slopes is not None
                          or self.sliding_window is not None)

        if is_quantized_kv_cache(kv_cache_dtype) and not _use_ipex:
            raise NotImplementedError(
                "Torch SDPA backend FP8 KV cache requires "
                "intel_extension_for_pytorch support.")
        self.attn_type = attn_type

    def forward(
        self,
        layer: AttentionLayer,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: TorchSDPAMetadata,  # type: ignore
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with torch SDPA and PagedAttention.

        Args:
            query: shape = [num_tokens, num_heads * head_size]
            key: shape = [num_tokens, num_kv_heads * head_size]
            value: shape = [num_tokens, num_kv_heads * head_size]
            kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size]
                NOTE: kv_cache will be an empty tensor with shape [0]
                for profiling run.
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for TorchSDPABackendImpl")

        # For warming-up
        if attn_metadata is None:
            return query

        attn_type = self.attn_type
        if (attn_type == AttentionType.ENCODER
                and (not attn_metadata.is_all_encoder_attn_metadata_set)):
            raise AttributeError("Encoder attention requires setting "
                                 "encoder metadata attributes.")
        elif (attn_type == AttentionType.ENCODER_DECODER
              and (not attn_metadata.is_all_cross_attn_metadata_set)):
            raise AttributeError("Encoder/decoder cross-attention "
                                 "requires setting cross-attention "
                                 "metadata attributes.")

        # Reshape the query, key, and value tensors to
        # [num_tokens, num_(kv_)heads, head_size].
        query = query.view(-1, self.num_heads, self.head_size)
        if key is not None:
            assert value is not None
            key = key.view(-1, self.num_kv_heads, self.head_size)
            value = value.view(-1, self.num_kv_heads, self.head_size)
        else:
            assert value is None

        if (attn_type != AttentionType.ENCODER and kv_cache.numel() > 0):
            # KV-cache during decoder-self- or
            # encoder-decoder-cross-attention, but not
            # during encoder attention.
            #
            # Even if there are no new key/value pairs to cache,
            # we still need to break out key_cache and value_cache
            # i.e. for later use by paged attention
            key_cache, value_cache = self.paged_attn_impl.split_kv_cache(
                kv_cache, self.num_kv_heads, self.head_size)

            if (key is not None) and (value is not None):
                if attn_type == AttentionType.ENCODER_DECODER:
                    # Update cross-attention KV cache (prefill-only)
                    # During cross-attention decode, key & value will be None,
                    # preventing this IF-statement branch from running
                    updated_slot_mapping = attn_metadata.cross_slot_mapping
                else:
                    # Update self-attention KV cache (prefill/decode)
                    updated_slot_mapping = attn_metadata.slot_mapping

                self.paged_attn_impl.write_to_paged_cache(
                    key, value, key_cache, value_cache, updated_slot_mapping,
                    self.kv_cache_dtype, layer._k_scale, layer._v_scale)

        if attn_type != AttentionType.ENCODER:
            # Decoder self-attention supports chunked prefill.
            # Encoder/decoder cross-attention requires no chunked
            # prefill (100% prefill or 100% decode tokens, no mix)
            num_prefill_tokens = attn_metadata.num_prefill_tokens
            num_decode_tokens = attn_metadata.num_decode_tokens
        else:
            # Encoder attention - chunked prefill is not applicable;
            # derive token-count from query shape & and treat them
            # as 100% prefill tokens
            assert attn_metadata.num_encoder_tokens is not None
            num_prefill_tokens = attn_metadata.num_encoder_tokens
            num_decode_tokens = 0

        if attn_type == AttentionType.DECODER:
            # Only enforce this shape-constraint for decoder
            # self-attention
            assert key.shape[0] == num_prefill_tokens + num_decode_tokens
            assert value.shape[0] == num_prefill_tokens + num_decode_tokens

        output = torch.empty_like(query)
        if prefill_meta := attn_metadata.prefill_metadata:
            # NOTE(review): prefill_metadata is a property returning self,
            # so prefill_meta.prefill_metadata.chunked_prefill is equivalent
            # to prefill_meta.chunked_prefill — redundant chaining only.
            if not prefill_meta.prefill_metadata.chunked_prefill:  # type: ignore
                assert attn_metadata.seq_lens is not None
                self._run_sdpa_forward(output,
                                       query,
                                       key,
                                       value,
                                       prefill_meta,
                                       attn_type=attn_type)
            else:
                # prefix-enabled attention
                assert not self.need_mask
                import intel_extension_for_pytorch.llm.modules as ipex_modules
                # NOTE(review): re-allocates `output` created just above;
                # all subsequent writes (and the decode path below) target
                # this new tensor, so results are unaffected — the first
                # allocation is simply unused on this branch.
                output = torch.empty_like(query)
                ipex_modules.PagedAttention.flash_attn_varlen_func(
                    output[:prefill_meta.num_prefill_tokens, :, :],
                    query[:prefill_meta.num_prefill_tokens, :, :],
                    key_cache,
                    value_cache,
                    prefill_meta.prefill_query_start_loc,
                    prefill_meta.kv_start_loc,
                    prefill_meta.max_query_len,
                    prefill_meta.max_kv_len,
                    self.scale,
                    True,
                    prefill_meta.prefill_block_tables,
                    self.alibi_slopes,
                )

        if decode_meta := attn_metadata.decode_metadata:
            assert attn_type != AttentionType.ENCODER_ONLY, (
                "Encoder-only models should not have decode metadata.")
            # Decoding run.
            (
                seq_lens_arg,
                max_seq_len_arg,
                block_tables_arg,
            ) = decode_meta.get_seq_len_block_table_args(attn_type)

            # Decode tokens occupy the tail of the batch (after reordering),
            # so index past the prefill tokens.
            self.paged_attn_impl.forward_decode(
                output[attn_metadata.num_prefill_tokens:, :, :],
                query[attn_metadata.num_prefill_tokens:, :, :],
                key_cache,
                value_cache,
                block_tables_arg,
                seq_lens_arg,
                max_seq_len_arg,
                self.kv_cache_dtype,
                self.num_kv_heads,
                self.scale,
                self.alibi_slopes,
                layer._k_scale,
                layer._v_scale,
            )

        # Reshape the output tensor.
        return output.view(-1, self.num_heads * self.head_size)

    def _run_sdpa_forward(
        self,
        output: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attn_metadata: TorchSDPAMetadata,
        attn_type: str = AttentionType.DECODER,
    ) -> None:
        """Run torch SDPA per sequence, writing results into `output`.

        Tensors are laid out as [num_tokens, heads, head_size]; each
        sequence's span is processed independently with its own mask.
        """
        # Expand KV heads to match query heads for GQA/MQA.
        if self.num_kv_heads != self.num_heads:
            key = key.repeat_interleave(self.num_queries_per_kv, dim=1)
            value = value.repeat_interleave(self.num_queries_per_kv, dim=1)

        # Build (and cache on the metadata) the per-sequence bias masks.
        attn_masks = attn_metadata.get_attn_bias(attn_type)
        if attn_masks is None:
            if self.alibi_slopes is not None:
                attn_masks = _make_alibi_bias(
                    self.alibi_slopes, query.dtype,
                    attn_metadata.seq_lens)  # type: ignore
            elif self.sliding_window is not None:
                assert attn_metadata.seq_lens is not None
                attn_masks = _make_sliding_window_bias(
                    attn_metadata.seq_lens, self.sliding_window,
                    query.dtype)  # type: ignore
            else:
                seq_lens, _ = attn_metadata.get_seq_lens(attn_type)
                attn_masks = [None] * len(seq_lens)
            attn_metadata.set_attn_bias(attn_masks, attn_type)

        # Move the token dimension next-to-last: [heads, num_tokens, head_size]
        # as expected by scaled_dot_product_attention.
        query = query.movedim(0, query.dim() - 2)
        key = key.movedim(0, key.dim() - 2)
        value = value.movedim(0, value.dim() - 2)

        # Causality applies only to decoder self-attention (and only when no
        # explicit mask overrides it below).
        causal_attn = (attn_type == AttentionType.DECODER)

        seq_lens_q, seq_lens_kv = attn_metadata.get_seq_lens(attn_type)
        start_q, start_kv = 0, 0
        for seq_len_q, seq_len_kv, mask in zip(seq_lens_q, seq_lens_kv,
                                               attn_masks):
            end_q = start_q + seq_len_q
            end_kv = start_kv + seq_len_kv
            sub_out = scaled_dot_product_attention(
                query[None, :, start_q:end_q, :],
                key[None, :, start_kv:end_kv, :],
                value[None, :, start_kv:end_kv, :],
                attn_mask=mask,
                dropout_p=0.0,
                is_causal=causal_attn and mask is None,
                scale=self.scale).squeeze(0).movedim(query.dim() - 2, 0)
            output[start_q:end_q, :, :] = sub_out
            start_q, start_kv = end_q, end_kv
684
+
685
+
686
+ def _make_alibi_bias(
687
+ alibi_slopes: torch.Tensor,
688
+ dtype: torch.dtype,
689
+ seq_lens: list[int],
690
+ ) -> list[torch.Tensor]:
691
+ attn_biases: list[torch.Tensor] = []
692
+ for seq_len in seq_lens:
693
+ bias = torch.arange(seq_len, dtype=dtype)
694
+ # NOTE(zhuohan): HF uses
695
+ # `bias = bias[None, :].repeat(seq_len, 1)`
696
+ # here. We find that both biases give the same results, but
697
+ # the bias below more accurately follows the original ALiBi
698
+ # paper.
699
+ bias = bias[None, :] - bias[:, None]
700
+
701
+ num_heads = alibi_slopes.shape[0]
702
+ bias = bias[None, :].repeat((num_heads, 1, 1))
703
+ bias.mul_(alibi_slopes[:, None, None]).unsqueeze_(0)
704
+ inf_mask = torch.empty(
705
+ (1, seq_len, seq_len),
706
+ dtype=bias.dtype).fill_(-torch.inf).triu_(diagonal=1)
707
+ attn_biases.append((bias + inf_mask).to(dtype))
708
+
709
+ return attn_biases
710
+
711
+
712
+ def _make_sliding_window_bias(
713
+ seq_lens: list[int],
714
+ window_size: Optional[int],
715
+ dtype: torch.dtype,
716
+ ) -> list[torch.Tensor]:
717
+ attn_biases: list[torch.Tensor] = []
718
+ for seq_len in seq_lens:
719
+ tensor = torch.full(
720
+ (1, seq_len, seq_len),
721
+ dtype=dtype,
722
+ fill_value=1,
723
+ )
724
+ shift = 0
725
+ mask = torch.tril(tensor, diagonal=shift).to(dtype) # type: ignore
726
+ if window_size is not None:
727
+ mask = torch.triu(mask, diagonal=shift - window_size + 1)
728
+ mask = torch.log(mask)
729
+ attn_biases.append(mask.to(dtype))
730
+
731
+ return attn_biases
732
+
733
+
734
class _PagedAttention:
    """Paged-attention helpers backed by vLLM's custom ops (`ops.*`)."""

    @staticmethod
    def validate_head_size(head_size: int) -> tuple[bool, list[int]]:
        """Return (is_supported, supported_head_sizes)."""
        supported = [32, 64, 80, 96, 112, 128, 192, 256]
        return head_size in supported, supported

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
        *args,
    ) -> tuple[int, ...]:
        """Flat layout: one K plane and one V plane per block."""
        return 2, num_blocks, block_size * num_kv_heads * head_size

    @staticmethod
    def split_kv_cache(
        kv_cache: torch.Tensor,
        num_kv_heads: int,
        head_size: int,
        *args,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Split the combined cache into the K/V views the kernels expect.

        The key cache packs `packing` consecutive head-dim elements together,
        where `packing` is the number of elements that fit in 16 bytes.
        """
        packing = 16 // kv_cache.element_size()
        num_blocks = kv_cache.shape[1]
        key_cache = kv_cache[0].view(num_blocks, num_kv_heads,
                                     head_size // packing, -1, packing)
        value_cache = kv_cache[1].view(num_blocks, num_kv_heads, head_size,
                                       -1)
        return key_cache, value_cache

    @staticmethod
    def write_to_paged_cache(
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        slot_mapping: torch.Tensor,
        kv_cache_dtype: str,
        k_scale: torch.Tensor,
        v_scale: torch.Tensor,
        *args,
    ) -> None:
        """Scatter the new K/V tokens into the paged cache."""
        flat_slots = slot_mapping.flatten()
        ops.reshape_and_cache(
            key,
            value,
            key_cache,
            value_cache,
            flat_slots,
            kv_cache_dtype,
            k_scale,
            v_scale,
        )

    @staticmethod
    def forward_decode(
        output: torch.Tensor,
        query: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        context_lens: torch.Tensor,
        max_context_len: int,
        kv_cache_dtype: str,
        num_kv_heads: int,
        scale: float,
        alibi_slopes: Optional[torch.Tensor],
        k_scale: torch.Tensor,
        v_scale: torch.Tensor,
        *args,
    ) -> None:
        """Run single-token decode attention via `paged_attention_v1`."""
        block_size = value_cache.shape[3]
        # Block-sparse attention is unused here; pass neutral defaults.
        tp_rank: int = 0
        blocksparse_local_blocks: int = 0
        blocksparse_vert_stride: int = 0
        blocksparse_block_size: int = 64
        blocksparse_head_sliding_step: int = 0

        ops.paged_attention_v1(
            output,
            query,
            key_cache,
            value_cache,
            num_kv_heads,
            scale,
            block_tables,
            context_lens,
            block_size,
            max_context_len,
            alibi_slopes,
            kv_cache_dtype,
            k_scale,
            v_scale,
            tp_rank,
            blocksparse_local_blocks,
            blocksparse_vert_stride,
            blocksparse_block_size,
            blocksparse_head_sliding_step,
        )

    @staticmethod
    def copy_blocks(
        kv_caches: list[torch.Tensor],
        src_to_dists: torch.Tensor,
        *args,
    ) -> None:
        """Copy cache blocks between slots in every layer's KV cache."""
        key_caches = [cache[0] for cache in kv_caches]
        value_caches = [cache[1] for cache in kv_caches]
        ops.copy_blocks(key_caches, value_caches, src_to_dists)
846
+
847
+
848
class _IPEXPagedAttention(_PagedAttention):
    """Paged-attention variant that delegates to Intel IPEX kernels."""

    @staticmethod
    def validate_head_size(head_size: int) -> tuple[bool, list[int]]:
        # The IPEX path places no restriction on the head size.
        return True, []

    @staticmethod
    def split_kv_cache(
        kv_cache: torch.Tensor,
        num_kv_heads: int,
        head_size: int,
        *args,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Split into (num_blocks, num_kv_heads, block_size, head_size) views;
        IPEX uses the same layout for keys and values."""
        num_blocks = kv_cache.shape[1]
        layout = (num_blocks, num_kv_heads, -1, head_size)
        return kv_cache[0].view(layout), kv_cache[1].view(layout)

    @staticmethod
    def write_to_paged_cache(
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        slot_mapping: torch.Tensor,
        kv_cache_dtype: str,
        k_scale: torch.Tensor,
        v_scale: torch.Tensor,
        *args,
    ) -> None:
        """Scatter new K/V tokens into the cache (scales are unused by IPEX)."""
        slots = slot_mapping.flatten().int()
        ipex_modules.PagedAttention.reshape_and_cache(key, value, key_cache,
                                                      value_cache, slots)

    @staticmethod
    def forward_decode(
        output: torch.Tensor,
        query: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        block_tables: torch.Tensor,
        context_lens: torch.Tensor,
        max_context_len: int,
        kv_cache_dtype: str,
        num_kv_heads: int,
        scale: float,
        alibi_slopes: Optional[torch.Tensor],
        k_scale: torch.Tensor,
        v_scale: torch.Tensor,
        *args,
    ) -> None:
        """Run single-token decode attention via the IPEX kernel."""
        block_size = value_cache.shape[2]
        queries_per_kv = query.size(1) // num_kv_heads
        # Map every query head to the index of the KV head it reads from
        # (equivalent to arange(num_kv_heads) repeated per query head).
        head_mapping = torch.arange(
            0,
            num_kv_heads,
            device="cpu",
            dtype=torch.int32,
        ).repeat_interleave(queries_per_kv)
        ipex_modules.PagedAttention.single_query_cached_kv_attention(
            output, query.contiguous(), key_cache, value_cache, head_mapping,
            scale, block_tables, context_lens, block_size, max_context_len,
            alibi_slopes)
914
+
915
+
916
def _get_paged_attn_impl():
    """Pick the IPEX-backed implementation when IPEX is available."""
    return _IPEXPagedAttention if _use_ipex else _PagedAttention
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flash_attn.py ADDED
@@ -0,0 +1,802 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with FlashAttention."""
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import numpy as np
8
+ import torch
9
+
10
+ from vllm import _custom_ops as ops
11
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
12
+ AttentionMetadata, AttentionType,
13
+ is_quantized_kv_cache)
14
+ from vllm.attention.layer import Attention
15
+ from vllm.attention.ops.merge_attn_states import merge_attn_states
16
+ from vllm.attention.utils.fa_utils import (flash_attn_supports_fp8,
17
+ get_flash_attn_version,
18
+ is_flash_attn_varlen_func_available)
19
+
20
+ if is_flash_attn_varlen_func_available():
21
+ from vllm.attention.utils.fa_utils import (flash_attn_varlen_func,
22
+ get_scheduler_metadata,
23
+ reshape_and_cache_flash)
24
+
25
+ from vllm.config import VllmConfig, get_layers_from_vllm_config
26
+ from vllm.logger import init_logger
27
+ from vllm.utils import cdiv
28
+ from vllm.v1.attention.backends.utils import (AttentionCGSupport,
29
+ AttentionMetadataBuilder,
30
+ CommonAttentionMetadata,
31
+ get_kv_cache_layout)
32
+ from vllm.v1.kv_cache_interface import AttentionSpec
33
+
34
+ logger = init_logger(__name__)
35
+
36
+ # NOTE(woosuk): This is an arbitrary number. Tune it if needed.
37
+ _DEFAULT_MAX_NUM_SPLITS_FOR_CUDA_GRAPH = 16
38
+
39
+
40
class FlashAttentionBackend(AttentionBackend):
    """vLLM v1 attention backend built on FlashAttention varlen kernels."""

    accept_output_buffer: bool = True

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        return [32, 64, 96, 128, 160, 192, 224, 256]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError if `head_size` is not supported by this backend."""
        supported_head_sizes = cls.get_supported_head_sizes()
        if head_size in supported_head_sizes:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported_head_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_name() -> str:
        return "FLASH_ATTN_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["FlashAttentionImpl"]:
        return FlashAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return FlashAttentionMetadata

    @staticmethod
    def get_builder_cls() -> type["FlashAttentionMetadataBuilder"]:
        return FlashAttentionMetadataBuilder

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        """Logical KV-cache shape; dim 0 separates keys from values."""
        if block_size % 16 != 0:
            raise ValueError("Block size must be a multiple of 16.")
        return (2, num_blocks, block_size, num_kv_heads, head_size)

    @staticmethod
    def get_kv_cache_stride_order() -> tuple[int, ...]:
        """Permutation from `get_kv_cache_shape` to the physical layout."""
        cache_layout = get_kv_cache_layout()
        if cache_layout == "NHD":
            return (0, 1, 2, 3, 4)
        if cache_layout == "HND":
            # Heads before tokens in memory.
            return (0, 1, 3, 2, 4)
        raise ValueError(f"Unknown cache layout format {cache_layout}.")

    @staticmethod
    def get_fp8_dtype_for_flashattn(kv_cache_dtype: str) -> torch.dtype:
        """Map a vLLM fp8 cache-dtype string to the torch dtype FA expects."""
        if kv_cache_dtype not in ("fp8", "fp8_e4m3"):
            raise ValueError(f"Unrecognized FP8 dtype: {kv_cache_dtype}")
        return torch.float8_e4m3fn
109
+
110
+
111
@dataclass
class FlashAttentionMetadata:
    """Per-batch metadata consumed by FlashAttentionImpl.forward()."""
    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ---------------------|
    # |-- query_len ---|

    num_actual_tokens: int  # Number of tokens excluding padding.
    # Longest per-request query length in this batch.
    max_query_len: int
    # Cumulative query lengths; passed to flash-attn as cu_seqlens_q.
    query_start_loc: torch.Tensor
    max_seq_len: int
    seq_lens: torch.Tensor
    block_table: torch.Tensor
    # Flat cache-slot index for each token (used by reshape_and_cache_flash).
    slot_mapping: torch.Tensor

    # For cascade attention.
    use_cascade: bool
    common_prefix_len: int
    cu_prefix_query_lens: Optional[torch.Tensor]
    prefix_kv_lens: Optional[torch.Tensor]
    suffix_kv_lens: Optional[torch.Tensor]

    # Optional aot scheduling
    scheduler_metadata: Optional[torch.Tensor] = None
    prefix_scheduler_metadata: Optional[torch.Tensor] = None
    # Upper bound on split-KV; non-zero only on the full-cudagraph path.
    max_num_splits: int = 0

    causal: bool = True
142
+
143
+
144
def _get_sliding_window_configs(
        vllm_config: VllmConfig) -> set[Optional[tuple[int, int]]]:
    """Collect the distinct sliding-window settings across all attn layers."""
    configs: set[Optional[tuple[int, int]]] = set()
    attn_layers = get_layers_from_vllm_config(vllm_config, Attention)
    for layer in attn_layers.values():
        assert isinstance(layer.impl, FlashAttentionImpl)
        configs.add(layer.impl.sliding_window)
    return configs
153
+
154
+
155
class FlashAttentionMetadataBuilder(
        AttentionMetadataBuilder[FlashAttentionMetadata]):
    """Builds a FlashAttentionMetadata per scheduling step."""
    # FA3:
    # Supports full cudagraphs for all cases.
    #
    # FA2:
    # For FA2, a graph is captured with max_query_len=1, (which is what we
    # capture by default for num_tokens <= max_num_seqs when there is no
    # spec-decode) then these graphs will not work for mixed prefill-decode
    # (unlike FA3). This is due to special max_query_len=1 packed-GQA handling
    # in FA2.
    # In summary if we are running with spec decodes the graphs would
    # work for mixed prefill-decode and uniform-decode. But for non-spec decodes
    # the graphs would not work for mixed prefill-decode; sorta the inverse
    # of UNIFORM_SINGLE_TOKEN_DECODE.
    # Theres probably a better way to describe this using `AttentionCGSupport`
    # but for now just set it to `UNIFORM_BATCH` to get use to drop down
    # to FULL_AND_PIECEWISE.
    # TODO(luka, lucas): audit FA2 as part of:
    # https://github.com/vllm-project/vllm/issues/22945
    cudagraph_support = AttentionCGSupport.ALWAYS \
        if get_flash_attn_version() == 3 else AttentionCGSupport.UNIFORM_BATCH

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.parallel_config = vllm_config.parallel_config
        self.cache_config = vllm_config.cache_config
        self.compilation_config = vllm_config.compilation_config
        self.device = device

        self.num_heads_q = self.model_config.get_num_attention_heads(
            self.parallel_config)
        self.num_heads_kv = self.model_config.get_num_kv_heads(
            self.parallel_config)
        self.kv_cache_dtype = kv_cache_spec.dtype
        self.headdim = self.model_config.get_head_size()
        self.block_size = kv_cache_spec.block_size

        self.max_num_splits = 0  # No upper bound on the number of splits.
        # AOT (ahead-of-time) scheduling is only available with FA3.
        self.aot_schedule = (get_flash_attn_version() == 3)

        self.use_full_cuda_graph = \
            self.compilation_config.cudagraph_mode.has_full_cudagraphs()

        if self.use_full_cuda_graph and self.aot_schedule:
            # NOTE: max_cudagraph_size and scheduler_metadata are only
            # defined on this path (full cuda graph + FA3); build() only
            # reads them when an AOT schedule was produced.
            self.max_cudagraph_size = self.compilation_config.max_capture_size

            if self.max_cudagraph_size > 992:
                # This condition derives from FA3's internal heuristic.
                # TODO(woosuk): Support larger cudagraph sizes.
                raise ValueError(
                    "Capture size larger than 992 is not supported for "
                    "full cuda graph.")

            # Persistent buffer reused across steps; build() copies each
            # step's scheduler metadata into it.
            self.scheduler_metadata = torch.zeros(
                vllm_config.scheduler_config.max_num_seqs + 1,
                dtype=torch.int32,
                device=self.device,
            )
            # When using cuda graph, we need to set the upper bound of the
            # number of splits so that large enough intermediate buffers are
            # pre-allocated during capture.
            self.max_num_splits = _DEFAULT_MAX_NUM_SPLITS_FOR_CUDA_GRAPH

        # Sliding window size to be used with the AOT scheduler will be
        # populated on first build() call.
        self.aot_sliding_window: Optional[tuple[int, int]] = None

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> FlashAttentionMetadata:
        """
        fast_build disables AOT scheduling, used when there will be few
        iterations i.e. spec-decode
        """
        num_reqs = common_attn_metadata.num_reqs
        num_actual_tokens = common_attn_metadata.num_actual_tokens
        max_query_len = common_attn_metadata.max_query_len
        max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
        query_start_loc = common_attn_metadata.query_start_loc
        seq_lens = common_attn_metadata.seq_lens
        seq_lens_cpu = common_attn_metadata.seq_lens_cpu
        block_table_tensor = common_attn_metadata.block_table_tensor
        slot_mapping = common_attn_metadata.slot_mapping
        causal = common_attn_metadata.causal

        # the overhead of the aot schedule is not worth it for spec-decode
        aot_schedule = self.aot_schedule and not fast_build

        if self.aot_sliding_window is None:
            self.aot_sliding_window = (-1, -1)
            # For the AOT scheduler we need the sliding window value to be
            # constant for all layers to. We have to populate this on the first
            # build() call so the layers are constructed (cannot populate)
            # in __init__.
            if aot_schedule:
                sliding_window_configs = _get_sliding_window_configs(
                    self.vllm_config)
                if len(sliding_window_configs) == 1:
                    sliding_window_config = sliding_window_configs.pop()
                    if sliding_window_config is not None:
                        self.aot_sliding_window = sliding_window_config
                elif len(sliding_window_configs) > 1:
                    # Mixed window sizes: AOT scheduling is disabled for good.
                    self.aot_schedule = False
                    aot_schedule = False

        def schedule(batch_size, cu_query_lens, max_query_len, seqlens,
                     max_seq_len, causal):
            # Returns FA3 scheduler metadata, or None when AOT scheduling
            # is disabled.
            cache_dtype = self.cache_config.cache_dtype
            if cache_dtype.startswith("fp8"):
                qkv_dtype = FlashAttentionBackend.get_fp8_dtype_for_flashattn(
                    cache_dtype)
            else:
                qkv_dtype = self.kv_cache_dtype
            if aot_schedule:
                return get_scheduler_metadata(
                    batch_size=batch_size,
                    max_seqlen_q=max_query_len,
                    max_seqlen_k=max_seq_len,
                    num_heads_q=self.num_heads_q,
                    num_heads_kv=self.num_heads_kv,
                    headdim=self.headdim,
                    cache_seqlens=seqlens,
                    qkv_dtype=qkv_dtype,
                    cu_seqlens_q=cu_query_lens,
                    page_size=self.block_size,
                    causal=causal,
                    window_size=self.aot_sliding_window,
                    num_splits=self.max_num_splits,
                )
            return None

        use_cascade = common_prefix_len > 0

        if use_cascade:
            # Split the work: one non-causal pass over the shared prefix,
            # one causal pass over the per-request suffixes.
            cu_prefix_query_lens = torch.tensor([0, num_actual_tokens],
                                                dtype=torch.int32,
                                                device=self.device)
            prefix_kv_lens = torch.tensor([common_prefix_len],
                                          dtype=torch.int32,
                                          device=self.device)
            suffix_kv_lens = (seq_lens_cpu[:num_reqs] - common_prefix_len).to(
                self.device, non_blocking=True)
            prefix_scheduler_metadata = schedule(
                batch_size=1,
                cu_query_lens=cu_prefix_query_lens,
                max_query_len=num_actual_tokens,
                seqlens=prefix_kv_lens,
                max_seq_len=common_prefix_len,
                causal=False)
            scheduler_metadata = schedule(batch_size=num_reqs,
                                          cu_query_lens=query_start_loc,
                                          max_query_len=max_query_len,
                                          seqlens=suffix_kv_lens,
                                          max_seq_len=max_seq_len -
                                          common_prefix_len,
                                          causal=True)
        else:
            cu_prefix_query_lens = None
            prefix_kv_lens = None
            suffix_kv_lens = None
            prefix_scheduler_metadata = None
            scheduler_metadata = schedule(batch_size=num_reqs,
                                          cu_query_lens=query_start_loc,
                                          max_query_len=max_query_len,
                                          seqlens=seq_lens,
                                          max_seq_len=max_seq_len,
                                          causal=causal)
        # For FA3 + full cudagraph
        max_num_splits = 0
        if self.use_full_cuda_graph and scheduler_metadata is not None:
            n = scheduler_metadata.shape[0]
            self.scheduler_metadata[:n] = scheduler_metadata
            # NOTE(woosuk): We should zero out the rest of the scheduler
            # metadata to guarantee the correctness. Otherwise, some thread
            # blocks may use the invalid scheduler metadata and overwrite the
            # output buffer.
            self.scheduler_metadata[n:] = 0
            scheduler_metadata = self.scheduler_metadata[:n]

            if num_actual_tokens <= self.max_cudagraph_size:
                # NOTE(woosuk): Setting num_splits > 1 may increase the memory
                # usage, because the intermediate buffers of size [num_splits,
                # num_heads, num_tokens, head_size] are allocated. Therefore,
                # we only set num_splits when using cuda graphs.
                max_num_splits = self.max_num_splits

        attn_metadata = FlashAttentionMetadata(
            num_actual_tokens=num_actual_tokens,
            max_query_len=max_query_len,
            query_start_loc=query_start_loc,
            max_seq_len=max_seq_len,
            seq_lens=seq_lens,
            block_table=block_table_tensor,
            slot_mapping=slot_mapping,
            use_cascade=use_cascade,
            common_prefix_len=common_prefix_len,
            scheduler_metadata=scheduler_metadata,
            cu_prefix_query_lens=cu_prefix_query_lens,
            prefix_kv_lens=prefix_kv_lens,
            suffix_kv_lens=suffix_kv_lens,
            prefix_scheduler_metadata=prefix_scheduler_metadata,
            max_num_splits=max_num_splits,
            causal=causal)
        return attn_metadata

    def use_cascade_attention(self, *args, **kwargs) -> bool:
        # Delegates to the module-level heuristic of the same name.
        return use_cascade_attention(*args, **kwargs)
366
+
367
+
368
class FlashAttentionImpl(AttentionImpl):
    """FlashAttention-backed implementation of one attention layer."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
        sinks: Optional[torch.Tensor] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        # Window is the (left, right) pair passed to flash-attn; (-1, -1)
        # disables windowing. ENCODER_ONLY gets a symmetric window since its
        # attention is non-causal (see _forward_encoder_attention).
        if sliding_window is None:
            self.sliding_window = (-1, -1)
        elif attn_type == AttentionType.ENCODER_ONLY:
            self.sliding_window = (sliding_window - 1, sliding_window - 1)
        else:
            self.sliding_window = (sliding_window - 1, 0)
        self.kv_cache_dtype = kv_cache_dtype
        if logits_soft_cap is None:
            # In flash-attn, setting logits_soft_cap as 0 means no soft cap.
            logits_soft_cap = 0
        self.logits_soft_cap = logits_soft_cap
        # When set, this layer reads the KV cache written by another layer
        # and skips writing its own (see forward()).
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name

        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        # Raises ValueError for unsupported head sizes.
        FlashAttentionBackend.validate_head_size(head_size)

        if attn_type not in [
                AttentionType.DECODER, AttentionType.ENCODER_ONLY
        ]:
            raise NotImplementedError("Encoder/decoder cross-attention "
                                      "is not implemented for "
                                      "FlashAttentionImpl")

        self.attn_type = attn_type
        self.vllm_flash_attn_version = get_flash_attn_version()
        if is_quantized_kv_cache(self.kv_cache_dtype) \
            and not flash_attn_supports_fp8():
            raise NotImplementedError(
                "FlashAttention does not support fp8 kv-cache on this device.")

        # Optional per-head attention-sink values (FA3 `s_aux`).
        self.sinks = sinks
        if self.sinks is not None:
            assert self.vllm_flash_attn_version == 3, (
                "Sinks are only supported in FlashAttention 3")
            assert self.sinks.shape[0] == num_heads, (
                "Sinks must have the same number of heads as the number of "
                "heads in the layer")

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: FlashAttentionMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with FlashAttention.

        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size]
            kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        NOTE: FP8 quantization, flash-attn expect the size of
              {q,k,v}_descale to be (num_sequences, num_kv_heads).
              We use torch's .expand() to avoid duplicating values
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for FlashAttentionImpl")

        if attn_metadata is None:
            # Profiling run.
            return output

        attn_type = self.attn_type

        # IMPORTANT!
        # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in
        # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead
        # in this method. For example, `view` and `slice` (or `[:n]`) operations
        # are surprisingly slow even in the case they do not invoke any GPU ops.
        # Minimize the PyTorch ops in this method as much as possible.
        # Whenever making a change in this method, please benchmark the
        # performance to make sure it does not introduce any overhead.

        num_actual_tokens = attn_metadata.num_actual_tokens

        # Handle encoder attention differently - no KV cache needed
        if attn_type in (AttentionType.ENCODER_ONLY, ):
            # For encoder attention,
            # we use direct Q, K, V tensors without caching
            return self._forward_encoder_attention(query[:num_actual_tokens],
                                                   key[:num_actual_tokens],
                                                   value[:num_actual_tokens],
                                                   output[:num_actual_tokens],
                                                   attn_metadata, layer)

        # For decoder and cross-attention, use KV cache as before
        key_cache, value_cache = kv_cache.unbind(0)

        if self.kv_sharing_target_layer_name is None:
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            # NOTE(woosuk): Here, key and value are padded while slot_mapping is
            # not padded. However, we don't need to do key[:num_actual_tokens]
            # and value[:num_actual_tokens] because the reshape_and_cache_flash
            # op uses the slot_mapping's shape to determine the number of
            # actual tokens.
            reshape_and_cache_flash(
                key,
                value,
                key_cache,
                value_cache,
                attn_metadata.slot_mapping,
                self.kv_cache_dtype,
                layer._k_scale,
                layer._v_scale,
            )

        if self.kv_cache_dtype.startswith("fp8"):
            # Reinterpret the cache and quantize the query to the fp8 dtype
            # flash-attn expects.
            dtype = FlashAttentionBackend.get_fp8_dtype_for_flashattn(
                self.kv_cache_dtype)
            key_cache = key_cache.view(dtype)
            value_cache = value_cache.view(dtype)
            num_tokens, num_heads, head_size = query.shape
            query, _ = ops.scaled_fp8_quant(
                query.reshape(
                    (num_tokens, num_heads * head_size)).contiguous(),
                layer._q_scale)
            query = query.reshape((num_tokens, num_heads, head_size))

        if not attn_metadata.use_cascade:
            cu_seqlens_q = attn_metadata.query_start_loc
            seqused_k = attn_metadata.seq_lens
            max_seqlen_q = attn_metadata.max_query_len
            max_seqlen_k = attn_metadata.max_seq_len
            block_table = attn_metadata.block_table
            scheduler_metadata = attn_metadata.scheduler_metadata

            descale_shape = (cu_seqlens_q.shape[0] - 1, key.shape[1])

            flash_attn_varlen_func(
                q=query[:num_actual_tokens],
                k=key_cache,
                v=value_cache,
                out=output[:num_actual_tokens],
                cu_seqlens_q=cu_seqlens_q,
                max_seqlen_q=max_seqlen_q,
                seqused_k=seqused_k,
                max_seqlen_k=max_seqlen_k,
                softmax_scale=self.scale,
                causal=attn_metadata.causal,
                alibi_slopes=self.alibi_slopes,
                window_size=self.sliding_window,
                block_table=block_table,
                softcap=self.logits_soft_cap,
                scheduler_metadata=scheduler_metadata,
                fa_version=self.vllm_flash_attn_version,
                q_descale=layer._q_scale.expand(descale_shape),
                k_descale=layer._k_scale.expand(descale_shape),
                v_descale=layer._v_scale.expand(descale_shape),
                num_splits=attn_metadata.max_num_splits,
                s_aux=self.sinks,
            )
            return output

        # Cascade attention (rare case).
        cascade_attention(
            output[:num_actual_tokens],
            query[:num_actual_tokens],
            key_cache,
            value_cache,
            cu_query_lens=attn_metadata.query_start_loc,
            max_query_len=attn_metadata.max_query_len,
            cu_prefix_query_lens=attn_metadata.cu_prefix_query_lens,
            prefix_kv_lens=attn_metadata.prefix_kv_lens,
            suffix_kv_lens=attn_metadata.suffix_kv_lens,
            max_kv_len=attn_metadata.max_seq_len,
            softmax_scale=self.scale,
            alibi_slopes=self.alibi_slopes,
            sliding_window=self.sliding_window,
            logits_soft_cap=self.logits_soft_cap,
            block_table=attn_metadata.block_table,
            common_prefix_len=attn_metadata.common_prefix_len,
            fa_version=self.vllm_flash_attn_version,
            prefix_scheduler_metadata=attn_metadata.prefix_scheduler_metadata,
            suffix_scheduler_metadata=attn_metadata.scheduler_metadata,
            q_descale=layer._q_scale,
            k_descale=layer._k_scale,
            v_descale=layer._v_scale,
        )
        return output

    def _forward_encoder_attention(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        output: torch.Tensor,
        attn_metadata: FlashAttentionMetadata,
        layer: torch.nn.Module,
    ) -> torch.Tensor:
        """Forward pass for encoder attention without KV cache.

        Args:
            query: shape = [num_encoder_tokens, num_heads, head_size]
            key: shape = [num_encoder_tokens, num_kv_heads, head_size]
            value: shape = [num_encoder_tokens, num_kv_heads, head_size]
            output: shape = [num_encoder_tokens, num_heads, head_size]
            attn_metadata: Encoder attention metadata
            layer: The attention layer
        """
        # For encoder attention, process FP8 quantization if needed
        if self.kv_cache_dtype.startswith("fp8"):
            raise NotImplementedError(
                "quantization is not supported for encoder attention")

        # Use encoder-specific metadata for sequence information.
        # K and V are uncached here, so query lengths double as KV lengths.
        cu_seqlens_q = attn_metadata.query_start_loc
        cu_seqlens_k = attn_metadata.query_start_loc
        max_seqlen_q = attn_metadata.max_query_len
        max_seqlen_k = attn_metadata.max_query_len

        descale_shape = (
            cu_seqlens_q.shape[0] - 1,  # type: ignore[union-attr]
            self.num_kv_heads)

        # Call flash attention directly on Q, K, V tensors
        flash_attn_varlen_func(
            q=query,
            k=key,
            v=value,
            out=output,
            cu_seqlens_q=cu_seqlens_q,
            cu_seqlens_k=cu_seqlens_k,
            max_seqlen_q=max_seqlen_q,
            max_seqlen_k=max_seqlen_k,
            softmax_scale=self.scale,
            causal=False,  # Encoder attention is bidirectional
            alibi_slopes=self.alibi_slopes,
            window_size=self.sliding_window,
            softcap=self.logits_soft_cap,
            fa_version=self.vllm_flash_attn_version,
            q_descale=layer._q_scale.expand(descale_shape),
            k_descale=layer._k_scale.expand(descale_shape),
            v_descale=layer._v_scale.expand(descale_shape),
        )

        return output
641
+
642
+
643
def use_cascade_attention(
    common_prefix_len: int,
    query_lens: np.ndarray,
    num_query_heads: int,
    num_kv_heads: int,
    use_alibi: bool,
    use_sliding_window: bool,
    use_local_attention: bool,
    num_sms: int,
) -> bool:
    """Heuristically decide whether to use cascade attention.

    First checks that cascade attention is supported with the given
    configuration, then compares a rough performance model of cascade
    attention against FlashDecoding.
    """
    # Common case first: a short shared prefix is probably not worth
    # cascading. 256 is an arbitrary, untuned threshold; returning early
    # keeps this cheap for the typical call.
    if common_prefix_len < 256:
        return False
    # Cascade attention is currently not supported with these variants.
    if use_alibi or use_sliding_window or use_local_attention:
        return False
    # Too few requests to amortize the shared-prefix pass (arbitrary,
    # untuned threshold of 8 queries).
    num_reqs = len(query_lens)
    if num_reqs < 8:
        return False

    # If FlashDecoding would not kick in for normal attention, cascade
    # attention saves memory bandwidth and is likely faster.
    num_queries_per_kv = num_query_heads // num_kv_heads
    # The criteria for using FlashDecoding can be found in the following link:
    # https://github.com/vllm-project/flash-attention/blob/96266b1111111f3d11aabefaf3bacbab6a89d03c/csrc/flash_attn/flash_api.cpp#L535
    would_use_flash_decoding = (num_queries_per_kv > 1
                                and not use_sliding_window and not use_alibi
                                and np.all(query_lens == 1))
    if not would_use_flash_decoding:
        return True

    # Otherwise compare the two approaches with a very rough wave-count
    # model (NOTE(woosuk): it may not be accurate).
    # These are default tile sizes; flash-attn might use different ones
    # (e.g., 64 or 256) depending on the configuration.
    q_tile_size = 128
    kv_tile_size = 128
    num_prefix_tiles = cdiv(common_prefix_len, kv_tile_size)

    num_tokens = num_reqs
    cascade_ctas = num_query_heads * cdiv(num_tokens, q_tile_size)
    cascade_time = cdiv(cascade_ctas, num_sms) * num_prefix_tiles

    flash_decoding_ctas = (num_reqs * num_kv_heads *
                           cdiv(num_queries_per_kv, q_tile_size))
    flash_decoding_time = cdiv(flash_decoding_ctas * num_prefix_tiles,
                               num_sms)

    # Use cascade attention if it is faster than FlashDecoding.
    return cascade_time < flash_decoding_time
710
+
711
+
712
def cascade_attention(
    output: torch.Tensor,
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    cu_query_lens: torch.Tensor,
    max_query_len: int,
    cu_prefix_query_lens: torch.Tensor,
    prefix_kv_lens: torch.Tensor,
    suffix_kv_lens: torch.Tensor,
    max_kv_len: int,
    softmax_scale: float,
    alibi_slopes: Optional[torch.Tensor],
    sliding_window: tuple[int, int],
    logits_soft_cap: float,
    block_table: torch.Tensor,
    common_prefix_len: int,
    fa_version: int,
    prefix_scheduler_metadata: Optional[torch.Tensor] = None,
    suffix_scheduler_metadata: Optional[torch.Tensor] = None,
    q_descale: Optional[torch.Tensor] = None,
    k_descale: Optional[torch.Tensor] = None,
    v_descale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Run cascade attention: shared-prefix pass + per-request suffix pass.

    The shared prefix (the first `common_prefix_len` tokens, common to all
    requests) is attended to once with all query tokens, the remaining
    (suffix) KV is attended to per request, and the two partial results are
    combined via their log-sum-exp values into `output` in place.

    NOTE(review): despite the `-> torch.Tensor` annotation, this function has
    no return statement — the result is written into `output` in place.
    Confirm callers rely on `output` rather than the return value.
    """
    assert alibi_slopes is None, ("Cascade attention does not support ALiBi.")
    # TODO: Support sliding window.
    assert sliding_window == (-1, -1), (
        "Cascade attention does not support sliding window.")

    num_tokens = query.shape[0]
    # Block size is taken from the cache layout; assumes the block-size axis
    # is dim -3 of key_cache — TODO confirm against the backend's
    # get_kv_cache_shape.
    block_size = key_cache.shape[-3]
    # The shared prefix must end on a page boundary so it can be expressed
    # as whole blocks of the block table.
    assert common_prefix_len % block_size == 0
    num_common_kv_blocks = common_prefix_len // block_size
    assert num_common_kv_blocks > 0
    # Descale shape for the prefix pass: one row per "request" of the
    # single combined prefix query (cu_prefix_query_lens has 2 entries).
    descale_shape = (cu_prefix_query_lens.shape[0] - 1, key_cache.shape[-2])

    # Process shared prefix: every query token attends to the common prefix.
    # Bidirectional masking within the prefix (causal=False) is safe because
    # all prefix KV positions precede every query token.
    prefix_output, prefix_lse = flash_attn_varlen_func(
        q=query,
        k=key_cache,
        v=value_cache,
        cu_seqlens_q=cu_prefix_query_lens,
        seqused_k=prefix_kv_lens,
        max_seqlen_q=num_tokens,
        max_seqlen_k=common_prefix_len,
        softmax_scale=softmax_scale,
        causal=False,
        window_size=sliding_window,
        # Only the first request's block-table row is needed: the prefix
        # blocks are shared by all requests.
        block_table=block_table[:1],
        softcap=logits_soft_cap,
        return_softmax_lse=True,
        scheduler_metadata=prefix_scheduler_metadata,
        fa_version=fa_version,
        q_descale=q_descale.expand(descale_shape)
        if q_descale is not None else None,
        k_descale=k_descale.expand(descale_shape)
        if k_descale is not None else None,
        v_descale=v_descale.expand(descale_shape)
        if v_descale is not None else None,
    )

    # Descale shape for the suffix pass: one row per actual request.
    descale_shape = (cu_query_lens.shape[0] - 1, key_cache.shape[-2])

    # Process suffix per query (causal within each request's own suffix).
    suffix_output, suffix_lse = flash_attn_varlen_func(
        q=query,
        k=key_cache,
        v=value_cache,
        cu_seqlens_q=cu_query_lens,
        seqused_k=suffix_kv_lens,
        max_seqlen_q=max_query_len,
        max_seqlen_k=max_kv_len - common_prefix_len,
        softmax_scale=softmax_scale,
        causal=True,
        window_size=sliding_window,
        # Skip the shared-prefix blocks in every request's block-table row.
        block_table=block_table[:, num_common_kv_blocks:],
        softcap=logits_soft_cap,
        return_softmax_lse=True,
        scheduler_metadata=suffix_scheduler_metadata,
        fa_version=fa_version,
        q_descale=q_descale.expand(descale_shape)
        if q_descale is not None else None,
        k_descale=k_descale.expand(descale_shape)
        if k_descale is not None else None,
        v_descale=v_descale.expand(descale_shape)
        if v_descale is not None else None,
    )

    # Merge prefix and suffix outputs, and store the result in output.
    merge_attn_states(output, prefix_output, prefix_lse, suffix_output,
                      suffix_lse)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flashinfer.py ADDED
@@ -0,0 +1,1003 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with FlashInfer."""
4
+ from __future__ import annotations
5
+
6
+ from dataclasses import dataclass
7
+ from typing import ClassVar, Optional, Union
8
+
9
+ import torch
10
+ from flashinfer import (BatchDecodeWithPagedKVCacheWrapper,
11
+ BatchPrefillWithPagedKVCacheWrapper,
12
+ MultiLevelCascadeAttentionWrapper)
13
+ from flashinfer.decode import (_get_range_buf, get_seq_lens,
14
+ trtllm_batch_decode_with_kv_cache)
15
+ from flashinfer.prefill import trtllm_batch_context_with_kv_cache
16
+
17
+ import vllm.envs as envs
18
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
19
+ AttentionType)
20
+ from vllm.config import CUDAGraphMode, VllmConfig
21
+ from vllm.logger import init_logger
22
+ from vllm.utils import cdiv, is_pin_memory_available
23
+ from vllm.utils.flashinfer import use_trtllm_attention
24
+ from vllm.v1.attention.backends.flash_attn import use_cascade_attention
25
+ # yapf conflicts with isort for this block
26
+ # yapf: disable
27
+ from vllm.v1.attention.backends.utils import (AttentionCGSupport,
28
+ AttentionMetadataBuilder,
29
+ CommonAttentionMetadata,
30
+ get_kv_cache_layout,
31
+ get_per_layer_parameters,
32
+ infer_global_hyperparameters,
33
+ split_decodes_and_prefills)
34
+ from vllm.v1.kv_cache_interface import AttentionSpec
35
+
36
+ FLASHINFER_WORKSPACE_BUFFER_SIZE = 256 * 1024 * 1024
37
+
38
+ logger = init_logger(__name__)
39
+
40
+
41
class FlashInferBackend(AttentionBackend):
    """vLLM v1 attention backend descriptor for FlashInfer."""

    accept_output_buffer: bool = True

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        # Query/output dtypes supported by this backend.
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        # https://github.com/flashinfer-ai/flashinfer/blob/3d55c71a62052c590c130897d3a3db49b14fcc34/include/flashinfer/utils.cuh#L157
        return [64, 128, 256]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError if `head_size` is not supported by FlashInfer."""
        supported = cls.get_supported_head_sizes()
        if head_size in supported:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_name() -> str:
        return "FLASHINFER_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type[FlashInferImpl]:
        return FlashInferImpl

    @staticmethod
    def get_metadata_cls() -> type[FlashInferMetadata]:
        return FlashInferMetadata

    @staticmethod
    def get_builder_cls() -> type[FlashInferMetadataBuilder]:
        return FlashInferMetadataBuilder

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        # K and V are stored side by side, hence the extra axis of size 2.
        return (num_blocks, 2, block_size, num_kv_heads, head_size)

    @staticmethod
    def get_kv_cache_stride_order() -> tuple[int, ...]:
        """Return the permutation mapping `get_kv_cache_shape` to memory.

        `stride_order` indicates the permutation that gets us from
        `get_kv_cache_shape` to the actual memory layout we want.
        """
        layout_to_order = {
            "NHD": (0, 1, 2, 3, 4),
            "HND": (0, 1, 3, 2, 4),
        }
        cache_layout = get_kv_cache_layout()
        stride_order = layout_to_order.get(cache_layout)
        if stride_order is None:
            raise ValueError(f"Unknown cache layout format {cache_layout}.")
        return stride_order

    @staticmethod
    def get_fp8_dtype_for_flashinfer(kv_cache_dtype: str) -> torch.dtype:
        """Map a vLLM fp8 cache-dtype string to the torch fp8 dtype."""
        fp8_dtypes = {
            "fp8": torch.float8_e4m3fn,
            "fp8_e4m3": torch.float8_e4m3fn,
            "fp8_e5m2": torch.float8_e5m2,
        }
        dtype = fp8_dtypes.get(kv_cache_dtype)
        if dtype is None:
            raise ValueError(f"Unrecognized FP8 dtype: {kv_cache_dtype}")
        return dtype
111
+
112
+
113
@dataclass
class FlashInferMetadata:
    """Per-step attention metadata consumed by FlashInferImpl.forward().

    Fields suffixed `_cpu` are host-side tensors used by the wrapper
    ``plan()`` calls; the rest are device-side tensors used at launch time.
    """

    num_actual_tokens: int  # Number of tokens excluding padding.

    # (batch_size + 1,). The cumulative subquery lengths of the sequences in
    # the batch, used to index into subquery. E.g., if the subquery length
    # is [4, 6], it is [0, 4, 10].
    qo_indptr_cpu: torch.Tensor
    # An example for paged_kv_indices, paged_kv_indptr:
    # request 1, page indices [0, 5, 8]
    # request 2, page indices [1, 6, 7]
    # request 3, page indices [3, 4]
    # paged_kv_indices is a concatenation of page indices of all requests:
    # [0, 5, 8, 1, 6, 7, 3, 4]
    # paged_kv_indptr is used to index into paged_kv_indices:
    # [0, 3, 6, 8]
    # The indptr of the paged kv cache, shape: [batch_size + 1] (CPU for plan)
    paged_kv_indptr_cpu: torch.Tensor
    # The page indices of the paged kv cache (on device for plan)
    paged_kv_indices: torch.Tensor
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size] (CPU for plan)
    paged_kv_last_page_len_cpu: torch.Tensor
    # The number of query/output heads
    num_qo_heads: int
    # The number of key/value heads
    num_kv_heads: int
    # The dimension of the attention heads
    head_dim: int
    # Block size of vllm
    page_size: int
    # The data type of the paged kv cache
    kv_data_type: torch.dtype
    # The data type of the query
    q_data_type: torch.dtype

    # Maps each token to its destination slot in the paged KV cache.
    slot_mapping: torch.Tensor

    # For flashinfer trtllm batch decode
    max_q_len: int
    # NOTE(review): populated from `seq_lens_cpu.max()` in build(), which is
    # a 0-dim tensor rather than a Python int — confirm downstream consumers.
    max_seq_len: int
    seq_lens: torch.Tensor
    block_table_tensor: torch.Tensor
    prefill_use_trtllm: bool
    decode_use_trtllm: bool

    # For handling prefill decode split
    num_decodes: int
    num_decode_tokens: int
    num_prefills: int
    num_prefill_tokens: int

    # For cascade attention (CPU for planning).
    use_cascade: bool
    shared_qo_indptr_cpu: Optional[torch.Tensor] = None
    shared_kv_page_indptr_cpu: Optional[torch.Tensor] = None
    shared_kv_page_indices_cpu: Optional[torch.Tensor] = None
    shared_kv_last_page_len_cpu: Optional[torch.Tensor] = None

    # Wrappers populated by FlashInferMetadataBuilder._plan().
    prefill_wrapper: Optional[BatchPrefillWithPagedKVCacheWrapper] = None
    decode_wrapper: Optional[BatchDecodeWithPagedKVCacheWrapper] = None
    cascade_wrapper: Optional[MultiLevelCascadeAttentionWrapper] = None

    # Device-side copies of the prefill indptrs; only set on the trtllm
    # prefill path (see FlashInferMetadataBuilder._plan).
    qo_indptr_gpu: Optional[torch.Tensor] = None
    paged_kv_indptr_gpu: Optional[torch.Tensor] = None

    def __post_init__(self):
        # Fail early if the head size is unsupported by FlashInfer.
        if self.head_dim is not None:
            FlashInferBackend.validate_head_size(self.head_dim)
183
+
184
+
185
class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
    """Builds FlashInferMetadata each step and plans the FlashInfer wrappers.

    Maintains persistent host/device buffers that are rewritten in place by
    build() so that planning avoids per-step allocations.
    """

    # Full cudagraphs are only supported for uniform single-token decode
    # batches (see build_for_cudagraph_capture).
    cudagraph_support: ClassVar[AttentionCGSupport] = \
        AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE

    reorder_batch_threshold: ClassVar[int] = 1

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        self.device = device
        self.vllm_config = vllm_config
        self.cache_config = vllm_config.cache_config
        self.kv_cache_spec = kv_cache_spec
        # Lazily-created workspace/wrapper objects (see the _get_* helpers).
        self._workspace_buffer = None
        self._prefill_wrapper = None  # Wrapper for prefill/append
        self._decode_wrapper = None  # Wrapper for decode (general shape)

        self.compilation_config = vllm_config.compilation_config
        max_num_pages_per_req = cdiv(vllm_config.model_config.max_model_len,
                                     self.kv_cache_spec.block_size)
        max_num_reqs = vllm_config.scheduler_config.max_num_seqs
        max_num_pages = max_num_reqs * max_num_pages_per_req
        self.enable_cuda_graph = self.compilation_config.cudagraph_mode.\
            decode_mode() == CUDAGraphMode.FULL
        if self.enable_cuda_graph:
            # For full cudagraph capture, one `decode_wrapper` for each batch
            # size is needed for FlashInfer.
            self._decode_wrappers_cudagraph: dict[
                int, BatchDecodeWithPagedKVCacheWrapper] = {}
            self._decode_cudagraph_max_bs = min(
                max_num_reqs, self.compilation_config.max_capture_size)

        self._cascade_wrapper = None  # Wrapper for cascade attention

        # Global hyperparameters shared by all attention layers
        # TODO: discard this for trtllm-gen backend
        self.global_hyperparameters = infer_global_hyperparameters(
            get_per_layer_parameters(vllm_config, layer_names, FlashInferImpl))

        # Preparing persistent buffers (device-side)
        self.paged_kv_indptr = torch.zeros(max_num_reqs + 1,
                                           dtype=torch.int32,
                                           device=self.device)
        self.paged_kv_indices = torch.zeros(
            max_num_pages,  # max num pages possible
            dtype=torch.int32,
            device=self.device)
        self.paged_kv_last_page_len = torch.zeros(max_num_reqs,
                                                  dtype=torch.int32,
                                                  device=self.device)
        # host-side buffer (pinned when available for faster H2D copies)
        pin_memory = is_pin_memory_available()
        self.paged_kv_indptr_cpu = torch.zeros(max_num_reqs + 1,
                                               dtype=torch.int32,
                                               device="cpu",
                                               pin_memory=pin_memory)
        self.paged_kv_indices_cpu = torch.zeros(max_num_pages,
                                                dtype=torch.int32,
                                                device="cpu",
                                                pin_memory=pin_memory)
        self.paged_kv_last_page_len_cpu = torch.zeros(max_num_reqs,
                                                      dtype=torch.int32,
                                                      device="cpu",
                                                      pin_memory=pin_memory)

        # Reusable [0, 1, ..., max_num_pages_per_req) range used to build the
        # valid-block mask in build() without per-step allocation.
        self.block_table_arange = torch.arange(max_num_pages_per_req,
                                               dtype=torch.int32,
                                               device=self.device)

    def _get_workspace_buffer(self):
        # Shared FlashInfer scratch space, allocated once on first use.
        if self._workspace_buffer is None:
            self._workspace_buffer = torch.zeros(
                FLASHINFER_WORKSPACE_BUFFER_SIZE,
                dtype=torch.uint8,
                device=self.device)
        return self._workspace_buffer

    def _get_prefill_wrapper(self):
        # Lazily create (and cache) the prefill/append wrapper.
        if self._prefill_wrapper is None:
            self._prefill_wrapper = BatchPrefillWithPagedKVCacheWrapper(
                self._get_workspace_buffer(), get_kv_cache_layout())
        return self._prefill_wrapper

    def _get_decode_wrapper(self,
                            batch_size: int,
                            use_cudagraph: bool = False):
        """Return a decode wrapper, cached per batch size when capturing
        cudagraphs (wrappers must then own fixed-address buffers)."""
        if use_cudagraph:
            decode_wrapper = self._decode_wrappers_cudagraph.get(
                batch_size, None)
        else:
            decode_wrapper = self._decode_wrapper

        if decode_wrapper is None:
            num_qo_heads = (
                self.vllm_config.model_config.get_num_attention_heads(
                    self.vllm_config.parallel_config))
            num_kv_heads = self.vllm_config.model_config.get_num_kv_heads(
                self.vllm_config.parallel_config)
            # Tensor cores pay off at high GQA ratios (or when forced).
            use_tensor_cores = envs.VLLM_FLASHINFER_FORCE_TENSOR_CORES or (
                num_qo_heads // num_kv_heads > 4)

            if use_cudagraph:
                # Cudagraph replay requires stable buffer addresses, so hand
                # the wrapper slices of our persistent device buffers.
                paged_kv_indptr = self.paged_kv_indptr[:batch_size + 1]
                paged_kv_indices = self.paged_kv_indices
                paged_kv_last_page_len = self.paged_kv_last_page_len[:
                                                                     batch_size]
            else:
                paged_kv_indptr = None
                paged_kv_indices = None
                paged_kv_last_page_len = None
            decode_wrapper = BatchDecodeWithPagedKVCacheWrapper(
                self._get_workspace_buffer(),
                get_kv_cache_layout(),
                use_cuda_graph=use_cudagraph,
                paged_kv_indptr_buffer=paged_kv_indptr,
                paged_kv_indices_buffer=paged_kv_indices,
                paged_kv_last_page_len_buffer=paged_kv_last_page_len,
                use_tensor_cores=use_tensor_cores)

            # save the decode wrapper
            if use_cudagraph:
                self._decode_wrappers_cudagraph[batch_size] = decode_wrapper
            else:
                self._decode_wrapper = decode_wrapper

        return decode_wrapper

    def _get_cascade_wrapper(self):
        # Lazily create the two-level cascade wrapper (shared prefix level +
        # per-request level).
        if self._cascade_wrapper is None:
            self._cascade_wrapper = MultiLevelCascadeAttentionWrapper(
                2, self._get_workspace_buffer(), get_kv_cache_layout())
        return self._cascade_wrapper

    def _plan(self, attn_metadata: FlashInferMetadata):
        """Run the wrappers' plan() calls for the given metadata."""
        if attn_metadata.use_cascade:
            attn_metadata.cascade_wrapper = self._get_cascade_wrapper()
            # Level 0 is the shared prefix; level 1 is the per-request KV.
            attn_metadata.cascade_wrapper.plan(
                [
                    attn_metadata.shared_qo_indptr_cpu,
                    attn_metadata.qo_indptr_cpu
                ],
                [
                    attn_metadata.shared_kv_page_indptr_cpu,
                    attn_metadata.paged_kv_indptr_cpu
                ],
                [
                    attn_metadata.shared_kv_page_indices_cpu,
                    attn_metadata.paged_kv_indices
                ],
                [
                    attn_metadata.shared_kv_last_page_len_cpu,
                    attn_metadata.paged_kv_last_page_len_cpu
                ],
                attn_metadata.num_qo_heads,
                attn_metadata.num_kv_heads,
                attn_metadata.head_dim,
                attn_metadata.page_size,
                causal=True,
                sm_scale=self.global_hyperparameters.sm_scale,
                window_left=self.global_hyperparameters.window_left,
                logits_soft_cap=self.global_hyperparameters.logits_soft_cap,
                q_data_type=attn_metadata.q_data_type,
                kv_data_type=attn_metadata.kv_data_type,
            )
        else:
            # Regular attention (common case).
            # Decodes are at the front and prefills are at the back,
            # according to reorder_batch()
            num_prefills = attn_metadata.num_prefills
            num_decodes = attn_metadata.num_decodes
            if num_prefills > 0:
                # Decodes are first so prefills start after the last decode
                prefill_start = num_decodes
                attn_metadata.prefill_wrapper = self._get_prefill_wrapper()
                assert attn_metadata.qo_indptr_cpu[prefill_start:].shape[
                    0] == num_prefills + 1
                assert attn_metadata.paged_kv_indptr_cpu[prefill_start:].shape[
                    0] == num_prefills + 1
                assert attn_metadata.paged_kv_last_page_len_cpu[
                    prefill_start:].shape[0] == num_prefills
                # Since prefill_wrapper.run() will be called with
                # query[num_decode_tokens:] we need to adjust the qo_indptr
                # to be relative to the start of the prefill queries.
                qo_indptr_cpu = attn_metadata.qo_indptr_cpu[
                    prefill_start:] - attn_metadata.qo_indptr_cpu[prefill_start]
                paged_kv_indptr_cpu = attn_metadata.paged_kv_indptr_cpu[
                    prefill_start:]
                if not attn_metadata.prefill_use_trtllm:
                    attn_metadata.prefill_wrapper.plan(
                        qo_indptr_cpu,
                        paged_kv_indptr_cpu,
                        attn_metadata.paged_kv_indices,
                        attn_metadata.
                        paged_kv_last_page_len_cpu[prefill_start:],
                        attn_metadata.num_qo_heads,
                        attn_metadata.num_kv_heads,
                        attn_metadata.head_dim,
                        attn_metadata.page_size,
                        causal=True,
                        sm_scale=self.global_hyperparameters.sm_scale,
                        window_left=self.global_hyperparameters.window_left,
                        logits_soft_cap=self.global_hyperparameters.
                        logits_soft_cap,
                        q_data_type=attn_metadata.q_data_type,
                        kv_data_type=attn_metadata.kv_data_type,
                    )
                else:
                    # The trtllm kernels take device-side indptrs directly.
                    attn_metadata.qo_indptr_gpu = qo_indptr_cpu.to(self.device)
                    attn_metadata.paged_kv_indptr_gpu = paged_kv_indptr_cpu.to(
                        self.device)

            if num_decodes > 0:
                pure_decode = num_prefills == 0
                # possible required padding for cudagraph replay
                use_cudagraph = (self.enable_cuda_graph and pure_decode and
                                 num_decodes <= self._decode_cudagraph_max_bs)
                if use_cudagraph:
                    num_input_tokens = (
                        self.vllm_config.pad_for_cudagraph(num_decodes))
                    # Carefully fulfill the padding region with reasonable value
                    # on cpu.
                    # Make sure paged_kv_indptr_cpu is not decreasing
                    self.paged_kv_indptr_cpu[1 + num_decodes:1 +
                                             num_input_tokens].fill_(
                                                 attn_metadata.
                                                 paged_kv_indptr_cpu[-1])
                    # Fill the remaining paged_kv_last_page_len_cpu with 1.
                    # This is because flashinfer treats 0 as a full page
                    # instead of empty.
                    self.paged_kv_last_page_len_cpu[
                        num_decodes:num_input_tokens].fill_(1)

                else:
                    num_input_tokens = num_decodes

                attn_metadata.decode_wrapper = self._get_decode_wrapper(
                    num_input_tokens, use_cudagraph)
                if not attn_metadata.decode_use_trtllm:
                    # Use the persistent buffer with padding length,
                    # instead of the same address but chunked version
                    # in atten_metadata when using cudagraph.
                    fast_plan_decode(
                        attn_metadata.decode_wrapper,
                        self.paged_kv_indptr_cpu[:num_input_tokens + 1],
                        attn_metadata.paged_kv_indices,
                        self.paged_kv_last_page_len_cpu[:num_input_tokens],
                        attn_metadata.num_qo_heads,
                        attn_metadata.num_kv_heads,
                        attn_metadata.head_dim,
                        attn_metadata.page_size,
                        # Disable flashinfer's pos encoding and use vllm's rope.
                        pos_encoding_mode="NONE",
                        sm_scale=self.global_hyperparameters.sm_scale,
                        window_left=self.global_hyperparameters.window_left,
                        logits_soft_cap=self.global_hyperparameters.
                        logits_soft_cap,
                        q_data_type=attn_metadata.q_data_type,
                        kv_data_type=attn_metadata.kv_data_type,
                    )

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> FlashInferMetadata:
        """Construct FlashInferMetadata for this step and plan the wrappers.

        Writes into the builder's persistent paged-KV buffers in place; the
        returned metadata holds views of those buffers.
        """
        num_reqs = common_attn_metadata.num_reqs
        num_actual_tokens = common_attn_metadata.num_actual_tokens
        num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens =\
            split_decodes_and_prefills(common_attn_metadata)

        page_size = self.kv_cache_spec.block_size
        max_q_len = common_attn_metadata.max_query_len
        max_seq_len = common_attn_metadata.seq_lens_cpu.max()
        seq_lens = common_attn_metadata.seq_lens
        seq_lens_cpu = common_attn_metadata.seq_lens_cpu
        block_table_tensor = common_attn_metadata.block_table_tensor

        # Number of KV-cache pages actually used per request (ceil division).
        block_table_bounds_cpu = (seq_lens_cpu + page_size - 1) // page_size

        use_cascade = common_prefix_len > 0
        if use_cascade:
            # Grab the blocks of the shared prefix from the first request.
            assert common_prefix_len % page_size == 0
            num_common_kv_blocks = common_prefix_len // page_size

            # Create CPU versions directly for cascade (no GPU versions needed)
            shared_qo_indptr_cpu = torch.tensor([0, num_actual_tokens],
                                                dtype=torch.int32,
                                                device='cpu')
            shared_kv_page_indptr_cpu = torch.tensor([0, num_common_kv_blocks],
                                                     dtype=torch.int32,
                                                     device='cpu')
            shared_kv_page_indices_cpu = block_table_tensor[
                0, :num_common_kv_blocks]
            shared_kv_last_page_len_cpu = torch.tensor([page_size],
                                                       dtype=torch.int32,
                                                       device='cpu')

            # Remove the blocks of the shared prefix from all requests.
            block_table_tensor = block_table_tensor[:, num_common_kv_blocks:]
            block_table_bounds_cpu -= num_common_kv_blocks
        else:
            shared_qo_indptr_cpu = None
            shared_kv_page_indptr_cpu = None
            shared_kv_page_indices_cpu = None
            shared_kv_last_page_len_cpu = None

        max_num_blocks = block_table_bounds_cpu.max()
        block_table_bounds = block_table_bounds_cpu.to(self.device,
                                                       non_blocking=True)
        # mask[i, j] is True iff block j is in use for request i.
        mask = (self.block_table_arange[:max_num_blocks].unsqueeze(0)
                < block_table_bounds.unsqueeze(1))
        # write self.paged_kv_indices inplace
        num_actual_pages = torch.sum(mask)
        paged_kv_indices = self.paged_kv_indices[:num_actual_pages]
        torch.masked_select(block_table_tensor[:, :max_num_blocks],
                            mask,
                            out=paged_kv_indices)

        # write self.paged_kv_indptr_cpu inplace (0-index is always 0)
        torch.cumsum(block_table_bounds_cpu,
                     dim=0,
                     dtype=torch.int32,
                     out=self.paged_kv_indptr_cpu[1:1 + num_reqs])

        paged_kv_last_page_len_cpu = seq_lens_cpu % page_size
        # write self.paged_kv_last_page_len_cpu inplace
        # (a remainder of 0 means the last page is exactly full)
        torch.where(paged_kv_last_page_len_cpu == 0,
                    torch.tensor(page_size),
                    paged_kv_last_page_len_cpu,
                    out=self.paged_kv_last_page_len_cpu[:num_reqs])

        cache_dtype = self.cache_config.cache_dtype
        if cache_dtype.startswith("fp8"):
            kv_cache_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer(
                cache_dtype)
        else:
            kv_cache_dtype = self.kv_cache_spec.dtype

        num_qo_heads = self.vllm_config.model_config.get_num_attention_heads(
            self.vllm_config.parallel_config)
        num_kv_heads = self.kv_cache_spec.num_kv_heads
        head_dim = self.kv_cache_spec.head_size

        # Check if any layer uses sinks (requires TRTLLM attention)
        has_sinks = self.global_hyperparameters.has_sinks

        # currently prefill trtllm attention does not support fp8 kv cache
        prefill_use_trtllm = not cache_dtype.startswith("fp8") \
            and use_trtllm_attention(
                num_prefill_tokens, max_seq_len, cache_dtype,
                num_qo_heads, num_kv_heads, head_dim, has_sinks)
        decode_use_trtllm = use_trtllm_attention(
            num_decode_tokens, max_seq_len, cache_dtype,
            num_qo_heads, num_kv_heads, head_dim, has_sinks)

        attn_metadata = FlashInferMetadata(
            num_actual_tokens=num_actual_tokens,
            qo_indptr_cpu=common_attn_metadata.query_start_loc_cpu,
            paged_kv_indptr_cpu=self.paged_kv_indptr_cpu[:1 + num_reqs],
            paged_kv_indices=paged_kv_indices,
            paged_kv_last_page_len_cpu=self.
            paged_kv_last_page_len_cpu[:num_reqs],
            num_qo_heads=num_qo_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_dim,
            page_size=page_size,
            kv_data_type=kv_cache_dtype,
            q_data_type=self.vllm_config.model_config.dtype,
            slot_mapping=common_attn_metadata.slot_mapping,
            max_q_len=max_q_len,
            max_seq_len=max_seq_len,
            seq_lens=seq_lens,
            block_table_tensor=block_table_tensor,
            prefill_use_trtllm=prefill_use_trtllm,
            decode_use_trtllm=decode_use_trtllm,
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
            num_prefills=num_prefills,
            num_prefill_tokens=num_prefill_tokens,
            use_cascade=use_cascade,
            shared_qo_indptr_cpu=shared_qo_indptr_cpu,
            shared_kv_page_indptr_cpu=shared_kv_page_indptr_cpu,
            shared_kv_page_indices_cpu=shared_kv_page_indices_cpu,
            shared_kv_last_page_len_cpu=shared_kv_last_page_len_cpu,
        )

        self._plan(attn_metadata)

        return attn_metadata

    def build_for_cudagraph_capture(
            self, common_attn_metadata: CommonAttentionMetadata):
        """
        This method builds the metadata for full cudagraph capture.
        Currently, only decode is supported for full cudagraphs with FlashInfer.
        """
        m = common_attn_metadata

        assert m.num_reqs == m.num_actual_tokens, \
            "FlashInfer only supports decode-only full CUDAGraph capture. " \
            "Make sure all cudagraph capture sizes <= max_num_seq."

        m.max_query_len = 1  # decode-only

        return self.build(0, m)

    def use_cascade_attention(self, *args, **kwargs) -> bool:
        # Delegates to the module-level heuristic imported from the
        # flash_attn backend.
        if self.kv_cache_spec.dtype != self.vllm_config.model_config.dtype:
            # TODO: The cascade wrapper currently does not support setting
            # kv cache dtype to something different from query dtype.
            return False
        return use_cascade_attention(*args, **kwargs)
596
+
597
+
598
+ class FlashInferImpl(AttentionImpl):
599
+
600
    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        # NOTE(review): annotated Optional[int] but named as a layer *name*
        # — presumably should be Optional[str]; confirm against callers.
        kv_sharing_target_layer_name: Optional[int] = None,
        sinks: Optional[torch.Tensor] = None,
    ) -> None:
        """Initialize the FlashInfer attention implementation for one layer.

        Only decoder self-attention is supported; any other `attn_type`
        raises NotImplementedError.
        """
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        # Stored as a (left, right) window; (-1, -1) means no sliding window.
        if sliding_window is None:
            self.sliding_window = (-1, -1)
        else:
            self.sliding_window = (sliding_window - 1, 0)
        self.kv_cache_dtype = kv_cache_dtype
        self.logits_soft_cap = logits_soft_cap
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name

        # GQA ratio: number of query heads sharing each KV head.
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "FlashInferImpl")

        # Optional attention-sink logits, one per query head.
        self.sinks: Optional[torch.Tensor] = None
        if sinks is not None:
            if sinks.shape[0] != num_heads:
                raise ValueError(
                    "Sinks must have the same number of heads as the number of "
                    f"heads in the layer. Expected {num_heads}, but got "
                    f"{sinks.shape[0]}."
                )
            self.sinks = sinks
646
+
647
+ def forward(
648
+ self,
649
+ layer: torch.nn.Module,
650
+ query: torch.Tensor,
651
+ key: torch.Tensor,
652
+ value: torch.Tensor,
653
+ kv_cache: torch.Tensor,
654
+ attn_metadata: FlashInferMetadata,
655
+ output: Optional[torch.Tensor] = None,
656
+ output_scale: Optional[torch.Tensor] = None,
657
+ ) -> torch.Tensor:
658
+ """Forward pass with FlashInfer.
659
+
660
+ Args:
661
+ query: shape = [num_tokens, num_heads, head_size]
662
+ key: shape = [num_tokens, num_kv_heads, head_size]
663
+ value: shape = [num_tokens, num_kv_heads, head_size]
664
+ kv_cache: shape -
665
+ # NHD: [num_blocks, 2, block_size, num_kv_heads, head_size]
666
+ # HND: [num_blocks, 2, num_kv_heads, block_size, head_size]
667
+
668
+
669
+ attn_metadata: Metadata for attention.
670
+ Returns:
671
+ shape = [num_tokens, num_heads * head_size]
672
+ """
673
+ assert output is not None, "Output tensor must be provided."
674
+
675
+ if output_scale is not None:
676
+ raise NotImplementedError(
677
+ "fused output quantization is not yet supported"
678
+ " for FlashInferImpl")
679
+
680
+ if attn_metadata is None:
681
+ # Profiling run.
682
+ return output
683
+
684
+ # IMPORTANT!
685
+ # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in
686
+ # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead
687
+ # in this method. For example, `view` and `slice` (or `[:n]`) operations
688
+ # are surprisingly slow even in the case they do not invoke any GPU ops.
689
+ # Minimize the PyTorch ops in this method as much as possible.
690
+ # Whenever making a change in this method, please benchmark the
691
+ # performance to make sure it does not introduce any overhead.
692
+
693
+ num_actual_tokens = attn_metadata.num_actual_tokens
694
+
695
+ if self.kv_sharing_target_layer_name is None:
696
+ # Reshape the input keys and values and store them in the cache.
697
+ # Skip this if sharing KV cache with an earlier attention layer.
698
+ # NOTE(woosuk): Here, key and value are padded while slot_mapping is
699
+ # not padded. However, we don't need to do key[:num_actual_tokens]
700
+ # and value[:num_actual_tokens] because the reshape_and_cache_flash
701
+ # op uses the slot_mapping's shape to determine the number of
702
+ # actual tokens.
703
+ torch.ops._C_cache_ops.reshape_and_cache_flash(
704
+ key,
705
+ value,
706
+ kv_cache[:, 0],
707
+ kv_cache[:, 1],
708
+ attn_metadata.slot_mapping,
709
+ self.kv_cache_dtype,
710
+ layer._k_scale,
711
+ layer._v_scale,
712
+ )
713
+
714
+ # The FlashInfer api requires data to be in fp8_e4m3 or fp8_e5m2
715
+ # to process the cache when the kv_cache_dtype is fp8
716
+ if self.kv_cache_dtype.startswith("fp8"):
717
+ torch_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer(
718
+ self.kv_cache_dtype)
719
+ kv_cache = kv_cache.view(torch_dtype)
720
+
721
+ window_left = (self.sliding_window[0]
722
+ if self.sliding_window is not None else -1)
723
+
724
+ # Inputs and outputs may be padded for CUDA graphs
725
+ query = query[:num_actual_tokens]
726
+ output_padded = output
727
+ output = output[:num_actual_tokens]
728
+
729
+ if attn_metadata.use_cascade:
730
+ # Cascade attention (rare case).
731
+ assert attn_metadata.cascade_wrapper is not None
732
+ output.copy_(attn_metadata.cascade_wrapper.run(query, kv_cache))
733
+ return output
734
+
735
+ num_decode_tokens = attn_metadata.num_decode_tokens
736
+ num_prefill_tokens = attn_metadata.num_prefill_tokens
737
+
738
+ stride_order = FlashInferBackend.get_kv_cache_stride_order()
739
+ kv_cache_permute = kv_cache.permute(*stride_order)
740
+ # Regular attention (common case).
741
+ # Decodes are at the front and prefills are at the back,
742
+ # according to reorder_batch()
743
+ if num_prefill_tokens > 0:
744
+ prefill_wrapper = attn_metadata.prefill_wrapper
745
+ prefill_query = query[num_decode_tokens:]
746
+ assert prefill_query.shape[0] == num_prefill_tokens
747
+ assert prefill_wrapper is not None
748
+
749
+ if not attn_metadata.prefill_use_trtllm:
750
+ assert prefill_wrapper._causal
751
+ assert prefill_wrapper._window_left == window_left
752
+ assert prefill_wrapper._logits_soft_cap == (
753
+ self.logits_soft_cap or 0.0)
754
+ assert prefill_wrapper._sm_scale == self.scale
755
+ prefill_wrapper.run(
756
+ prefill_query,
757
+ kv_cache_permute,
758
+ k_scale=layer._k_scale_float,
759
+ v_scale=layer._v_scale_float,
760
+ out=output[num_decode_tokens:],
761
+ )
762
+ else:
763
+ # prefill_query may be non-contiguous
764
+ prefill_query = prefill_query.contiguous()
765
+ workspace_buffer = prefill_wrapper._float_workspace_buffer
766
+ block_tables_prefill = attn_metadata.block_table_tensor[
767
+ num_decode_tokens:]
768
+ seq_lens_prefill = attn_metadata.seq_lens[num_decode_tokens:]
769
+
770
+ # This path needs to be enabled with VLLM_KV_CACHE_LAYOUT = HND
771
+ assert get_kv_cache_layout() == "HND"
772
+ assert prefill_query.is_contiguous()
773
+ assert kv_cache_permute.is_contiguous()
774
+ assert workspace_buffer.is_contiguous()
775
+ assert block_tables_prefill.is_contiguous()
776
+ assert seq_lens_prefill.is_contiguous()
777
+
778
+ trtllm_batch_context_with_kv_cache(
779
+ query=prefill_query,
780
+ kv_cache=kv_cache_permute,
781
+ workspace_buffer=workspace_buffer,
782
+ block_tables=block_tables_prefill,
783
+ seq_lens=seq_lens_prefill,
784
+ max_q_len=attn_metadata.max_q_len,
785
+ max_kv_len=attn_metadata.max_seq_len,
786
+ bmm1_scale=layer._k_scale_float * self.scale,
787
+ bmm2_scale=layer._v_scale_float,
788
+ batch_size=attn_metadata.num_prefills,
789
+ cum_seq_lens_q=attn_metadata.qo_indptr_gpu,
790
+ cum_seq_lens_kv=attn_metadata.paged_kv_indptr_gpu,
791
+ window_left=window_left,
792
+ sinks=self.sinks,
793
+ out=output[num_decode_tokens:],
794
+ )
795
+
796
+ if num_decode_tokens > 0:
797
+ decode_wrapper = attn_metadata.decode_wrapper
798
+ decode_query = query[:num_decode_tokens]
799
+ assert decode_query.shape[0] == num_decode_tokens
800
+ assert decode_wrapper is not None
801
+
802
+ if not attn_metadata.decode_use_trtllm:
803
+ assert decode_wrapper._window_left == window_left
804
+ assert decode_wrapper._logits_soft_cap == (self.logits_soft_cap
805
+ or 0.0)
806
+ assert decode_wrapper._sm_scale == self.scale
807
+ decode_wrapper.run(
808
+ decode_query,
809
+ kv_cache_permute,
810
+ k_scale=layer._k_scale_float,
811
+ v_scale=layer._v_scale_float,
812
+ out=output[:num_decode_tokens],
813
+ )
814
+ else:
815
+ # decode_query may be non-contiguous
816
+ decode_query = decode_query.contiguous()
817
+ workspace_buffer = decode_wrapper._float_workspace_buffer
818
+ block_tables_decode = attn_metadata.block_table_tensor[:
819
+ num_decode_tokens]
820
+ seq_lens_decode = attn_metadata.seq_lens[:num_decode_tokens]
821
+
822
+ # This path needs to be enabled with VLLM_KV_CACHE_LAYOUT = HND
823
+ assert get_kv_cache_layout() == "HND"
824
+ assert decode_query.is_contiguous()
825
+ assert kv_cache_permute.is_contiguous()
826
+ assert workspace_buffer.is_contiguous()
827
+ assert block_tables_decode.is_contiguous()
828
+ assert seq_lens_decode.is_contiguous()
829
+
830
+ trtllm_batch_decode_with_kv_cache(
831
+ query=decode_query,
832
+ kv_cache=kv_cache_permute,
833
+ workspace_buffer=workspace_buffer,
834
+ block_tables=block_tables_decode,
835
+ seq_lens=seq_lens_decode,
836
+ max_seq_len=attn_metadata.max_seq_len,
837
+ bmm1_scale=layer._k_scale_float * self.scale,
838
+ bmm2_scale=layer._v_scale_float,
839
+ window_left=window_left,
840
+ sinks=self.sinks,
841
+ out=output[:num_decode_tokens],
842
+ )
843
+ return output_padded
844
+
845
+
846
+ def fast_plan_decode(
847
+ self, # decode wrapper
848
+ indptr_cpu: torch.Tensor,
849
+ indices: torch.Tensor,
850
+ last_page_len_cpu: torch.Tensor,
851
+ num_qo_heads: int,
852
+ num_kv_heads: int,
853
+ head_dim: int,
854
+ page_size: int,
855
+ pos_encoding_mode: str = "NONE",
856
+ window_left: int = -1,
857
+ logits_soft_cap: Optional[float] = None,
858
+ q_data_type: Optional[Union[str, torch.dtype]] = "float16",
859
+ kv_data_type: Optional[Union[str, torch.dtype]] = None,
860
+ data_type: Optional[Union[str, torch.dtype]] = None,
861
+ sm_scale: Optional[float] = None,
862
+ rope_scale: Optional[float] = None,
863
+ rope_theta: Optional[float] = None,
864
+ non_blocking: bool = True,
865
+ ) -> None:
866
+ """
867
+ A faster version of BatchDecodeWithPagedKVCacheWrapper::plan used for
868
+ cudagraph capture/replay, while the no cudagraph version turns back
869
+ to the original plan.
870
+ using original plan after passing host-side buffers:
871
+ - only host-to-device copy of indptr and last_page_len buffers
872
+ Modifications for cudagraph:
873
+ - only host-to-device copy of indptr and last_page_len buffers.
874
+ - avoid device-to-device copy of indices buffer.
875
+
876
+ Part of the code get inspiration from the original plan from FlashInfer repo
877
+ and the implementation of fast_decode_plan for FlashInfer in SGlang repo.
878
+ """
879
+ # Warm up with the original plan if it is first call, and always run the
880
+ # original plan if we run for dynamic shape. For fixed shape (cudagraph),
881
+ # this warm up is to generate the _cached_module for the decode wrapper.
882
+ if not self.is_cuda_graph_enabled or \
883
+ getattr(self, "vllm_first_call", True):
884
+ self.plan(
885
+ indptr_cpu,
886
+ indices,
887
+ last_page_len_cpu,
888
+ num_qo_heads,
889
+ num_kv_heads,
890
+ head_dim,
891
+ page_size,
892
+ pos_encoding_mode,
893
+ window_left,
894
+ logits_soft_cap,
895
+ q_data_type,
896
+ kv_data_type,
897
+ data_type,
898
+ sm_scale,
899
+ rope_scale,
900
+ rope_theta,
901
+ non_blocking,
902
+ )
903
+ self.vllm_first_call = False
904
+ return
905
+
906
+ assert self.is_cuda_graph_enabled, "Should be cudagraph only here"
907
+
908
+ batch_size = len(last_page_len_cpu)
909
+ if logits_soft_cap is None:
910
+ logits_soft_cap = 0.0
911
+
912
+ # Handle data types consistently
913
+ if data_type is not None:
914
+ if q_data_type is None:
915
+ q_data_type = data_type
916
+ if kv_data_type is None:
917
+ kv_data_type = data_type
918
+ elif q_data_type is None:
919
+ q_data_type = "float16"
920
+
921
+ if kv_data_type is None:
922
+ kv_data_type = q_data_type
923
+ q_data_type = getattr(torch, q_data_type) if isinstance(
924
+ q_data_type, str) else q_data_type
925
+ kv_data_type = getattr(torch, kv_data_type) if isinstance(
926
+ kv_data_type, str) else kv_data_type
927
+
928
+ if self.use_tensor_cores:
929
+ qo_indptr_host = _get_range_buf(batch_size + 1, "cpu")
930
+
931
+ if batch_size != self._fixed_batch_size:
932
+ raise ValueError(
933
+ "The batch size should be fixed in cudagraph mode, the runtime "
934
+ "batch size {} mismatches the batch size set during "
935
+ "initialization {}".format(batch_size, self._fixed_batch_size))
936
+ if len(indices) > len(self._paged_kv_indices_buf):
937
+ raise ValueError(
938
+ "The size of indices should be less than or equal to the "
939
+ "allocated buffer")
940
+
941
+ # host-to-device copy for the indptr buffer
942
+ self._paged_kv_indptr_buf.copy_(indptr_cpu, non_blocking=True)
943
+ # host-to-device copy for the last_page_len buffer
944
+ self._paged_kv_last_page_len_buf.copy_(last_page_len_cpu,
945
+ non_blocking=True)
946
+
947
+ indptr_host = indptr_cpu
948
+ last_page_len_host = last_page_len_cpu
949
+
950
+ if self.use_tensor_cores:
951
+ kv_lens_arr_host = get_seq_lens(indptr_host, last_page_len_host,
952
+ page_size)
953
+
954
+ try:
955
+ # Make sure we pass exactly 15 arguments for tensor core version
956
+ self._plan_info = self._cached_module.plan(
957
+ self._float_workspace_buffer,
958
+ self._int_workspace_buffer,
959
+ self._pin_memory_int_workspace_buffer,
960
+ qo_indptr_host,
961
+ indptr_host,
962
+ kv_lens_arr_host,
963
+ batch_size, # total_num_rows
964
+ batch_size,
965
+ num_qo_heads,
966
+ num_kv_heads,
967
+ page_size,
968
+ self.is_cuda_graph_enabled,
969
+ head_dim,
970
+ head_dim,
971
+ False, # causal
972
+ )
973
+ except Exception as e:
974
+ raise RuntimeError(f"Error in tensor core plan: {e}") from e
975
+ else:
976
+ try:
977
+ # Make sure we pass exactly 15 arguments for standard version
978
+ self._plan_info = self._cached_module.plan(
979
+ self._float_workspace_buffer,
980
+ self._int_workspace_buffer,
981
+ self._pin_memory_int_workspace_buffer,
982
+ indptr_host,
983
+ batch_size,
984
+ num_qo_heads,
985
+ num_kv_heads,
986
+ page_size,
987
+ self.is_cuda_graph_enabled,
988
+ window_left,
989
+ logits_soft_cap,
990
+ head_dim,
991
+ head_dim,
992
+ torch.empty(0, dtype=q_data_type),
993
+ torch.empty(0, dtype=kv_data_type),
994
+ )
995
+ except Exception as e:
996
+ raise RuntimeError(f"Error in standard plan: {e}") from e
997
+
998
+ self._pos_encoding_mode = pos_encoding_mode
999
+ self._window_left = window_left
1000
+ self._logits_soft_cap = logits_soft_cap
1001
+ self._sm_scale = sm_scale
1002
+ self._rope_scale = rope_scale
1003
+ self._rope_theta = rope_theta
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/flex_attention.py ADDED
@@ -0,0 +1,526 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with FlashAttention."""
4
+ from collections import defaultdict
5
+ from dataclasses import dataclass
6
+ from typing import Optional
7
+
8
+ import torch
9
+ from torch.nn.attention.flex_attention import (BlockMask, _mask_mod_signature,
10
+ _score_mod_signature,
11
+ create_block_mask,
12
+ flex_attention)
13
+
14
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
15
+ AttentionMetadata, AttentionType,
16
+ is_quantized_kv_cache)
17
+ from vllm.config import VllmConfig
18
+ from vllm.logger import init_logger
19
+ from vllm.platforms import current_platform
20
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
21
+ CommonAttentionMetadata)
22
+ from vllm.v1.kv_cache_interface import AttentionSpec
23
+
24
+ logger = init_logger(__name__)
25
+
26
+ create_block_mask_compiled = torch.compile(create_block_mask,
27
+ fullgraph=True,
28
+ mode="reduce-overhead")
29
+ flex_attention_compiled = torch.compile(flex_attention, fullgraph=True)
30
+
31
+
32
+ def _offsets_to_doc_ids_tensor(offsets: torch.Tensor) -> torch.Tensor:
33
+ device = offsets.device
34
+ counts = offsets[1:] - offsets[:-1]
35
+ return torch.repeat_interleave(
36
+ torch.arange(len(counts), device=device, dtype=torch.int32), counts)
37
+
38
+
39
+ class FlexAttentionBackend(AttentionBackend):
40
+ accept_output_buffer: bool = True
41
+
42
+ @classmethod
43
+ def get_supported_dtypes(cls) -> list[torch.dtype]:
44
+ return [torch.float16, torch.bfloat16, torch.float32]
45
+
46
+ @classmethod
47
+ def validate_head_size(cls, head_size: int) -> None:
48
+ return # FlexAttention supports any head size
49
+
50
+ @staticmethod
51
+ def get_name() -> str:
52
+ return "FLEX_ATTENTION"
53
+
54
+ @staticmethod
55
+ def get_impl_cls() -> type["FlexAttentionImpl"]:
56
+ return FlexAttentionImpl
57
+
58
+ @staticmethod
59
+ def get_metadata_cls() -> type["AttentionMetadata"]:
60
+ return FlexAttentionMetadata
61
+
62
+ @staticmethod
63
+ def get_kv_cache_shape(
64
+ num_blocks: int,
65
+ block_size: int,
66
+ num_kv_heads: int,
67
+ head_size: int,
68
+ ) -> tuple[int, ...]:
69
+ return (2, num_blocks, block_size, num_kv_heads, head_size)
70
+
71
+ @staticmethod
72
+ def get_builder_cls() -> type["FlexAttentionMetadataBuilder"]:
73
+ return FlexAttentionMetadataBuilder
74
+
75
+ @staticmethod
76
+ def use_cascade_attention(*args, **kwargs) -> bool:
77
+ return False
78
+
79
+
80
+ # @torch.compile(fullgraph=True, mode="reduce-overhead")
81
+ def physical_to_logical_mapping(
82
+ block_table: torch.Tensor,
83
+ total_blocks: Optional[int] = None) -> torch.Tensor:
84
+ """
85
+ Creates an inverse mapping from physical block locations to logical indices.
86
+
87
+ The original block_table maps from logical blocks to physical locations:
88
+
89
+ Logical to Physical (Original block_table):
90
+ ┌───────────────────────────────────────────┐
91
+ │ Request 0: │
92
+ │ │
93
+ │ Logical Blocks: 0 1 2 3 4 5 6 7 │
94
+ │ │ │ │ │ │ │ │ │ │
95
+ │ v v v v v v v v │
96
+ │ Physical Blocks: 3 5 1 7 4 2 0 6 │
97
+ └───────────────────────────────────────────┘
98
+
99
+ This function creates the inverse mapping:
100
+
101
+ Physical to Logical (Inverse mapping):
102
+ ┌───────────────────────────────────────────┐
103
+ │ Request 0: │
104
+ │ │
105
+ │ Physical Blocks: 0 1 2 3 4 5 6 7 │
106
+ │ │ │ │ │ │ │ │ │ │
107
+ │ v v v v v v v v │
108
+ │ Logical Blocks: 6 2 5 0 4 1 7 3 │
109
+ └───────────────────────────────────────────┘
110
+
111
+ If multiple logical blocks map to the same physical block,
112
+ this function returns the first (minimum) logical block index.
113
+
114
+ If a physical block is not mapped to by any logical block,
115
+ its value in the result will be -1.
116
+
117
+
118
+ Args:
119
+ block_table: Tensor of shape [max_reqs, max_num_blocks]
120
+ mapping logical blocks to physical locations
121
+
122
+ Returns:
123
+ A tensor of shape [max_reqs, max_physical_block]
124
+ """
125
+ max_reqs, max_num_blocks = block_table.shape
126
+ device = block_table.device
127
+
128
+ physical_to_logical = torch.full((max_reqs, total_blocks),
129
+ -1,
130
+ dtype=torch.long,
131
+ device=device)
132
+
133
+ logical_indices = (torch.arange(max_num_blocks,
134
+ device=device).unsqueeze(0).expand(
135
+ max_reqs, -1))
136
+
137
+ physical_to_logical.scatter_(-1, block_table.to(torch.int64),
138
+ logical_indices)
139
+ # TODO Confirm - Seems like block 0 is always empty so we reset it manually
140
+ physical_to_logical[:, 0] = -1
141
+ return physical_to_logical
142
+
143
+
144
+ def causal_mask_mod(b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor,
145
+ kv_idx: torch.Tensor):
146
+ return q_idx >= kv_idx
147
+
148
+
149
+ @dataclass
150
+ class FlexAttentionMetadata:
151
+ causal: bool
152
+ num_actual_tokens: int # Number of tokens excluding padding.
153
+ max_query_len: int
154
+ query_start_loc: torch.Tensor
155
+ max_seq_len: int
156
+ seq_lens: torch.Tensor
157
+ block_table: torch.Tensor
158
+ slot_mapping: torch.Tensor
159
+
160
+ use_cascade: bool
161
+ common_prefix_len: int
162
+ cu_prefix_query_lens: Optional[torch.Tensor]
163
+ prefix_kv_lens: Optional[torch.Tensor]
164
+ suffix_kv_lens: Optional[torch.Tensor]
165
+
166
+ # Block info
167
+ total_cache_tokens: int
168
+ block_size: int
169
+ max_possible_sequence_length: int
170
+ num_reqs: int
171
+ physical_to_logical: torch.Tensor
172
+ decode_offset: torch.Tensor
173
+
174
+ # For logging.
175
+ num_input_tokens: int = 0 # Number of tokens including padding.
176
+
177
+ # Flex Metadata
178
+ num_blocks = 0
179
+ block_mask: Optional[BlockMask] = None
180
+ score_mod: Optional[_score_mod_signature] = None
181
+ logical_mask_mod: _mask_mod_signature = causal_mask_mod
182
+
183
+ def get_causal_mask_mod(self) -> _mask_mod_signature:
184
+ """Creates the mask_mod function for FlexAttention.
185
+
186
+ This function creates the combined mask mod function that handles:
187
+ 1. The paged attention block mapping
188
+ 2. The mapping from packed query sequences to logical query entries
189
+
190
+ It also by defaults adds the decoding offset to the query indices.
191
+ With this info we create the "logical" indices that are passed to
192
+ mask_mod functions. This allows mask mod functions to be agnostic to
193
+ layout of the query and key/value tensors.
194
+
195
+ TODO is_within_lower_bound: do sequences start on block_boundaries?
196
+ """
197
+ # Create a lookup mapping from query indices -> request number
198
+ request_lookup = _offsets_to_doc_ids_tensor(self.query_start_loc)
199
+
200
+ def final_mask_mod(
201
+ b: torch.Tensor,
202
+ h: torch.Tensor,
203
+ q_idx: torch.Tensor,
204
+ physical_kv_idx: torch.Tensor,
205
+ ) -> torch.Tensor:
206
+ # Map query indices to corresponding request indices
207
+ q_req = request_lookup[q_idx]
208
+
209
+ # Convert physical KV indices to logical indices
210
+ physical_kv_block = physical_kv_idx // self.block_size
211
+ physical_kv_offset = physical_kv_idx % self.block_size
212
+ logical_block_idx = self.physical_to_logical[q_req,
213
+ physical_kv_block]
214
+ logical_kv_idx = logical_block_idx * self.block_size + physical_kv_offset # noqa: E501
215
+
216
+ # Determine valid kv indices
217
+ live_block = logical_block_idx >= 0
218
+ within_upper_bound = logical_kv_idx < self.seq_lens[q_req]
219
+ within_lower_bound = logical_kv_idx >= 0
220
+
221
+ is_valid = live_block & within_upper_bound & within_lower_bound
222
+
223
+ # Convert physical query indices to logical indices
224
+ local_q_idx = q_idx - self.query_start_loc[q_req]
225
+ logical_q_idx = local_q_idx + self.decode_offset[q_req]
226
+
227
+ # Apply mask modification only for valid indices
228
+ return torch.where(
229
+ is_valid,
230
+ self.logical_mask_mod(b, h, logical_q_idx, logical_kv_idx),
231
+ False,
232
+ )
233
+
234
+ return final_mask_mod
235
+
236
+ def get_bidirectional_mask_mod(self) -> _mask_mod_signature:
237
+ """Creates the encoder mask_mod function for FlexAttention.
238
+
239
+ Since the encoder bidirectional attention doesn't run with
240
+ KV cache, this function creates a mask based on the
241
+ packed query sequences.
242
+ """
243
+ # Create a lookup mapping from query indices -> request number
244
+ request_lookup = _offsets_to_doc_ids_tensor(self.query_start_loc)
245
+
246
+ def final_mask_mod(
247
+ b: torch.Tensor,
248
+ h: torch.Tensor,
249
+ q_idx: torch.Tensor,
250
+ kv_idx: torch.Tensor,
251
+ ) -> torch.Tensor:
252
+ return request_lookup[q_idx] == request_lookup[kv_idx]
253
+
254
+ return final_mask_mod
255
+
256
+ def build_block_mask(self) -> BlockMask:
257
+ if self.causal:
258
+ mask_mod = self.get_causal_mask_mod()
259
+ kv_len = self.total_cache_tokens
260
+ else:
261
+ mask_mod = self.get_bidirectional_mask_mod()
262
+ kv_len = self.num_actual_tokens
263
+ return create_block_mask_compiled(
264
+ mask_mod,
265
+ None,
266
+ None,
267
+ self.num_actual_tokens,
268
+ kv_len,
269
+ device=self.block_table.device,
270
+ )
271
+
272
+ def __post_init__(self):
273
+ assert self.use_cascade is False, "Not implemented yet."
274
+ assert self.common_prefix_len == 0, "Not implemented yet."
275
+ assert self.cu_prefix_query_lens is None, "Not implemented yet."
276
+ assert self.prefix_kv_lens is None, "Not implemented yet."
277
+ assert self.suffix_kv_lens is None, "Not implemented yet."
278
+ self.num_blocks = self.total_cache_tokens // self.block_size
279
+ self.block_mask = self.build_block_mask()
280
+
281
+
282
+ class FlexAttentionMetadataBuilder(
283
+ AttentionMetadataBuilder[FlexAttentionMetadata]):
284
+
285
+ def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
286
+ vllm_config: VllmConfig, device: torch.device):
287
+ self.model_config = vllm_config.model_config
288
+ self.parallel_config = vllm_config.parallel_config
289
+ self.cache_config = vllm_config.cache_config
290
+
291
+ self.num_heads_q = self.model_config.get_num_attention_heads(
292
+ vllm_config.parallel_config)
293
+ self.num_heads_kv = self.model_config.get_num_kv_heads(
294
+ vllm_config.parallel_config)
295
+ self.headdim = self.model_config.get_head_size()
296
+ self.block_size = kv_cache_spec.block_size
297
+ self.kv_cache_spec = kv_cache_spec
298
+ self.device = device
299
+
300
+ def build(self,
301
+ common_prefix_len: int,
302
+ common_attn_metadata: CommonAttentionMetadata,
303
+ fast_build: bool = False) -> FlexAttentionMetadata:
304
+ num_reqs = common_attn_metadata.num_reqs
305
+ num_actual_tokens = common_attn_metadata.num_actual_tokens
306
+ max_query_len = common_attn_metadata.max_query_len
307
+
308
+ max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
309
+ query_start_loc = common_attn_metadata.query_start_loc
310
+ seq_lens = common_attn_metadata.seq_lens
311
+ block_table_tensor = common_attn_metadata.block_table_tensor
312
+ slot_mapping = common_attn_metadata.slot_mapping
313
+
314
+ use_cascade = common_prefix_len > 0
315
+ cu_prefix_query_lens = None
316
+ prefix_kv_lens = None
317
+ suffix_kv_lens = None
318
+ if use_cascade:
319
+ raise NotImplementedError("Not yet my friend")
320
+
321
+ block_size = self.kv_cache_spec.block_size
322
+ max_possible_seq_len = self.model_config.max_model_len
323
+ total_cache_tokens = self.cache_config.num_gpu_blocks * block_size
324
+
325
+ inverse_block_table = physical_to_logical_mapping(
326
+ block_table_tensor, self.cache_config.num_gpu_blocks)
327
+
328
+ # Get the original offset tensor
329
+ offset_tensor = common_attn_metadata.num_computed_tokens_cpu.to(
330
+ self.device, non_blocking=True)
331
+
332
+ out = FlexAttentionMetadata(
333
+ causal=common_attn_metadata.causal,
334
+ num_actual_tokens=num_actual_tokens,
335
+ max_query_len=max_query_len,
336
+ query_start_loc=query_start_loc,
337
+ max_seq_len=max_seq_len,
338
+ seq_lens=seq_lens,
339
+ block_table=block_table_tensor,
340
+ slot_mapping=slot_mapping,
341
+ use_cascade=use_cascade,
342
+ common_prefix_len=common_prefix_len,
343
+ cu_prefix_query_lens=cu_prefix_query_lens,
344
+ prefix_kv_lens=prefix_kv_lens,
345
+ suffix_kv_lens=suffix_kv_lens,
346
+ block_size=block_size,
347
+ max_possible_sequence_length=max_possible_seq_len,
348
+ num_reqs=num_reqs,
349
+ physical_to_logical=inverse_block_table,
350
+ total_cache_tokens=total_cache_tokens,
351
+ decode_offset=offset_tensor,
352
+ )
353
+ return out
354
+
355
+
356
+ class FlexAttentionImpl(AttentionImpl):
357
+ sliding_window: Optional[tuple[int, int]]
358
+ alibi_slopes: Optional[torch.Tensor]
359
+ logits_soft_cap: Optional[float]
360
+
361
+ def __init__(
362
+ self,
363
+ num_heads: int,
364
+ head_size: int,
365
+ scale: float,
366
+ num_kv_heads: int,
367
+ alibi_slopes: Optional[list[float]],
368
+ sliding_window: Optional[int],
369
+ kv_cache_dtype: str,
370
+ logits_soft_cap: Optional[float] = None,
371
+ attn_type: AttentionType = AttentionType.DECODER,
372
+ kv_sharing_target_layer_name: Optional[str] = None,
373
+ ) -> None:
374
+ self.num_heads = num_heads
375
+ self.head_size = head_size
376
+ self.scale = float(scale)
377
+ self.num_kv_heads = num_kv_heads
378
+ self.attn_type = attn_type
379
+
380
+ if attn_type not in (AttentionType.ENCODER_ONLY,
381
+ AttentionType.DECODER):
382
+ raise NotImplementedError(
383
+ f"FlexAttention does not support {attn_type} attention")
384
+
385
+ if alibi_slopes is not None:
386
+ raise NotImplementedError(
387
+ "FlexAttention does not support alibi slopes yet.")
388
+ else:
389
+ self.alibi_slopes = None
390
+ if sliding_window is not None:
391
+ raise NotImplementedError(
392
+ "FlexAttention does not support sliding window yet.")
393
+ else:
394
+ self.sliding_window = (-1, -1)
395
+ self.kv_cache_dtype = kv_cache_dtype
396
+ self.logits_soft_cap = logits_soft_cap
397
+ if self.logits_soft_cap is not None:
398
+ raise NotImplementedError(
399
+ "FlexAttention does not support logits soft cap yet.")
400
+
401
+ self.num_queries_per_kv = self.num_heads // self.num_kv_heads
402
+
403
+ if kv_sharing_target_layer_name is not None:
404
+ raise NotImplementedError(
405
+ "FlexAttention does not support kv sharing yet.")
406
+
407
+ FlexAttentionBackend.validate_head_size(head_size)
408
+
409
+ if is_quantized_kv_cache(self.kv_cache_dtype):
410
+ raise NotImplementedError(
411
+ "FlexAttention does not support quantized kv-cache. Yet")
412
+
413
+ @staticmethod
414
+ def view_as_4d(tensor: torch.Tensor) -> torch.Tensor:
415
+ """View a 3d tensor as 4D."""
416
+ if tensor.ndim == 4:
417
+ return tensor
418
+ assert tensor.ndim == 3
419
+ return tensor[None, :, :, :]
420
+
421
+ def forward(
422
+ self,
423
+ layer: torch.nn.Module,
424
+ query: torch.Tensor,
425
+ key: torch.Tensor,
426
+ value: torch.Tensor,
427
+ kv_cache: torch.Tensor,
428
+ attn_metadata: FlexAttentionMetadata,
429
+ output: Optional[torch.Tensor] = None,
430
+ output_scale: Optional[torch.Tensor] = None,
431
+ ) -> torch.Tensor:
432
+ """Forward pass with FLexAttention.
433
+
434
+ Args:
435
+ query: shape = [num_tokens, num_heads, head_size]
436
+ key: shape = [num_tokens, num_kv_heads, head_size]
437
+ value: shape = [num_tokens, num_kv_heads, head_size]
438
+ kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
439
+ attn_metadata: Metadata for attention.
440
+ Returns:
441
+ shape = [num_tokens, num_heads * head_size]
442
+ """
443
+ assert output is not None, "Output tensor must be provided."
444
+ if output_scale is not None:
445
+ raise NotImplementedError(
446
+ "fused output quantization is not yet supported"
447
+ " for FlexAttentionImpl")
448
+
449
+ enable_gqa = self.num_kv_heads != self.num_heads
450
+
451
+ if attn_metadata is None:
452
+ # Profiling run.
453
+ return output
454
+ # query = self.view_as_4d(query).permute(0, 2, 1, 3)
455
+ # return torch.empty_like(query)
456
+
457
+ num_actual_tokens = attn_metadata.num_actual_tokens
458
+
459
+ if not attn_metadata.causal:
460
+ assert self.attn_type == AttentionType.ENCODER_ONLY
461
+
462
+ query, key_tensor, value_tensor = map(
463
+ lambda x: self.view_as_4d(x).permute(0, 2, 1, 3),
464
+ (query, key, value),
465
+ )
466
+
467
+ else:
468
+ assert self.attn_type == AttentionType.DECODER
469
+ key_cache, value_cache = kv_cache.unbind(0)
470
+
471
+ torch.ops._C_cache_ops.reshape_and_cache_flash(
472
+ key,
473
+ value,
474
+ key_cache,
475
+ value_cache,
476
+ attn_metadata.slot_mapping,
477
+ self.kv_cache_dtype,
478
+ layer._k_scale,
479
+ layer._v_scale,
480
+ )
481
+
482
+ # View out the block_size dim
483
+ key_cache = key_cache.view(-1, self.num_kv_heads, self.head_size)
484
+ value_cache = value_cache.view(-1, self.num_kv_heads,
485
+ self.head_size)
486
+ query, key_tensor, value_tensor = map(
487
+ lambda x: self.view_as_4d(x).permute(0, 2, 1, 3),
488
+ (query, key_cache, value_cache),
489
+ )
490
+
491
+ query = query[:, :, :num_actual_tokens, :]
492
+ # Doesn't work for now -> constraint violation
493
+ # torch._dynamo.try_mark_dynamic(query, 2)
494
+
495
+ # default M=64, N=64 may run out of shared memory on some GPUs
496
+ # TODO: Explicit configs for each GPU?
497
+ # Not sure how to calculate the shared memory requirement
498
+ extra_kernel_options = defaultdict[str, int](lambda: 64)
499
+ if query.dtype == torch.float32:
500
+ extra_kernel_options["BLOCK_M"] //= 2
501
+ extra_kernel_options["BLOCK_N"] //= 2
502
+ if current_platform.is_cuda():
503
+ device_props = torch.cuda.get_device_properties()
504
+ max_shared_memory = device_props.shared_memory_per_block_optin
505
+ if max_shared_memory < 144 * 1024:
506
+ extra_kernel_options["BLOCK_M"] //= 2
507
+ extra_kernel_options["BLOCK_N"] //= 2
508
+
509
+ out = flex_attention_compiled(
510
+ query,
511
+ key_tensor,
512
+ value_tensor,
513
+ attn_metadata.score_mod,
514
+ attn_metadata.block_mask,
515
+ self.scale,
516
+ enable_gqa=enable_gqa,
517
+ kernel_options={
518
+ "FORCE_USE_FLEX_ATTENTION": True,
519
+ **extra_kernel_options
520
+ },
521
+ )
522
+
523
+ # Flex doesn't have an out variant today, rely on epilogue fusion
524
+ out = out.permute(0, 2, 1, 3).squeeze(0)
525
+ output[:num_actual_tokens, :, :].copy_(out)
526
+ return output
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/linear_attn.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from dataclasses import dataclass
4
+ from typing import ClassVar
5
+
6
+ import torch
7
+
8
+ from vllm.attention.backends.abstract import AttentionBackend
9
+ from vllm.config import VllmConfig
10
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
11
+ CommonAttentionMetadata,
12
+ split_decodes_and_prefills)
13
+ from vllm.v1.kv_cache_interface import AttentionSpec, MambaSpec
14
+
15
+
16
class LinearAttentionBackend(AttentionBackend):
    """Attention backend for linear-attention layers.

    Only wires up the metadata builder; the actual kernels live in the
    model's linear-attention implementation.
    """

    @staticmethod
    def get_builder_cls() -> type["LinearAttentionMetadataBuilder"]:
        return LinearAttentionMetadataBuilder
21
+
22
+
23
@dataclass
class LinearAttentionMetadata:
    """Per-batch metadata consumed by the linear-attention layers.

    The batch is assumed to be ordered decodes-first, prefills-last
    (see the builder's reorder_batch_threshold).
    """
    num_prefills: int
    num_prefill_tokens: int
    num_decodes: int
    num_decode_tokens: int
    # Cumulative query offsets per request; seq_lens is per-request.
    query_start_loc: torch.Tensor
    seq_lens: torch.Tensor

    state_indices_tensor: torch.Tensor  # shape: [batch,]
33
+
34
+
35
class LinearAttentionMetadataBuilder(
        AttentionMetadataBuilder[LinearAttentionMetadata]):
    """Builds LinearAttentionMetadata from CommonAttentionMetadata."""

    # Requests with at most one new token are treated as decodes and
    # reordered to the front of the batch.
    reorder_batch_threshold: ClassVar[int] = 1

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        assert isinstance(kv_cache_spec, MambaSpec)
        self.kv_cache_spec = kv_cache_spec

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> LinearAttentionMetadata:
        """Split the batch into decodes/prefills and package the metadata."""
        (num_decodes, num_prefills, num_decode_tokens,
         num_prefill_tokens) = split_decodes_and_prefills(
             common_attn_metadata, decode_threshold=1)

        # Linear attention keeps a single recurrent state per request, so
        # only the first entry of each request's block table is used.
        return LinearAttentionMetadata(
            num_prefills=num_prefills,
            num_prefill_tokens=num_prefill_tokens,
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
            query_start_loc=common_attn_metadata.query_start_loc,
            seq_lens=common_attn_metadata.seq_lens,
            state_indices_tensor=common_attn_metadata.block_table_tensor[:, 0],
        )
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba1_attn.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ from dataclasses import dataclass
5
+ from typing import ClassVar, Optional
6
+
7
+ import torch
8
+
9
+ from vllm.attention.backends.abstract import AttentionBackend
10
+ from vllm.config import VllmConfig
11
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
12
+ CommonAttentionMetadata,
13
+ split_decodes_and_prefills)
14
+ from vllm.v1.kv_cache_interface import AttentionSpec, MambaSpec
15
+
16
+
17
class Mamba1AttentionBackend(AttentionBackend):
    """Attention backend for Mamba1 (S4/SSM-style) layers.

    Only wires up the metadata builder; the SSM kernels live elsewhere.
    """

    @staticmethod
    def get_builder_cls() -> type["Mamba1AttentionMetadataBuilder"]:
        return Mamba1AttentionMetadataBuilder
22
+
23
+
24
@dataclass
class Mamba1AttentionMetadata:
    """Per-batch metadata consumed by the Mamba1 layers."""
    query_start_loc: torch.Tensor
    # Number of already-computed tokens per request (moved to the
    # query_start_loc device by the builder).
    context_lens_tensor: torch.Tensor
    # One state slot per request (first block-table entry).
    state_indices_tensor: torch.Tensor
    # Per-request flag: True where the request resumes from an existing
    # state (context_lens > 0); None when the batch has no prefills.
    has_initial_states: Optional[torch.Tensor]
    num_prefills: int
    num_prefill_tokens: int
    num_decodes: int
    num_decode_tokens: int
34
+
35
+
36
class Mamba1AttentionMetadataBuilder(
        AttentionMetadataBuilder[Mamba1AttentionMetadata]):
    """Builds Mamba1AttentionMetadata from CommonAttentionMetadata."""

    reorder_batch_threshold: ClassVar[int] = 1

    # NOTE(review): the parameter order (kv_cache_spec, vllm_config, device,
    # layer_names) differs from the sibling builders, which use
    # (kv_cache_spec, layer_names, vllm_config, device) — confirm callers
    # construct this builder by keyword before relying on positions.
    def __init__(
        self,
        kv_cache_spec: AttentionSpec,
        vllm_config: VllmConfig,
        device: torch.device,
        layer_names: list[str],
    ):
        assert isinstance(kv_cache_spec, MambaSpec)
        self.kv_cache_spec = kv_cache_spec
        self.device = device
        self.vllm_config = vllm_config
        self.layer_names = layer_names

    def build(
        self,
        common_prefix_len: int,
        common_attn_metadata: CommonAttentionMetadata,
        fast_build: bool = False,
    ) -> Mamba1AttentionMetadata:
        """Split the batch into decodes/prefills and package the metadata."""
        query_start_loc = common_attn_metadata.query_start_loc

        # Mamba keeps one SSM state per request -> first block id only.
        state_indices_tensor = common_attn_metadata.block_table_tensor[:, 0]
        context_lens_tensor = common_attn_metadata.num_computed_tokens_cpu.to(
            query_start_loc.device)

        (num_decodes, num_prefills, num_decode_tokens,
         num_prefill_tokens) = split_decodes_and_prefills(
             common_attn_metadata, decode_threshold=1)

        # Only compute the resume-from-state mask when prefills exist.
        has_initial_states = (context_lens_tensor >
                              0) if num_prefills > 0 else None

        return Mamba1AttentionMetadata(
            query_start_loc=query_start_loc,
            context_lens_tensor=context_lens_tensor,
            has_initial_states=has_initial_states,
            state_indices_tensor=state_indices_tensor,
            num_prefills=num_prefills,
            num_prefill_tokens=num_prefill_tokens,
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
        )
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba2_attn.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import math
4
+ from dataclasses import dataclass
5
+ from typing import ClassVar, Optional
6
+
7
+ import torch
8
+
9
+ from vllm.attention.backends.abstract import AttentionBackend
10
+ from vllm.attention.backends.utils import PAD_SLOT_ID
11
+ from vllm.config import VllmConfig
12
+ from vllm.v1.attention.backends.utils import (AttentionCGSupport,
13
+ AttentionMetadataBuilder,
14
+ CommonAttentionMetadata,
15
+ split_decodes_and_prefills)
16
+ from vllm.v1.kv_cache_interface import AttentionSpec, MambaSpec
17
+
18
+
19
+ def _query_start_loc_to_chunk_indices_offsets(query_start_loc: torch.Tensor,
20
+ chunk_size: int,
21
+ total_seqlens: int):
22
+
23
+ cu_seqlens = query_start_loc[1:] # remove prepended 0
24
+
25
+ # outputs will have length expansion of chunks that do not divide
26
+ # chunk_size
27
+ N = math.ceil(total_seqlens / chunk_size) + (cu_seqlens[:-1] % chunk_size
28
+ > 0).sum()
29
+ chunk_indices = torch.arange(N,
30
+ dtype=torch.int,
31
+ device=query_start_loc.device)
32
+ chunk_offsets = torch.zeros((N, ),
33
+ dtype=torch.int,
34
+ device=query_start_loc.device)
35
+
36
+ p = 0 # num of insertions
37
+ for s, e in zip(cu_seqlens[:-1], cu_seqlens[1:]):
38
+
39
+ # if does not divide chunk_size, then there is one chunk insertion
40
+ p += (s % chunk_size > 0)
41
+
42
+ # get the dimensions
43
+ # - the + 1 for _e is to shift the boundary by one chunk
44
+ # - this shifting is not needed if chunk_size divides e
45
+ _s, _e = s // chunk_size + p, e // chunk_size + p + (e % chunk_size
46
+ > 0)
47
+
48
+ # adjust indices and offsets
49
+ chunk_indices[_s:_e] -= p
50
+ chunk_offsets[_s] = s % chunk_size
51
+
52
+ return chunk_indices, chunk_offsets
53
+
54
+
55
class Mamba2AttentionBackend(AttentionBackend):
    """Attention backend for Mamba2 (SSD) layers.

    Only wires up the metadata builder; the SSM kernels live elsewhere.
    """

    @staticmethod
    def get_builder_cls() -> type["Mamba2AttentionMetadataBuilder"]:
        return Mamba2AttentionMetadataBuilder
60
+
61
+
62
@dataclass
class Mamba2AttentionMetadata:
    """Per-batch metadata consumed by the Mamba2 layers."""
    num_prefills: int
    num_prefill_tokens: int
    num_decodes: int
    num_decode_tokens: int
    query_start_loc: torch.Tensor
    seq_lens: torch.Tensor

    # True when at least one prefill request resumes from an existing state.
    prep_initial_states: bool
    chunk_size: int

    # The following tensors only contain prefill requests and will be None if
    # the batch has no prefill request.
    has_initial_states_p: Optional[torch.Tensor]
    seq_idx_p: Optional[torch.Tensor]
    chunk_indices_p: Optional[torch.Tensor]
    chunk_offsets_p: Optional[torch.Tensor]

    state_indices_tensor: torch.Tensor  # shape: [batch,]

    # The following attributes are for triton implementation of causal_conv1d
    # (annotations corrected: `torch.tensor` is a function, the type is
    # `torch.Tensor`)
    nums_dict: Optional[dict] = None
    cu_seqlen: Optional[int] = None
    batch_ptr: Optional[torch.Tensor] = None
    token_chunk_offset_ptr: Optional[torch.Tensor] = None
88
+
89
+
90
class Mamba2AttentionMetadataBuilder(
        AttentionMetadataBuilder[Mamba2AttentionMetadata]):
    """Builds Mamba2AttentionMetadata, including chunked-prefill helper
    tensors and CUDA-graph-padded state indices for decode-only batches.
    """

    # Full cudagraphs are only supported for uniform single-token decode
    # batches (see build_for_cudagraph_capture below).
    cudagraph_support: ClassVar[AttentionCGSupport] = \
        AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE

    reorder_batch_threshold: ClassVar[int] = 1

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        assert isinstance(kv_cache_spec, MambaSpec)
        self.kv_cache_spec = kv_cache_spec
        self.chunk_size = vllm_config.model_config.get_mamba_chunk_size()
        self.vllm_config = vllm_config
        self.compilation_config = vllm_config.compilation_config
        assert self.chunk_size is not None, (
            "chunk_size needs to be set in the model config for Mamba2 models")
        # Largest decode batch that can be replayed from a CUDA graph.
        self.decode_cudagraph_max_bs = min(
            self.vllm_config.scheduler_config.max_num_seqs,
            self.compilation_config.max_capture_size)
        # Persistent buffer reused across graph replays for state indices.
        self.state_indices_tensor = torch.empty(
            (self.decode_cudagraph_max_bs, ),
            dtype=torch.int32,
            device=device,
        )

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> Mamba2AttentionMetadata:
        """Split the batch into decodes/prefills and build the metadata.

        Prefill-only helper tensors (seq_idx, chunk indices/offsets) are
        computed once here and reused by every mamba layer in the model
        forward pass.
        """
        num_reqs = common_attn_metadata.num_reqs
        query_start_loc = common_attn_metadata.query_start_loc
        seq_lens = common_attn_metadata.seq_lens

        seq_idx_p = None
        chunk_indices_p, chunk_offsets_p = None, None
        # Need flags to indicate if there are initial states
        # currently we really only support the FlashAttention backend
        has_initial_states_p = None
        prep_initial_states = False

        # One SSM state slot per request (first block-table entry).
        state_indices_tensor = common_attn_metadata.block_table_tensor[:, 0]

        num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
            split_decodes_and_prefills(common_attn_metadata,
                                       decode_threshold=1))

        # Compute seq_idx, chunk_indices and chunk_offsets for prefill only
        if num_prefills > 0:
            #[batch,]
            # Prefills are ordered last in the batch (decodes first).
            has_initial_states_cpu = (
                common_attn_metadata.
                num_computed_tokens_cpu[num_reqs - num_prefills:num_reqs] > 0)
            prep_initial_states = torch.any(has_initial_states_cpu).item()
            has_initial_states_p = has_initial_states_cpu.to(
                query_start_loc.device)

            # Prefill-local cumulative offsets (decode tokens removed).
            query_start_loc_p = common_attn_metadata.query_start_loc[
                -num_prefills - 1:] - num_decode_tokens

            # Per-token sequence index for the prefill tokens, shape [1, T].
            seq_idx_p = torch.repeat_interleave(torch.arange(
                num_prefills,
                dtype=torch.int32,
                device=query_start_loc_p.device),
                                                query_start_loc_p.diff(),
                                                output_size=num_prefill_tokens)
            seq_idx_p.unsqueeze_(0)

            # We compute metadata for chunked prefill once at the top level
            # model forward and reuse them in mamba layers. If not needed,
            # they will be ignored inside mamba kernels.
            if prep_initial_states:
                chunk_indices_p, chunk_offsets_p = (
                    _query_start_loc_to_chunk_indices_offsets(
                        query_start_loc_p, self.chunk_size,
                        num_prefill_tokens))

        elif num_decodes <= self.decode_cudagraph_max_bs:
            # Pad state tensor for CUDA graph
            # (decode-only batch: copy into the persistent buffer, pad the
            # tail with PAD_SLOT_ID up to the captured graph's batch size)
            num_input_tokens = self.vllm_config.pad_for_cudagraph(num_decodes)
            self.state_indices_tensor[:num_decodes].copy_(state_indices_tensor,
                                                          non_blocking=True)
            state_indices_tensor = self.state_indices_tensor[:num_input_tokens]
            state_indices_tensor[num_decodes:] = PAD_SLOT_ID

        attn_metadata = Mamba2AttentionMetadata(
            num_prefills=num_prefills,
            num_prefill_tokens=num_prefill_tokens,
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
            query_start_loc=query_start_loc,
            seq_lens=seq_lens,
            prep_initial_states=prep_initial_states,
            chunk_size=self.chunk_size,
            has_initial_states_p=has_initial_states_p,
            seq_idx_p=seq_idx_p,
            chunk_indices_p=chunk_indices_p,
            chunk_offsets_p=chunk_offsets_p,
            state_indices_tensor=state_indices_tensor,
        )
        return attn_metadata

    def build_for_cudagraph_capture(
            self, common_attn_metadata: CommonAttentionMetadata):
        """
        This method builds the metadata for full cudagraph capture.
        Currently, only decode is supported for full cudagraphs with Mamba.
        """
        m = common_attn_metadata

        assert m.num_reqs == m.num_actual_tokens, \
            "Mamba only supports decode-only full CUDAGraph capture. " \
            "Make sure all cudagraph capture sizes <= max_num_seq."

        m.max_query_len = 1  # decode-only

        return self.build(0, m)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mamba_selectors.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ from vllm.attention.backends.abstract import AttentionBackend
4
+ from vllm.v1.attention.backends.linear_attn import LinearAttentionBackend
5
+ from vllm.v1.attention.backends.mamba1_attn import Mamba1AttentionBackend
6
+ from vllm.v1.attention.backends.mamba2_attn import Mamba2AttentionBackend
7
+
8
+
9
def get_mamba_attn_backend(mamba_type: str) -> type[AttentionBackend]:
    """Resolve the attention backend class for a mamba layer type."""
    dispatch: dict[str, type[AttentionBackend]] = {
        "mamba1": Mamba1AttentionBackend,
        "mamba2": Mamba2AttentionBackend,
        "linear_attention": LinearAttentionBackend,
    }
    backend = dispatch.get(mamba_type)
    if backend is None:
        raise NotImplementedError(f"Mamba Attention type {mamba_type} is not "
                                  "supported yet.")
    return backend
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/common.py ADDED
@@ -0,0 +1,1206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """
4
+ # MLA Common Components
5
+
6
+ This file implements common components for MLA implementations.
7
+
8
+ First we define:
9
+
10
+ Sq as Q sequence length
11
+ Skv as KV sequence length
12
+
13
+ MLA has two possible ways of computing, a data-movement friendly approach and a
14
+ compute friendly approach, we generally want to use the compute friendly
15
+ approach for "prefill" (i.e. the ratio Sq / Skv is "small", is near 1)
16
+ and the data-movement friendly approach for "decode" (i.e. the ratio
17
+ Sq / Skv is "large").
18
+
19
+ NOTE what we deem small and large is currently determined by if its labelled
20
+ prefill or decode by the scheduler, but this is something we should probably
21
+ tune.
22
+
23
+ Main reference: DeepseekV2 paper, and FlashInfer Implementation
24
+ (https://arxiv.org/abs/2405.04434 and https://github.com/flashinfer-ai/flashinfer/pull/551).
25
+
26
+ Deepseek's MLA attention works the following way:
27
+ * Use a single latent vector to represent the per-token entry of the KV cache.
28
+ * For decode (i.e. the memory friendly approach) the attention "simulates" a
29
+ multi-head attention, while the compute is similar to multi-query attention.
30
+
31
+ Below is example of both paths assuming batchsize = 1
32
+
33
+ ## More Extent Definitions:
34
+
35
+ C Context length, `Skv - Sq`
36
+ H hidden size
37
+ N number of attention heads
38
+ Lq latent dimension for Q 1536 in DSV3
39
+ Lkv latent dimension for K/V 512 in DSV3
40
+ P nope dimension, no rope. 128 in DSV3
41
+ R rope dimension, goes through rope. 64 in DSV3
42
+ V V head dim. 128 in DSV3
43
+
44
+ ## Vector/Matrix Definitions
45
+
46
+ h_t hidden states (input to attention) shape [Sq, H]
47
+ q_c latent/compressed Q shape [Sq, Lq]
48
+ q_nope uncompressed Q (no-rope) shape [Sq, N, P]
49
+ q_pe uncompressed Q (rope) shape [Sq, N, R]
50
+ kv_c latent/compressed KV shape [Skv, Lkv]
51
+ k_pe decoupled k position embeddings shape [Skv, R]
52
+ new_kv_c new kv_c from current iter shape [Sq, Lkv]
53
+ new_k_pe new k_pe from current iter shape [Sq, R]
54
+ cache_kv_c cached k_c from previous iters shape [C, Lkv]
55
+ cache_k_pe cached k_pe from previous iters shape [C, R]
56
+ W_DQ project h_t to q_c shape [H, Lq]
57
+ W_UQ project q_c to q_nope shape [Lq, N * P]
58
+ W_QR project q_c to q_pe shape [Lq, N * R]
59
+ W_DKV project h_t to kv_c shape [H, Lkv]
60
+ W_UK project kv_c to k_nope shape [Lkv, N, P]
61
+ W_KR project h_t to k_pe shape [H, R]
62
+ W_UV project kv_c to v shape [Lkv, N, V]
63
+ W_O project v to h_t shape [N * V, H]
64
+
65
+
66
+ ## Compute Friendly Approach (i.e. "_forward_prefill"):
67
+
68
+ q_c = h_t @ W_DQ
69
+ q_nope = (q_c @ W_UQ).view(Sq, N, P)
70
+ q_pe = RoPE(q_c @ W_QR).view(Sq, N, R)
71
+ new_kv_c = h_t @ W_DKV
72
+ new_k_pe = RoPE(h_t @ W_KR)
73
+ kv_c = torch.cat([new_kv_c, cache_kv_c], dim=0)
74
+ k_pe = torch.cat([new_k_pe, cache_k_pe], dim=0)
75
+ k_nope = (kv_c @ W_UK.view(Lkv, N * P)).view(Skv, N, P)
76
+ v = (kv_c @ W_UV.view(Lkv, N * V)).view(Skv, N, V)
77
+
78
+ // MHA with QK headdim = P + R
79
+ // V headdim = V
80
+ // spda_o shape [Sq, N, V]
81
+ spda_o = scaled_dot_product_attention(
82
+ torch.cat([q_nope, q_pe], dim=-1),
83
+ torch.cat([k_nope, k_pe.unsqueeze(1).expand(-1, N, -1)], dim=-1),
84
+ v
85
+ )
86
+ return spda_o @ W_O
87
+
88
+ NOTE: in the actual code,
89
+ `kv_b_proj` is [W_UK; W_UV] concatenated per head
90
+ `q_b_proj` is [W_UQ; W_QR] concatenated per head
91
+ `out_proj` is W_O
92
+
93
+
94
+ ## Data-Movement Friendly Approach (i.e. "_forward_decode"):
95
+
96
+ Runtime
97
+ q_c = h_t @ W_DQ
98
+ q_nope = (q_c @ W_UQ).view(-1, N, P)
99
+ ql_nope = einsum("snh,lnh->snl", q, W_UK)
100
+ q_pe = RoPE(q_c @ W_QR).view(Sq, N, R)
101
+ new_kv_c = h_t @ W_DKV
102
+ new_k_pe = RoPE(h_t @ W_KR)
103
+ kv_c = torch.cat([new_kv_c, cache_kv_c], dim=0)
104
+ k_pe = torch.cat([new_k_pe, cache_k_pe], dim=0)
105
+
106
+ // MQA with QK headdim = Lkv + R
107
+ // V headdim = Lkv
108
+ // spda_o shape [Sq, N, Lkv]
109
+ // NOTE: this is less compute-friendly since Lkv > P
110
+ // but is more data-movement friendly since its MQA vs MHA
111
+ spda_o = scaled_dot_product_attention(
112
+ torch.cat([ql_nope, q_pe], dim=-1),
113
+ torch.cat([kv_c, k_pe], dim=-1),
114
+ kv_c
115
+ )
116
+
117
+ o = einsum("snl,lnv->snv", spda_o.reshape(-1, N, Lkv), W_UV)
118
+ return o.view(-1, N * V) @ W_O
119
+
120
+
121
+ ## Chunked Prefill
122
+
123
+ For chunked prefill we want to use the compute friendly algorithm. We are
124
+ assuming sufficiently large Sq / Skv ratio, in the future may want to switch to
125
+ the data-movement friendly approach if the chunk (i.e. `Sq`) is small.
126
+
127
+ However, the compute-friendly approach can potentially run out of memory if Skv
128
+ is large due to: `k_nope = (kv_c @ W_UK).view(Skv, N, P)`
129
+
130
+ To mitigate this, we chunk the computation of attention with respect to the
131
+ current context (i.e. `cache_kv_c` and `cache_k_pe`) so that we can used a
132
+ fixed workspace size.
133
+
134
+ The chunked prefill approach is as follows:
135
+
136
+ MCC Max chunk of context to process per iter, computed dynamically,
137
+ used to bound the memory usage
138
+
139
+ q_c = h_t @ W_DQ
140
+ q_nope = (q_c @ W_UQ).view(Sq, N, P)
141
+ q_pe = RoPE(q_c @ W_QR).view(Sq, N, R)
142
+ new_kv_c = h_t @ W_DKV
143
+ new_k_pe = RoPE(h_t @ W_KR)
144
+ new_k_nope = (new_kv_c @ W_UK.view(Lkv, N * P)).view(Sq, N, P)
145
+ new_v = (new_kv_c @ W_UV.view(Lkv, N * V)).view(Sq, N, V)
146
+
147
+ // MHA between queries and new KV
148
+ // with QK headdim = P + R
149
+ // V headdim = V
150
+ // curr_o shape [Sq, N, V]
151
+ // curr_lse shape [N, Sq], this is just order FA returns
152
+ curr_o, curr_lse = scaled_dot_product_attention(
153
+ torch.cat([q_nope, q_pe], dim=-1),
154
+ torch.cat([new_k_nope, new_k_pe.unsqueeze(1).expand(-1, N, -1)], dim=-1),
155
+ new_v,
156
+ causal=True,
157
+ return_softmax_lse=True
158
+ )
159
+
160
+ // Compute attention with the already existing context
161
+ for chunk_idx in range(cdiv(C, MCC)):
162
+ chunk_start = chunk_idx * MCC
163
+ chunk_end = min(chunk_start + MCC, C)
164
+ Sc = chunk_end - chunk_start
165
+ cache_kv_c_chunk = cache_kv_c[chunk_start:chunk_end]
166
+ cache_k_pe_chunk = cache_k_pe[chunk_start:chunk_end]
167
+ cache_k_nope_chunk = (cache_kv_c_chunk @ W_UK).view(-1, N, P)
168
+ cache_v_chunk = (cache_kv_c_chunk @ W_UV).view(-1, N, V)
169
+
170
+ chunk_o, chunk_lse = scaled_dot_product_attention(
171
+ torch.cat([q_nope, q_pe], dim=-1),
172
+ torch.cat([cache_k_nope_chunk,
173
+ cache_k_pe_chunk.unsqueeze(1).expand(-1, N, -1)],
174
+ dim=-1),
175
+ cache_v_chunk,
176
+ causal=False,
177
+ return_softmax_lse=True
178
+ )
179
+
180
+ curr_o, curr_lse = merge_attn_states(
181
+ suffix_output=curr_o,
182
+ suffix_lse=curr_lse,
183
+ prefix_output=chunk_o,
184
+ prefix_lse=chunk_lse,
185
+ )
186
+
187
+ return curr_o @ W_O
188
+ """
189
+
190
+ import functools
191
+ from abc import abstractmethod
192
+ from dataclasses import dataclass, field
193
+ from typing import ClassVar, Generic, Optional, TypeVar, Union
194
+
195
+ import torch
196
+
197
+ import vllm.envs as envs
198
+ from vllm import _custom_ops as ops
199
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionLayer,
200
+ AttentionMetadata,
201
+ MLAAttentionImpl)
202
+ from vllm.attention.backends.utils import get_mla_dims
203
+ from vllm.attention.ops.merge_attn_states import merge_attn_states
204
+ from vllm.attention.utils.fa_utils import get_flash_attn_version
205
+ from vllm.config import VllmConfig
206
+ from vllm.logger import init_logger
207
+ from vllm.model_executor.layers.linear import (ColumnParallelLinear,
208
+ LinearBase,
209
+ UnquantizedLinearMethod)
210
+ from vllm.platforms import current_platform
211
+ from vllm.utils import cdiv, round_down
212
+ from vllm.utils.flashinfer import has_nvidia_artifactory
213
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
214
+ CommonAttentionMetadata,
215
+ get_per_layer_parameters,
216
+ infer_global_hyperparameters,
217
+ split_decodes_and_prefills)
218
+ from vllm.v1.kv_cache_interface import AttentionSpec
219
+
220
+ try:
221
+ from vllm.vllm_flash_attn import flash_attn_varlen_func
222
+ is_vllm_fa = True
223
+ except ImportError:
224
+ # For rocm use upstream flash attention
225
+ if current_platform.is_rocm():
226
+ from flash_attn import flash_attn_varlen_func
227
+ is_vllm_fa = False
228
+
229
+ try:
230
+ from flashinfer import BatchPrefillWithRaggedKVCacheWrapper
231
+ from flashinfer.prefill import ( # noqa: F401
232
+ cudnn_batch_prefill_with_kv_cache)
233
+ flashinfer_available = True
234
+ except ImportError:
235
+ flashinfer_available = False
236
+
237
+ logger = init_logger(__name__)
238
+
239
+ CUDNN_WORKSPACE_SIZE = 12800
240
+
241
+
242
class MLACommonBackend(AttentionBackend):
    """Backend definition shared by MLA attention implementations.

    See the module docstring for the MLA algorithm and its notation.
    """

    accept_output_buffer: bool = True

    @staticmethod
    def get_name() -> str:
        return "TRITON_MLA_VLLM_V1"

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return MLACommonMetadata

    @staticmethod
    def get_builder_cls() -> type["MLACommonMetadataBuilder"]:
        return MLACommonMetadataBuilder

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,  # assumed to be 1 for MLA
        head_size: int,
    ) -> tuple[int, ...]:
        # MLA caches a single latent vector per token, so the cache shape
        # has no separate num_kv_heads dimension.
        return (num_blocks, block_size, head_size)

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        # 576 = Lkv (512, latent dim) + R (64, rope dim) per the module
        # docstring's DSV3 dimensions.
        return [576]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError when `head_size` is unsupported by this backend."""
        supported_head_sizes = cls.get_supported_head_sizes()
        if head_size not in supported_head_sizes:
            attn_type = cls.__name__.removesuffix("Backend")
            raise ValueError(
                f"Head size {head_size} is not supported by {attn_type}. "
                f"Supported head sizes are: {supported_head_sizes}. "
                "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
                "FlexAttention backend which supports all head sizes.")
285
+
286
+
287
@dataclass
class MLACommonPrefillMetadata:
    """ Prefill Specific Metadata """

    @dataclass
    class ChunkedContextMetadata:
        # New for MLA (compared to FlashAttention)
        # For handling chunked prefill
        cu_seq_lens: torch.Tensor
        starts: torch.Tensor
        seq_tot: list[int]
        max_seq_lens: list[int]
        seq_lens: torch.Tensor
        # Scratch buffer used to stage gathered KV for each context chunk.
        workspace: torch.Tensor

    block_table: torch.Tensor
    query_start_loc: torch.Tensor
    max_query_len: int
    # Only populated when there is existing context to process in chunks.
    chunked_context: Optional[ChunkedContextMetadata] = None
306
+
307
+
308
@dataclass
class FlashInferPrefillMetadata(MLACommonPrefillMetadata):
    """Prefill metadata extended with FlashInfer ragged-KV wrappers."""
    # Wrapper for the new-token (causal) attention pass.
    prefill_main: Optional['BatchPrefillWithRaggedKVCacheWrapper'] = None
    # One wrapper per context chunk.
    prefill_chunks: list['BatchPrefillWithRaggedKVCacheWrapper'] = field(
        default_factory=list)
313
+
314
+
315
@dataclass
class CudnnPrefillMetadata(MLACommonPrefillMetadata):
    """Prefill metadata extended for the cuDNN prefill path."""

    # NOTE(review): this nested class subclasses a dataclass without being
    # decorated with @dataclass itself, and `seq_lens` re-declares a field
    # that already exists on the parent — it adds no new dataclass field.
    # Confirm this is intentional.
    class ChunkedContextMetadata(
            MLACommonPrefillMetadata.ChunkedContextMetadata):
        seq_lens: torch.Tensor

    query_seq_lens: Optional[torch.Tensor] = None
    cudnn_workspace: Optional[torch.Tensor] = None
324
+
325
+
326
@dataclass
class MLACommonDecodeMetadata:
    """Decode-specific metadata (subclassed by concrete MLA backends)."""
    block_table: torch.Tensor
    seq_lens: torch.Tensor
330
+
331
+
332
+ D = TypeVar("D", bound=MLACommonDecodeMetadata)
333
+
334
+
335
@dataclass
class MLACommonMetadata(Generic[D]):
    """Metadata for MLACommon.

    NOTE: Please read the comment at the top of the file before trying to
    understand this class
    """
    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ---------------------|
    #                                   |-- query_len ---|

    num_reqs: int
    max_query_len: int

    num_actual_tokens: int  # Number of tokens excluding padding.
    query_start_loc: torch.Tensor
    slot_mapping: torch.Tensor

    # New for MLA (compared to FlashAttention)
    # For handling prefill decode split
    num_decodes: int
    num_decode_tokens: int
    num_prefills: int

    # The dimension of the attention heads
    head_dim: Optional[int] = None

    # Decode metadata type is backend-specific (the D type parameter);
    # prefill metadata varies with the chosen prefill path.
    decode: Optional[D] = None
    prefill: Optional[Union[MLACommonPrefillMetadata,
                            FlashInferPrefillMetadata,
                            CudnnPrefillMetadata]] = None

    def __post_init__(self):
        # Fail fast on unsupported head sizes when head_dim is provided.
        if self.head_dim is not None:
            MLACommonBackend.validate_head_size(self.head_dim)
374
+
375
+
376
+ M = TypeVar("M", bound=MLACommonMetadata)
377
+
378
+
379
def use_flashinfer_prefill() -> bool:
    """Whether to use FlashInfer for MLA prefill.

    For blackwell default to flashinfer prefill if its available since
    it is faster than FA2.
    """
    if not flashinfer_available:
        return False
    if envs.VLLM_USE_CUDNN_PREFILL:
        return False
    return current_platform.is_device_capability(100)
384
+
385
+
386
def use_cudnn_prefill() -> bool:
    """Whether to use cuDNN for MLA prefill (opt-in via env var)."""
    if not (flashinfer_available and envs.VLLM_USE_CUDNN_PREFILL):
        return False
    return (current_platform.is_device_capability(100)
            and has_nvidia_artifactory())
390
+
391
+
392
+ # Currently 394MB, this can be tuned based on GEMM sizes used.
393
+ # Chosen to be the same as sglang:
394
+ # https://github.com/sgl-project/sglang/blob/766392c6bda2558b61ce6d1c1bfd8081a549e1f1/python/sglang/global_config.py#L37
395
+ FLASHINFER_WORKSPACE_BUFFER_SIZE = 394 * 1024 * 1024
396
+
397
+
398
+ class MLACommonMetadataBuilder(AttentionMetadataBuilder[M]):
399
+ """
400
+ NOTE: Please read the comment at the top of the file before trying to
401
+ understand this class
402
+ """
403
+ reorder_batch_threshold: ClassVar[int] = 1
404
+
405
    def __init__(self,
                 kv_cache_spec: AttentionSpec,
                 layer_names: list[str],
                 vllm_config: VllmConfig,
                 device: torch.device,
                 metadata_cls: Optional[type[M]] = None):
        """Pre-compute everything that is constant across ``build()`` calls.

        Args:
            kv_cache_spec: layout of the paged latent KV cache.
            layer_names: attention layer names, used to infer per-layer
                hyperparameters for the FlashInfer prefill path.
            vllm_config: full engine config (model/cache/scheduler/parallel).
            device: device on which builder-owned scratch buffers live.
            metadata_cls: metadata dataclass to instantiate; defaults to
                MLACommonMetadata.
        """
        self.metadata_cls = metadata_cls \
            if metadata_cls is not None else MLACommonMetadata
        self.kv_cache_spec = kv_cache_spec
        self.device = device
        scheduler_config = vllm_config.scheduler_config
        self.model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        parallel_config = vllm_config.parallel_config
        self.chunked_prefill_enabled = scheduler_config.chunked_prefill_enabled
        self.num_heads = self.model_config.get_num_attention_heads(
            parallel_config)
        self.mla_dims = get_mla_dims(self.model_config)
        self.aot_schedule = current_platform.is_cuda()

        # Dont try to access the runner on AMD
        if self.aot_schedule:
            self.page_size = self.kv_cache_spec.block_size

        if self.chunked_prefill_enabled:
            self.chunked_prefill_workspace_size = min(
                # Make sure there is enough for 8 full length requests or at
                # least 4 pages of cache per request
                max(
                    8 * self.model_config.max_model_len, 4 *
                    scheduler_config.max_num_seqs * cache_config.block_size),
                # For long-context models try not to over-allocate limiting
                # kv-cache space, limiting it to 64k tokens,
                # which would result in the workspace being:
                # 2*(576)*(64*1024) = 144mb
                # (assuming 576 MLA head dim, and fp16)
                # which would result in up-projected context being
                # 2*(192*128)*(64*1024) = 3gb
                # (assuming 192 QK head dim, 128 heads, and fp16)
                128 * 1024)
            assert self.chunked_prefill_workspace_size >= \
                scheduler_config.max_num_seqs * cache_config.block_size
            # Scratch buffer the gathered (latent + rope) context rows are
            # staged into during chunked-prefill context attention.
            self.chunked_prefill_workspace = torch.empty(
                (self.chunked_prefill_workspace_size,
                 self.model_config.get_head_size()),
                dtype=self.model_config.dtype,
                device=device,
            )

        self._use_cudnn_prefill = use_cudnn_prefill()
        self._use_fi_prefill = use_flashinfer_prefill()
        # Prefill metadata class mirrors the kernel choice made above.
        self.prefill_metadata_cls = (
            FlashInferPrefillMetadata
            if self._use_fi_prefill else CudnnPrefillMetadata
            if self._use_cudnn_prefill else MLACommonPrefillMetadata)

        if self._use_fi_prefill:
            self._workspace_buffer = torch.empty(
                FLASHINFER_WORKSPACE_BUFFER_SIZE,
                dtype=torch.uint8,
                device=device)

            # Wrappers are created lazily in _build_fi_prefill_wrappers and
            # reused across scheduler steps.
            self._fi_prefill_main: Optional[
                BatchPrefillWithRaggedKVCacheWrapper] = None
            self._fi_prefill_chunks: list[
                BatchPrefillWithRaggedKVCacheWrapper] = []

            self._global_hyperparameters = infer_global_hyperparameters(
                get_per_layer_parameters(vllm_config, layer_names,
                                         MLACommonImpl))

        if self._use_cudnn_prefill:
            self.cudnn_workspace = torch.empty(
                CUDNN_WORKSPACE_SIZE * scheduler_config.max_num_seqs,
                dtype=torch.int8,
                device=device,
            )
482
+
483
    def _build_fi_prefill_wrappers(self, prefill: FlashInferPrefillMetadata):
        """Plan the FlashInfer ragged-KV prefill wrappers for this step.

        Plans one wrapper for the causal new-token run and, when chunked
        context exists, one non-causal wrapper per context chunk; the planned
        wrappers are attached to `prefill` for the impl to `run()` later.
        """
        qo_indptr = prefill.query_start_loc

        has_context = False
        if prefill.chunked_context is not None:
            chunked_context = prefill.chunked_context
            has_context = True

        # Lazily create the main wrapper the first time it is needed.
        if self._fi_prefill_main is None:
            self._fi_prefill_main = BatchPrefillWithRaggedKVCacheWrapper(
                self._workspace_buffer, "NHD", backend="cutlass")

        if has_context:
            num_chunks = chunked_context.cu_seq_lens.shape[0]
            # Allocate more prefill chunk wrappers if needed
            if len(self._fi_prefill_chunks) < num_chunks:
                for _ in range(len(self._fi_prefill_chunks), num_chunks):
                    self._fi_prefill_chunks.append(
                        BatchPrefillWithRaggedKVCacheWrapper(
                            self._workspace_buffer, "NHD", backend="cutlass"))
            assert num_chunks <= len(self._fi_prefill_chunks)

        # In MLA, the non-latent num_qo_heads == num_kv_heads
        num_qo_heads = self.num_heads
        num_kv_heads = num_qo_heads

        # Sanity: Verify that num_kv_heads == 1 since it is latent space
        assert self.kv_cache_spec.num_kv_heads == 1

        # Get non-latent head_dim_qk and head_dim_vo
        head_dim_qk = (self.mla_dims.qk_nope_head_dim +
                       self.mla_dims.qk_rope_head_dim)
        head_dim_vo = self.mla_dims.v_head_dim

        # For main run, qo_indptr == kv_indptr
        kv_indptr = qo_indptr.clone()

        # Prepare main prefill
        self._fi_prefill_main.plan(
            qo_indptr=qo_indptr,
            kv_indptr=kv_indptr,
            num_qo_heads=num_qo_heads,
            num_kv_heads=num_kv_heads,
            head_dim_qk=head_dim_qk,
            head_dim_vo=head_dim_vo,
            causal=True,  # This is main run
            sm_scale=self._global_hyperparameters.sm_scale,
            window_left=self._global_hyperparameters.window_left,
            logits_soft_cap=self._global_hyperparameters.logits_soft_cap,
            q_data_type=self.model_config.dtype,
            kv_data_type=self.kv_cache_spec.dtype,
        )

        # Prepare context prefills
        if has_context:
            for i in range(num_chunks):
                kv_indptr_chunk = chunked_context.cu_seq_lens[i]

                self._fi_prefill_chunks[i].plan(
                    qo_indptr=qo_indptr,
                    kv_indptr=kv_indptr_chunk,
                    num_qo_heads=num_qo_heads,
                    num_kv_heads=num_kv_heads,
                    head_dim_qk=head_dim_qk,
                    head_dim_vo=head_dim_vo,
                    causal=False,  # This is context run
                    sm_scale=self._global_hyperparameters.sm_scale,
                    window_left=self._global_hyperparameters.window_left,
                    logits_soft_cap=self._global_hyperparameters.
                    logits_soft_cap,
                    q_data_type=self.model_config.dtype,
                    kv_data_type=self.kv_cache_spec.dtype,
                )

        prefill.prefill_main = self._fi_prefill_main
        prefill.prefill_chunks = self._fi_prefill_chunks
559
+
560
+ def _build_decode(self, block_table_tensor: torch.Tensor,
561
+ seq_lens: torch.Tensor):
562
+ return MLACommonDecodeMetadata(
563
+ block_table=block_table_tensor,
564
+ seq_lens=seq_lens,
565
+ )
566
+
567
+ def build_for_cudagraph_capture(
568
+ self, common_attn_metadata: CommonAttentionMetadata) -> M:
569
+ """
570
+ This method builds the metadata for full cudagraph capture.
571
+ Currently, only decode is supported for full cudagraphs with MLA.
572
+ """
573
+ m = common_attn_metadata
574
+ assert m.num_reqs == m.num_actual_tokens, \
575
+ "MLA only supports decode-only full CUDAGraph capture. " \
576
+ "Make sure all cudagraph capture sizes <= max_num_seq."
577
+
578
+ assert m.max_query_len == 1 # decode-only
579
+
580
+ return self.build(0, m)
581
+
582
    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> M:
        """Build the per-step MLA attention metadata.

        The batch is assumed to be reordered decode-first (see
        reorder_batch_threshold): requests [0, num_decodes) are decodes and
        the rest are prefills.

        Args:
            common_prefix_len: part of the common builder API; unused here.
            common_attn_metadata: per-step batch layout from the runner.
            fast_build: part of the common builder API; unused here.
        """
        num_reqs = common_attn_metadata.num_reqs
        num_tokens = common_attn_metadata.num_actual_tokens
        max_query_len = common_attn_metadata.max_query_len

        # Note(simon): be careful about the CPU <> GPU memory movement in this
        # function. We should avoid GPU -> CPU sync as much as possible because
        # it blocks on all previous kernels.
        device = self.device
        block_table_tensor = common_attn_metadata.block_table_tensor
        slot_mapping = common_attn_metadata.slot_mapping

        query_start_loc = common_attn_metadata.query_start_loc
        query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
        seq_lens = common_attn_metadata.seq_lens

        query_seq_lens_cpu = query_start_loc_cpu[1:] - query_start_loc_cpu[:-1]

        # Tokens already in the KV cache per request (== context length).
        num_computed_tokens_cpu = (common_attn_metadata.seq_lens_cpu -
                                   query_seq_lens_cpu)

        num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = \
            split_decodes_and_prefills(common_attn_metadata)

        assert num_decodes + num_prefills == num_reqs
        assert num_decode_tokens + num_prefill_tokens == num_tokens

        prefill_metadata = None
        if num_prefills > 0:
            reqs_start = num_decodes  # prefill_start

            context_lens_cpu = num_computed_tokens_cpu[reqs_start:num_reqs]
            max_context_len_cpu = context_lens_cpu.max().item()
            num_prefills_with_context_cpu = (context_lens_cpu > 0).sum().item()
            # Re-base the prefill query offsets so they start at zero.
            prefill_query_start_loc = query_start_loc[
                reqs_start:] - query_start_loc[reqs_start]

            chunked_context_metadata = None
            if self.chunked_prefill_enabled and num_prefills > 0 \
                and max_context_len_cpu > 0:
                # NOTE: it is recommend you read the `Chunked Prefill` section
                # in the comment at the top of the file before trying to
                # understand the following code

                # currently we allocate an equal amount of workspace for each
                # prefill in the batch, we could probably use a more advanced
                # algorithm here and allocate more workspace to prefills with
                # longer context lengths
                max_context_chunk = (self.chunked_prefill_workspace_size //
                                     num_prefills_with_context_cpu)

                if self.aot_schedule:
                    # align max_context_chunk to page_size by rounding down,
                    # currently the `gather_cache` kernel cannot handle
                    # `context_chunk_starts` that are not aligned to page_size
                    max_context_chunk = round_down(max_context_chunk,
                                                   self.page_size)

                assert max_context_chunk > 0
                num_chunks = cdiv(max_context_len_cpu, max_context_chunk)

                # if `max_context_chunk = 256`, `num_chunks = 3`, and
                # `num_prefills_with_context = 4`, create a tensor that looks
                # like
                # [[0, 0, 0, 0], [256, 256, 256, 256], [512, 512, 512, 512]]
                # Note(simon): this is done in CPU because of downstream's
                # of `to_list`.
                chunk_starts = \
                    torch.arange(num_chunks, dtype=torch.int32) \
                    .unsqueeze(1).expand(-1, num_prefills) \
                    * max_context_chunk
                chunk_ends = torch.min(context_lens_cpu.unsqueeze(0),
                                       chunk_starts + max_context_chunk)
                chunk_seq_lens = (chunk_ends - chunk_starts).clamp(min=0)

                cu_seq_lens_cpu = torch.zeros(num_chunks,
                                              num_prefills + 1,
                                              dtype=torch.int32,
                                              pin_memory=True)
                torch.cumsum(chunk_seq_lens,
                             dim=1,
                             out=cu_seq_lens_cpu[:, 1:],
                             dtype=torch.int32)

                chunked_context_metadata_cls = \
                    CudnnPrefillMetadata.ChunkedContextMetadata \
                    if self._use_cudnn_prefill else \
                    MLACommonPrefillMetadata.ChunkedContextMetadata

                chunked_context_metadata = \
                    chunked_context_metadata_cls(
                        cu_seq_lens=cu_seq_lens_cpu.to(device, non_blocking=True),
                        starts=chunk_starts.to(device, non_blocking=True),
                        seq_tot=chunk_seq_lens.sum(dim=1).tolist(),
                        max_seq_lens=chunk_seq_lens.max(dim=1).values.tolist(),
                        seq_lens=chunk_seq_lens,
                        workspace=self.chunked_prefill_workspace,
                    )

                if self._use_cudnn_prefill:
                    # NOTE(review): seq_lens is already passed to the
                    # constructor above, so this re-assignment looks
                    # redundant — confirm whether the base-class field
                    # differs from the cuDNN one.
                    chunked_context_metadata.seq_lens = chunk_seq_lens

                # No single chunk may exceed the staging workspace.
                assert max(chunked_context_metadata.max_seq_lens) <= \
                    self.chunked_prefill_workspace_size

            prefill_metadata = self.prefill_metadata_cls(
                block_table=block_table_tensor[reqs_start:, ...],
                query_start_loc=prefill_query_start_loc,
                max_query_len=max_query_len,
                chunked_context=chunked_context_metadata,
            )

            if self._use_cudnn_prefill:
                assert isinstance(prefill_metadata, CudnnPrefillMetadata)
                prefill_metadata.query_seq_lens = prefill_query_start_loc[1:] \
                    - prefill_query_start_loc[:-1]
                prefill_metadata.cudnn_workspace = self.cudnn_workspace

        decode_metadata = None
        if num_decodes > 0:
            decode_metadata = self._build_decode(
                block_table_tensor=block_table_tensor[:num_decodes, ...],
                seq_lens=seq_lens[:num_decodes],
            )

        attn_metadata = self.metadata_cls(
            num_reqs=common_attn_metadata.num_reqs,
            max_query_len=common_attn_metadata.max_query_len,
            num_actual_tokens=num_tokens,
            query_start_loc=query_start_loc,
            slot_mapping=slot_mapping,
            head_dim=self.model_config.get_head_size(),
            # MLACommonMetadata Chunk prefill specific
            num_decodes=num_decodes,
            num_decode_tokens=num_decode_tokens,
            num_prefills=num_prefills,
            prefill=prefill_metadata,
            decode=decode_metadata,
        )

        if self._use_fi_prefill and num_prefills > 0:
            assert isinstance(attn_metadata.prefill, FlashInferPrefillMetadata)
            self._build_fi_prefill_wrappers(attn_metadata.prefill)

        return attn_metadata
730
+
731
+
732
+ class MLACommonImpl(MLAAttentionImpl[M], Generic[M]):
733
+ """
734
+ NOTE: Please read the comment at the top of the file before trying to
735
+ understand this class
736
+ """
737
+
738
    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float],
        attn_type: str,
        kv_sharing_target_layer_name: Optional[str],
        # MLA Specific Arguments
        q_lora_rank: Optional[int],
        kv_lora_rank: int,
        qk_nope_head_dim: int,
        qk_rope_head_dim: int,
        qk_head_dim: int,
        v_head_dim: int,
        kv_b_proj: ColumnParallelLinear,
    ) -> None:
        """Shared MLA impl setup.

        Records the MLA dimensions and binds the prefill kernel entry points
        (FlashInfer / cuDNN / FlashAttention), matching the choice made by
        the metadata builder.
        """
        if kv_sharing_target_layer_name is not None:
            raise NotImplementedError("KV sharing is not supported for MLA")

        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        self.kv_cache_dtype = kv_cache_dtype

        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.qk_head_dim = qk_head_dim
        self.v_head_dim = v_head_dim
        self.kv_b_proj = kv_b_proj

        if use_flashinfer_prefill():
            logger.debug_once("Using FlashInfer prefill for MLA")
            self._run_prefill_context_chunk = self._run_prefill_context_chunk_fi
            self._run_prefill_new_tokens = self._run_prefill_new_tokens_fi
            # FlashInfer supports head_dim(v) != head_dim(qk); no V padding.
            self._pad_v = False
        elif use_cudnn_prefill():
            logger.debug_once("Using CUDNN prefill for MLA")
            self._run_prefill_context_chunk = \
                self._run_prefill_context_chunk_cudnn
            self._run_prefill_new_tokens = self._run_prefill_new_tokens_cudnn
            self._pad_v = False
        else:  # Use FlashAttention
            logger.debug_once("Using FlashAttention prefill for MLA")
            self._run_prefill_context_chunk = self._run_prefill_context_chunk_fa
            self._run_prefill_new_tokens = self._run_prefill_new_tokens_fa

            # Handle the differences between the flash_attn_varlen from
            # flash_attn and the one from vllm_flash_attn. The former is used on
            # RoCM and the latter has an additional parameter to control
            # FA2 vs FA3
            self.flash_attn_varlen_func = flash_attn_varlen_func
            self.vllm_flash_attn_version = get_flash_attn_version()
            if self.vllm_flash_attn_version is not None:
                self.flash_attn_varlen_func = \
                    functools.partial(flash_attn_varlen_func,
                                      fa_version=self.vllm_flash_attn_version)

            # For MLA the v head dim is smaller than qk head dim so we pad out
            # v with 0s to match the qk head dim for attention backends that do
            # not support different headdims
            # We don't need to pad V if we are on a hopper system with FA3
            self._pad_v = self.vllm_flash_attn_version is None or not (
                self.vllm_flash_attn_version == 3
                and current_platform.get_device_capability()[0] == 9)
810
+
811
    def _flash_attn_varlen_diff_headdims(self,
                                         q,
                                         k,
                                         v,
                                         return_softmax_lse=False,
                                         softmax_scale=None,
                                         **kwargs):
        """Varlen flash-attention call that tolerates head_dim(v) < head_dim(q).

        When `self._pad_v` is set, V is zero-padded up to the QK head dim so
        kernels that require equal head dims still work; it also papers over
        the upstream-flash_attn vs vllm_flash_attn naming difference for
        requesting the softmax LSE.
        """
        maybe_padded_v = v
        if self._pad_v:
            maybe_padded_v = torch.nn.functional.pad(
                v, [0, q.shape[-1] - v.shape[-1]], value=0)

        if is_vllm_fa:
            kwargs["return_softmax_lse"] = return_softmax_lse
        else:
            # ROCm leverages the upstream flash_attn, which takes a parameter
            # called "return_attn_probs" instead of return_softmax_lse
            kwargs["return_attn_probs"] = return_softmax_lse

        attn_out = self.flash_attn_varlen_func(
            q=q,
            k=k,
            v=maybe_padded_v,
            softmax_scale=softmax_scale,
            **kwargs,
        )

        # Unpack the output if there is multiple results
        lse = None
        if isinstance(attn_out, tuple):
            attn_out, lse = attn_out[0], attn_out[1]

        # Remain consistent with old `flash_attn_varlen_func` where there
        # is only one output tensor if `return_softmax_lse` is False.
        if return_softmax_lse:
            return attn_out, lse
        return attn_out
848
+
849
+ def _run_prefill_new_tokens_fa(self, prefill: MLACommonPrefillMetadata, q,
850
+ k, v, return_softmax_lse):
851
+ return self._flash_attn_varlen_diff_headdims(
852
+ q=q,
853
+ k=k,
854
+ v=v,
855
+ cu_seqlens_q=prefill.query_start_loc,
856
+ cu_seqlens_k=prefill.query_start_loc,
857
+ max_seqlen_q=prefill.max_query_len,
858
+ max_seqlen_k=prefill.max_query_len,
859
+ softmax_scale=self.scale,
860
+ causal=True,
861
+ return_softmax_lse=return_softmax_lse,
862
+ )
863
+
864
+ def _run_prefill_new_tokens_fi(self, prefill: MLACommonPrefillMetadata, q,
865
+ k, v, return_softmax_lse):
866
+ assert isinstance(prefill, FlashInferPrefillMetadata)
867
+ assert prefill.prefill_main is not None
868
+ return prefill.prefill_main.run(
869
+ q=q,
870
+ k=k,
871
+ v=v,
872
+ return_lse=return_softmax_lse,
873
+ )
874
+
875
+ def _run_prefill_new_tokens_cudnn(self, prefill: MLACommonPrefillMetadata,
876
+ q, k, v, return_softmax_lse):
877
+ assert isinstance(prefill, CudnnPrefillMetadata)
878
+ assert prefill.query_seq_lens is not None
879
+ output, lse = cudnn_batch_prefill_with_kv_cache(
880
+ q=q,
881
+ k_cache=k,
882
+ v_cache=v,
883
+ scale=self.scale,
884
+ workspace_buffer=prefill.cudnn_workspace,
885
+ max_token_per_sequence=prefill.max_query_len,
886
+ max_sequence_kv=prefill.max_query_len,
887
+ actual_seq_lens_q=prefill.query_seq_lens.view(-1, 1, 1, 1),
888
+ actual_seq_lens_kv=prefill.query_seq_lens.view(-1, 1, 1, 1),
889
+ causal=True,
890
+ return_lse=True, # do not support False for now
891
+ is_cuda_graph_compatible=
892
+ True, #Indicates actual_seq_lens are on GPU or CPU.
893
+ )
894
+ if return_softmax_lse:
895
+ return output, lse
896
+ return output
897
+
898
+ def _run_prefill_context_chunk_fa(self, prefill: MLACommonPrefillMetadata,
899
+ chunk_idx: int, q, k, v):
900
+ assert prefill.chunked_context is not None
901
+ return self._flash_attn_varlen_diff_headdims(
902
+ q=q,
903
+ k=k,
904
+ v=v,
905
+ cu_seqlens_q=prefill.query_start_loc,
906
+ cu_seqlens_k=prefill.chunked_context.cu_seq_lens[chunk_idx],
907
+ max_seqlen_q=prefill.max_query_len,
908
+ max_seqlen_k=prefill.chunked_context.max_seq_lens[chunk_idx],
909
+ softmax_scale=self.scale,
910
+ causal=False, # Context is unmasked
911
+ return_softmax_lse=True,
912
+ )
913
+
914
+ def _run_prefill_context_chunk_fi(self, prefill: MLACommonPrefillMetadata,
915
+ chunk_idx: int, q, k, v):
916
+ assert isinstance(prefill, FlashInferPrefillMetadata)
917
+ return prefill.prefill_chunks[chunk_idx].run(
918
+ q=q,
919
+ k=k,
920
+ v=v,
921
+ return_lse=True,
922
+ )
923
+
924
    def _run_prefill_context_chunk_cudnn(self,
                                         prefill: MLACommonPrefillMetadata,
                                         chunk_idx: int, q, k, v):
        """Attend new-token queries to one gathered context chunk via cuDNN.

        Always returns (output, lse) so the caller can merge per-chunk
        softmax states.
        """
        assert isinstance(prefill, CudnnPrefillMetadata)
        assert prefill.chunked_context is not None
        assert prefill.chunked_context.seq_lens[chunk_idx] is not None
        assert prefill.query_seq_lens is not None
        return cudnn_batch_prefill_with_kv_cache(
            q=q,
            k_cache=k,
            v_cache=v,
            scale=self.scale,
            workspace_buffer=prefill.cudnn_workspace,
            max_token_per_sequence=prefill.max_query_len,
            max_sequence_kv=prefill.chunked_context.max_seq_lens[chunk_idx],
            actual_seq_lens_q=prefill.query_seq_lens.view(-1, 1, 1, 1),
            actual_seq_lens_kv=prefill.chunked_context.seq_lens[chunk_idx].
            view(-1, 1, 1, 1),
            causal=False,  # context is fully visible to every new token
            return_lse=True,
            is_cuda_graph_compatible=
            True,  #Indicates actual_seq_lens are on GPU or CPU.
        )
947
+
948
+ def _v_up_proj(self, x):
949
+ # Convert from (B, N, L) to (N, B, L)
950
+ x = x.view(-1, self.num_heads, self.kv_lora_rank).transpose(0, 1)
951
+ # Multiply (N, B, L) x (N, L, V) -> (N, B, V)
952
+ x = torch.bmm(x, self.W_UV)
953
+ # Convert from (N, B, V) to (B, N * V)
954
+ return x.transpose(0, 1).reshape(-1, self.num_heads * self.v_head_dim)
955
+
956
    def process_weights_after_loading(self, act_dtype: torch.dtype):
        """Split kv_b_proj into the W_UK/W_UV bmm operands used at decode.

        Dequantizes kv_b_proj if necessary (offline-only O(N^3) path), then
        caches:
          * self.W_UV:   (N, L, V) value up-projection
          * self.W_UK_T: (N, P, L) transposed key up-projection
        where N=num_heads, L=kv_lora_rank, P=qk_nope_head_dim, V=v_head_dim.
        """

        def get_layer_weight(layer):
            # First matching attribute wins; covers unquantized plus common
            # quantized weight layouts.
            WEIGHT_NAMES = ("weight", "qweight", "weight_packed")
            for attr in WEIGHT_NAMES:
                if hasattr(layer, attr):
                    return getattr(layer, attr)
            raise AttributeError(
                f"Layer '{layer}' has no recognized weight attribute:"
                f" {WEIGHT_NAMES}.")

        def get_and_maybe_dequant_weights(layer: LinearBase):
            # Returns the layer weight standardized to (output, input).
            if not isinstance(layer.quant_method, UnquantizedLinearMethod):
                # NOTE: This should only be used offline, since it's O(N^3)
                eye = torch.eye(layer.input_size_per_partition,
                                dtype=act_dtype,
                                device=get_layer_weight(layer).device)
                dequant_weights = layer.quant_method.apply(layer,
                                                           eye,
                                                           bias=None)
                del eye
                # standardize to (output, input)
                return dequant_weights.T
            return layer.weight

        # we currently do not have quantized bmm's which are needed for
        # `W_UV` and `W_UK_T`, so we just store fp16/bf16 copies and perform
        # the bmm's in 16-bit, the extra memory overhead of this is fairly low
        kv_b_proj_weight = get_and_maybe_dequant_weights(self.kv_b_proj).T
        assert kv_b_proj_weight.shape == (
            self.kv_lora_rank,
            self.num_heads * (self.qk_nope_head_dim + self.v_head_dim)), (
                f"{kv_b_proj_weight.shape=}, "
                f"{self.kv_lora_rank=}, "
                f"{self.num_heads=}, "
                f"{self.qk_nope_head_dim=}, "
                f"{self.v_head_dim=}")
        kv_b_proj_weight = kv_b_proj_weight.view(
            self.kv_lora_rank,
            self.num_heads,
            self.qk_nope_head_dim + self.v_head_dim,
        )

        W_UK, W_UV = kv_b_proj_weight.split(
            [self.qk_nope_head_dim, self.v_head_dim], dim=-1)

        # Convert from (L, N, V) to (N, L, V)
        self.W_UV = W_UV.transpose(0, 1)
        # Convert from (L, N, P) to (N, P, L)
        self.W_UK_T = W_UK.permute(1, 2, 0)
1006
+
1007
    def _compute_prefill_context(
        self,
        q: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
    ):
        """Attend prefill queries to the already-cached context, one chunk at
        a time, merging the per-chunk softmax states as we go.

        Returns:
            (output, lse) pair ready to be merged with the new-token
            attention result.
        """
        assert attn_metadata.prefill is not None
        prefill_metadata = attn_metadata.prefill
        assert prefill_metadata.chunked_context is not None

        output = None
        iters = len(prefill_metadata.chunked_context.seq_tot)
        workspace = prefill_metadata.chunked_context.workspace

        for i in range(iters):
            toks = prefill_metadata.chunked_context.seq_tot[i]

            # Gather this chunk's cached latent+rope rows into the
            # contiguous staging workspace.
            ops.gather_cache(
                src_cache=kv_c_and_k_pe_cache,
                dst=workspace,
                block_table=prefill_metadata.block_table,
                cu_seq_lens=prefill_metadata.chunked_context.cu_seq_lens[i],
                batch_size=attn_metadata.num_prefills,
                seq_starts=prefill_metadata.chunked_context.starts[i],
            )

            # First kv_lora_rank columns are the latent KV; the rest is the
            # rope (positional) part of K.
            kv_c_normed = workspace[:toks]\
                [..., :self.kv_lora_rank]
            k_pe = workspace[:toks]\
                [..., self.kv_lora_rank:].unsqueeze(1)

            # Up-project latent KV to full per-head K_nope and V.
            kv_nope = self.kv_b_proj(kv_c_normed)[0].view( \
                -1, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
            k_nope, v = kv_nope\
                .split([self.qk_nope_head_dim, self.v_head_dim], dim=-1)

            k = torch.cat((k_nope, k_pe.expand((*k_nope.shape[:-1], -1))),
                          dim=-1)

            attn_output, attn_softmax_lse = self._run_prefill_context_chunk(
                prefill=prefill_metadata,
                chunk_idx=i,
                q=q,
                k=k,
                v=v,
            )

            if output is None:
                output = attn_output
                output_lse = attn_softmax_lse
            else:
                # Merge this chunk's softmax state into the running state.
                output_tmp = torch.empty_like(output)
                output_lse_tmp = torch.empty_like(output_lse)
                merge_attn_states(
                    output=output_tmp,
                    output_lse=output_lse_tmp,
                    prefix_output=output,
                    prefix_lse=output_lse,
                    suffix_output=attn_output,
                    suffix_lse=attn_softmax_lse,
                )
                output = output_tmp
                output_lse = output_lse_tmp

        return output, output_lse
1072
+
1073
    def _forward_prefill(
        self,
        q: torch.Tensor,
        kv_c_normed: torch.Tensor,
        k_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
    ) -> torch.Tensor:
        """Prefill attention: causal attention over the new tokens, merged
        with non-causal attention over any cached context chunks.

        Returns a (num_prefill_tokens, N*V) tensor.
        """
        assert attn_metadata.prefill is not None

        has_context = attn_metadata.prefill.chunked_context is not None
        kv_nope = self.kv_b_proj(kv_c_normed)[0].view(\
            -1, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
        k_nope, v = kv_nope\
            .split([self.qk_nope_head_dim, self.v_head_dim], dim=-1)

        k = torch.cat((k_nope, k_pe.expand((*k_nope.shape[:-1], -1))), dim=-1)

        # The LSE is only needed when there is context to merge with below.
        output = self._run_prefill_new_tokens(
            prefill=attn_metadata.prefill,
            q=q,
            k=k,
            v=v,
            return_softmax_lse=has_context,
        )

        if has_context:
            suffix_output, suffix_lse = output
            context_output, context_lse = self._compute_prefill_context( \
                q, kv_c_and_k_pe_cache, attn_metadata)

            output = torch.empty_like(suffix_output)
            merge_attn_states(
                output=output,
                prefix_output=context_output,
                prefix_lse=context_lse,
                suffix_output=suffix_output,
                suffix_lse=suffix_lse,
            )

        # unpad if necessary (v was zero-padded up to the qk head dim)
        if self._pad_v:
            output = output[..., :v.shape[-1]]

        return output.flatten(start_dim=-2)
1118
+
1119
    @abstractmethod
    def _forward_decode(
        self,
        ql_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: M,
    ) -> torch.Tensor:
        """Backend-specific decode attention over the latent KV cache.

        Args:
            ql_nope: non-rope query projected into latent space, (B, N, L).
            q_pe: rope (positional) part of the query.
            kv_c_and_k_pe_cache: paged cache holding latent KV + rope K.
            attn_metadata: metadata produced by the matching builder.
        """
        raise NotImplementedError
1128
+
1129
    def forward(
        self,
        layer: AttentionLayer,
        q: torch.Tensor,
        k_c_normed: torch.Tensor,  # key in unified attn
        k_pe: torch.Tensor,  # value in unified attn
        kv_cache: torch.Tensor,
        attn_metadata: M,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """MLA forward: write the latent KV to cache, then run the prefill
        and/or decode paths into the pre-allocated output tensor.

        The batch is laid out decode-first (see the metadata builder), so
        tokens [:num_decode_tokens] are decode and the rest are prefill.
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for MLACommonImpl")

        if attn_metadata is None:
            # The zero fill is required when used with DP + EP
            # to ensure all ranks within a DP group compute the
            # same expert outputs.
            return output.fill_(0)

        num_actual_toks = attn_metadata.num_actual_tokens

        # Inputs and outputs may be padded for CUDA graphs
        output_padded = output
        output = output[:num_actual_toks, ...]
        q = q[:num_actual_toks, ...]
        k_c_normed = k_c_normed[:num_actual_toks, ...]
        k_pe = k_pe[:num_actual_toks, ...]

        assert attn_metadata.num_decodes is not None and \
            attn_metadata.num_prefills is not None and \
            attn_metadata.num_decode_tokens is not None

        has_decode = attn_metadata.num_decodes > 0
        has_prefill = attn_metadata.num_prefills > 0
        num_decode_tokens = attn_metadata.num_decode_tokens

        decode_q = q[:num_decode_tokens]

        prefill_q = q[num_decode_tokens:]
        prefill_k_pe = k_pe[num_decode_tokens:]
        prefill_k_c_normed = k_c_normed[num_decode_tokens:]

        # write the latent and rope to kv cache
        if kv_cache.numel() > 0:
            ops.concat_and_cache_mla(
                k_c_normed,
                k_pe.squeeze(1),
                kv_cache,
                attn_metadata.slot_mapping.flatten(),
                kv_cache_dtype=self.kv_cache_dtype,
                scale=layer._k_scale,
            )

        if has_prefill:
            output[num_decode_tokens:] = self._forward_prefill(
                prefill_q, prefill_k_c_normed, prefill_k_pe, kv_cache,
                attn_metadata)

        if has_decode:
            assert attn_metadata.decode is not None
            decode_q_nope, decode_q_pe = decode_q.split(
                [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1)
            # Project the nope part of Q into latent space with W_UK_T so
            # decode attends directly over the latent cache.
            # Convert from (B, N, P) to (N, B, P)
            decode_q_nope = decode_q_nope.transpose(0, 1)
            # Multiply (N, B, P) x (N, P, L) -> (N, B, L)
            decode_ql_nope = torch.bmm(decode_q_nope, self.W_UK_T)
            # Convert from (N, B, L) to (B, N, L)
            decode_ql_nope = decode_ql_nope.transpose(0, 1)

            output[:num_decode_tokens] = self._forward_decode(
                decode_ql_nope, decode_q_pe, kv_cache, attn_metadata)

        return output_padded
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/cutlass_mla.py ADDED
@@ -0,0 +1,289 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ import os
5
+ from typing import ClassVar, Optional
6
+
7
+ import torch
8
+
9
+ import vllm._custom_ops as ops
10
+ from vllm.attention.backends.abstract import (AttentionType,
11
+ is_quantized_kv_cache)
12
+ from vllm.logger import init_logger
13
+ from vllm.v1.attention.backends.mla.common import (MLACommonBackend,
14
+ MLACommonImpl,
15
+ MLACommonMetadata,
16
+ MLACommonMetadataBuilder)
17
+ from vllm.v1.attention.backends.utils import AttentionCGSupport
18
+
19
+ logger = init_logger(__name__)
20
+
21
+
22
class CutlassMLAMetadataBuilder(MLACommonMetadataBuilder[MLACommonMetadata]):
    """Metadata builder for the CUTLASS MLA backend.

    Identical to the common MLA builder except that it advertises full
    CUDA-graph support for uniform single-token (decode-only) batches.
    """
    # enable full CUDA Graph support for decode-only capture
    cudagraph_support: ClassVar[
        AttentionCGSupport] = AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE
26
+
27
+
28
class CutlassMLABackend(MLACommonBackend):
    """MLA attention backend backed by the SM100 CUTLASS decode kernel."""

    @staticmethod
    def get_name() -> str:
        # Registry name used to select this backend.
        return "CUTLASS_MLA"

    @staticmethod
    def get_impl_cls() -> type["CutlassMLAImpl"]:
        return CutlassMLAImpl

    @staticmethod
    def get_builder_cls() -> type["CutlassMLAMetadataBuilder"]:
        return CutlassMLAMetadataBuilder
41
+
42
+
43
class SM100Workspace:
    """Process-wide, lazily-grown scratch buffer for the SM100 CUTLASS MLA
    decode kernel (shared by all layers via `g_sm100_workspace`)."""

    def __init__(self, initial_workspace_size):
        self._workspace_buf = torch.empty(initial_workspace_size,
                                          device="cuda",
                                          dtype=torch.uint8)

        self._block_size = 128  # Forced to 128

        # Pre-compute sm_count to avoid recomputing it. Use device 0 as a proxy
        # (assumes all devices are similar)
        properties = torch.cuda.get_device_properties(torch.device("cuda:0"))
        self._sm_count = properties.multi_processor_count

    def get_buf(self):
        # May be reallocated by ensure_size, so callers should not cache
        # the returned tensor across steps.
        return self._workspace_buf

    def ensure_size(self, attn_metadata: MLACommonMetadata,
                    num_kv_splits: int):
        """Grow the workspace if the upcoming batch needs more than we have."""
        batch_size = attn_metadata.num_reqs
        # NOTE(review): the kernel's "max seq len" input is derived from
        # max_query_len here — confirm this is the intended bound rather
        # than the KV sequence length.
        max_seq_len = attn_metadata.max_query_len

        workspace_size = ops.sm100_cutlass_mla_get_workspace_size(
            max_seq_len * self._block_size,
            batch_size,
            self._sm_count,
            num_kv_splits=num_kv_splits)

        if self._workspace_buf.shape[0] < workspace_size:
            self._workspace_buf.resize_(workspace_size)
73
+
74
+
75
+ g_sm100_workspace = SM100Workspace(128 * 1024 * 1024) # 128MB
76
+
77
+
78
+ class CutlassMLAImpl(MLACommonImpl[MLACommonMetadata]):
79
+
80
    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        """CUTLASS MLA impl setup: rejects unsupported features and reads
        the debug/override environment variables."""
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)

        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "CutlassMLAImpl does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap")

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "CutlassMLAImpl")

        if is_quantized_kv_cache(self.kv_cache_dtype):
            raise NotImplementedError(
                "CutlassMLA V1 with FP8 KV cache not yet supported")

        self._use_old_cutlass_mla = False
        # NOTE(review): any non-empty value (including "0") enables this —
        # env-var truthiness, not integer parsing.
        force_old_cutlass = os.environ.get("FORCE_OLD_CUTLASS_MLA", None)
        if force_old_cutlass:
            logger.warning("Forcing old cutlass mla kernel")
            self._use_old_cutlass_mla = True

        # TODO: Currently, num_kv_splits is limited to 16 to avoid hanging
        # issues. In case the code hangs, use:
        # FORCE_NUM_KV_SPLITS=1
        force_num_kv_splits = os.environ.get("FORCE_NUM_KV_SPLITS", None)
        if force_num_kv_splits:
            logger.warning("Forcing num_kv_splits to %d",
                           int(force_num_kv_splits))
            self._num_kv_splits = int(force_num_kv_splits)
        else:
            self._num_kv_splits = -1  # => Auto-detect

        # Share workspace buffer across all executions
        self._workspace = g_sm100_workspace
134
+
135
+ def _sm100_cutlass_mla_decode(
136
+ self,
137
+ q_nope: torch.Tensor,
138
+ q_pe: torch.Tensor,
139
+ kv_c_and_k_pe_cache: torch.Tensor,
140
+ seq_lens: torch.Tensor,
141
+ page_table: torch.Tensor,
142
+ workspace: torch.Tensor,
143
+ sm_scale: float,
144
+ num_kv_splits: int,
145
+ ) -> torch.Tensor:
146
+ assert (q_nope.ndim == 3
147
+ ), f"q_nope must be a 3D tensor, but got {q_nope.ndim}"
148
+ assert (
149
+ q_pe.ndim == 3), f"q_pe must be a 3D tensor, but got {q_pe.ndim}"
150
+ assert (
151
+ kv_c_and_k_pe_cache.ndim == 3
152
+ ), "kv_c_and_k_pe_cache must be a 3D tensor, but got {}".format(
153
+ kv_c_and_k_pe_cache.ndim)
154
+
155
+ B_q, H, D_q_nope = q_nope.shape
156
+ B_q_2, H_2, D_q_pe = q_pe.shape
157
+ assert (B_q == B_q_2) and (H == H_2)
158
+
159
+ _, PAGE_SIZE, D_ckv = kv_c_and_k_pe_cache.shape
160
+
161
+ D_latent = 512
162
+ D_rope = 64
163
+ assert D_q_nope == D_latent
164
+ assert D_q_pe == D_rope
165
+ assert D_ckv == D_latent + D_rope
166
+
167
+ MAX_HEADS = 128
168
+ assert H <= MAX_HEADS, f"H must be <= {MAX_HEADS}, but got {H}"
169
+ if H < MAX_HEADS:
170
+ q_nope_padded = q_nope.new_empty((B_q, MAX_HEADS, D_q_nope))
171
+ q_nope_padded[:, :H] = q_nope
172
+ q_nope = q_nope_padded
173
+
174
+ q_pe_padded = q_pe.new_empty((B_q, MAX_HEADS, D_q_pe))
175
+ q_pe_padded[:, :H] = q_pe
176
+ q_pe = q_pe_padded
177
+
178
+ assert len(page_table.shape) == 2
179
+ B_block_table, block_num = page_table.shape
180
+ assert B_block_table == B_q
181
+ assert (block_num
182
+ > 0), f"block num must be greater than 0, got {block_num}"
183
+ assert block_num % (128 / PAGE_SIZE) == 0
184
+
185
+ # TODO(kaixih@nvidia): support fp8
186
+ assert q_nope.dtype in (
187
+ torch.float16,
188
+ torch.bfloat16,
189
+ ), f"q_nope.dtype needs to be fp16 or bf16 but got {q_nope.dtype}."
190
+ assert q_nope.dtype == q_pe.dtype == kv_c_and_k_pe_cache.dtype
191
+ assert (
192
+ seq_lens.dtype == torch.int32
193
+ ), f"seq_lens.dtype needs to be int32 but got {seq_lens.dtype}."
194
+ assert (
195
+ page_table.dtype == torch.int32
196
+ ), f"page_table.dtype needs to be int32 but got {page_table.dtype}."
197
+
198
+ out = q_nope.new_empty((B_q, MAX_HEADS, D_latent))
199
+
200
+ ops.sm100_cutlass_mla_decode(
201
+ out,
202
+ q_nope,
203
+ q_pe,
204
+ kv_c_and_k_pe_cache,
205
+ seq_lens,
206
+ page_table,
207
+ workspace,
208
+ sm_scale,
209
+ num_kv_splits,
210
+ )
211
+ return out[:, :H].contiguous()
212
+
213
+ def _sm100_forward_decode(
214
+ self,
215
+ q_nope: torch.Tensor,
216
+ q_pe: torch.Tensor,
217
+ kv_c_and_k_pe_cache: torch.Tensor,
218
+ attn_metadata: MLACommonMetadata,
219
+ ) -> torch.Tensor:
220
+ assert kv_c_and_k_pe_cache.numel() > 0
221
+ assert attn_metadata.decode is not None
222
+
223
+ if self.kv_cache_dtype.startswith("fp8"):
224
+ raise NotImplementedError("FP8 Cutlass MLA not yet supported")
225
+
226
+ # Adjust workspace size (if necessary)
227
+ self._workspace.ensure_size(attn_metadata, self._num_kv_splits)
228
+
229
+ # Run MLA
230
+ # Clone q_nope and q_pe to make sure strides computation is correct.
231
+ # TODO: Check if we really need it
232
+ q_nope = q_nope.clone()
233
+ q_pe = q_pe.clone()
234
+
235
+ o = self._sm100_cutlass_mla_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
236
+ attn_metadata.decode.seq_lens,
237
+ attn_metadata.decode.block_table,
238
+ self._workspace.get_buf(),
239
+ self.scale, self._num_kv_splits)
240
+
241
+ return self._v_up_proj(o)
242
+
243
+ # TODO: Currently we leave it here only for backup in case something is
244
+ # wrong with the new SM100 CUTLASS MLA kernel
245
+ def _old_forward_decode(
246
+ self,
247
+ q_nope: torch.Tensor,
248
+ q_pe: torch.Tensor,
249
+ kv_c_and_k_pe_cache: torch.Tensor,
250
+ attn_metadata: MLACommonMetadata,
251
+ ) -> torch.Tensor:
252
+ assert kv_c_and_k_pe_cache.numel() > 0
253
+ assert attn_metadata.decode is not None
254
+
255
+ if self.kv_cache_dtype.startswith("fp8"):
256
+ raise NotImplementedError("FP8 Cutlass MLA not yet supported")
257
+
258
+ B = q_nope.shape[0]
259
+
260
+ o = torch.empty((B, self.num_heads, self.kv_lora_rank),
261
+ dtype=q_nope.dtype,
262
+ device=q_nope.device)
263
+
264
+ # Run MLA
265
+ # Clone q_nope and q_pe to make sure strides computation is correct.
266
+ q_nope = q_nope.clone()
267
+ q_pe = q_pe.clone()
268
+
269
+ ops.cutlass_mla_decode(o, q_nope, q_pe, kv_c_and_k_pe_cache,
270
+ attn_metadata.decode.seq_lens,
271
+ attn_metadata.decode.block_table, self.scale)
272
+
273
+ return self._v_up_proj(o)
274
+
275
+ def _forward_decode(
276
+ self,
277
+ q_nope: torch.Tensor,
278
+ q_pe: torch.Tensor,
279
+ kv_c_and_k_pe_cache: torch.Tensor,
280
+ attn_metadata: MLACommonMetadata,
281
+ ) -> torch.Tensor:
282
+ if self._use_old_cutlass_mla:
283
+ # TODO: Remove the old cutlass MLA kernel after more extensive
284
+ # testing
285
+ return self._old_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
286
+ attn_metadata)
287
+
288
+ return self._sm100_forward_decode(q_nope, q_pe, kv_c_and_k_pe_cache,
289
+ attn_metadata)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/flashmla.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ from dataclasses import dataclass
5
+ from typing import ClassVar, Optional
6
+
7
+ import torch
8
+
9
+ from vllm.attention.backends.abstract import (AttentionType,
10
+ is_quantized_kv_cache)
11
+ from vllm.attention.ops.flashmla import (flash_mla_with_kvcache,
12
+ get_mla_metadata,
13
+ is_flashmla_supported)
14
+ from vllm.config import VllmConfig
15
+ from vllm.logger import init_logger
16
+ from vllm.v1.attention.backends.mla.common import (MLACommonBackend,
17
+ MLACommonDecodeMetadata,
18
+ MLACommonImpl,
19
+ MLACommonMetadata,
20
+ MLACommonMetadataBuilder)
21
+ from vllm.v1.attention.backends.utils import AttentionCGSupport
22
+ from vllm.v1.kv_cache_interface import AttentionSpec
23
+
24
+ logger = init_logger(__name__)
25
+
26
+
27
class FlashMLABackend(MLACommonBackend):
    """MLA attention backend backed by the FlashMLA kernels."""

    @staticmethod
    def get_name() -> str:
        """Registry name under which this backend is selected."""
        return "FLASHMLA_VLLM_V1"

    @staticmethod
    def get_metadata_cls() -> type["FlashMLAMetadata"]:
        """Metadata dataclass carried through each forward pass."""
        return FlashMLAMetadata

    @staticmethod
    def get_builder_cls() -> type["FlashMLAMetadataBuilder"]:
        """Builder that produces FlashMLAMetadata per batch."""
        return FlashMLAMetadataBuilder

    @staticmethod
    def get_impl_cls() -> type["FlashMLAImpl"]:
        """Attention implementation class used at runtime."""
        return FlashMLAImpl
44
+
45
+
46
@dataclass
class FlashMLADecodeMetadata(MLACommonDecodeMetadata):
    # Per-SM tile scheduling info returned by get_mla_metadata().
    tile_scheduler_metadata: torch.Tensor
    # Per-request KV split counts returned by get_mla_metadata().
    num_splits: torch.Tensor
50
+
51
+
52
@dataclass
class FlashMLAMetadata(MLACommonMetadata[FlashMLADecodeMetadata]):
    # No extra fields: only binds the decode-metadata type parameter.
    pass
55
+
56
+
57
class FlashMLAMetadataBuilder(MLACommonMetadataBuilder[FlashMLAMetadata]):
    """Builds FlashMLAMetadata; keeps persistent buffers for full cudagraphs.

    When full cudagraph capture is enabled, tile-scheduler metadata and
    num_splits must live in fixed-address tensors, so fresh kernel outputs
    are copied into preallocated buffers each step.
    """
    cudagraph_support: ClassVar[AttentionCGSupport] = \
        AttentionCGSupport.UNIFORM_BATCH

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        super().__init__(kv_cache_spec, layer_names, vllm_config, device,
                         FlashMLAMetadata)

        self.compilation_config = vllm_config.compilation_config
        self.num_q_heads = vllm_config.model_config.get_num_attention_heads(
            vllm_config.parallel_config)

        # Persistent cudagraph buffers; only allocated when full cudagraphs
        # are enabled, otherwise left as None.
        self.cg_buf_tile_scheduler_metadata = None
        self.cg_buf_num_splits = None

        device_properties = torch.cuda.get_device_properties(self.device)
        num_sms = device_properties.multi_processor_count

        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
            self.cg_buf_tile_scheduler_metadata = torch.zeros(
                # Upper bound on size (<= #SMs, TileSchedulerMetaDataSize)
                # TileSchedulerMetaDataSize = 8
                (num_sms, 8),
                device=self.device,
                dtype=torch.int32,
            )
            self.cg_buf_num_splits = torch.empty(
                (vllm_config.scheduler_config.max_num_seqs + 1),
                device=self.device,
                dtype=torch.int32)

    def _build_decode(self, block_table_tensor: torch.Tensor,
                      seq_lens: torch.Tensor) -> FlashMLADecodeMetadata:
        """Compute FlashMLA scheduling metadata for a decode batch."""
        tile_scheduler_metadata, num_splits = \
            get_mla_metadata(
                seq_lens,
                self.num_q_heads,
                1,  # MQA for the decode path
            )

        # TODO: we can disambiguate between decode and mixed-prefill decode here
        # so we can only use the persistent buffer if a cudagraph is actually
        # being used.
        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
            assert self.cg_buf_tile_scheduler_metadata is not None
            assert self.cg_buf_num_splits is not None

            sm_parts = tile_scheduler_metadata.size(0)
            # Metadata per-SM, upper bound on size (<= #SMs, TileMetadataSize)
            assert sm_parts <= self.cg_buf_tile_scheduler_metadata.size(0)
            tile_scheduler_metadata_view = \
                self.cg_buf_tile_scheduler_metadata[:sm_parts]
            tile_scheduler_metadata_view.copy_(tile_scheduler_metadata)
            tile_scheduler_metadata = tile_scheduler_metadata_view

            # Num splits is per-batch, varying size (batch_size,)
            n = num_splits.size(0)
            # make sure static buffer is large enough
            assert n <= self.cg_buf_num_splits.size(0)
            num_splits_view = self.cg_buf_num_splits[:n]
            num_splits_view.copy_(num_splits)
            # Num splits needs to be monotonically increasing, so pad the
            # unused tail of the static buffer with the last value
            # (with: https://github.com/vllm-project/FlashMLA/pull/3, otherwise
            # it needs to be monotonically increasing by 1)
            self.cg_buf_num_splits[n:].fill_(num_splits[-1])
            num_splits = num_splits_view

        return FlashMLADecodeMetadata(
            block_table=block_table_tensor,
            seq_lens=seq_lens,
            tile_scheduler_metadata=tile_scheduler_metadata,
            num_splits=num_splits,
        )
131
+
132
+
133
class FlashMLAImpl(MLACommonImpl[FlashMLAMetadata]):
    """MLA decode implementation backed by the FlashMLA kernel.

    ALiBi, sliding window, logits soft-cap, non-decoder attention and
    quantized KV caches are all rejected at construction time.
    """

    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)

        assert is_flashmla_supported(), \
            "FlashMLA is not supported on this device"

        # Reject features this kernel has no code path for.
        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "FlashMLAImpl does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap")

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "FlashMLAImpl")

        if is_quantized_kv_cache(self.kv_cache_dtype):
            raise NotImplementedError(
                "FlashMLA V1 with FP8 KV cache not yet supported")

    def _forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: FlashMLAMetadata,
    ) -> torch.Tensor:
        """Run FlashMLA decode and project latent output to value heads."""
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        # Concatenate the no-rope and rope query parts into one head dim.
        q = torch.cat([q_nope, q_pe], dim=-1)\
            .unsqueeze(1)  # Add seqlen dim of 1 (decode)

        o, _ = flash_mla_with_kvcache(
            q=q,
            k_cache=kv_c_and_k_pe_cache.unsqueeze(-2),  # Add head dim of 1
            block_table=attn_metadata.decode.block_table,
            cache_seqlens=attn_metadata.decode.seq_lens,
            head_dim_v=self.kv_lora_rank,
            tile_scheduler_metadata=attn_metadata.decode.
            tile_scheduler_metadata,
            num_splits=attn_metadata.decode.num_splits,
            softmax_scale=self.scale,
            causal=True,
        )

        return self._v_up_proj(o)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/pallas.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import torch
8
+ import torch_xla.core.xla_builder as xb
9
+ import torch_xla.experimental.custom_kernel # noqa: F401
10
+ # Required to register custom ops.
11
+ from torch.library import impl
12
+ from torch_xla._internal.jax_workarounds import requires_jax
13
+ from torch_xla.experimental.custom_kernel import XLA_LIB
14
+
15
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
16
+ AttentionLayer, AttentionType)
17
+ from vllm.attention.backends.utils import CommonAttentionState
18
+ from vllm.config import VllmConfig
19
+ from vllm.logger import init_logger
20
+ from vllm.utils import cdiv, next_power_of_2
21
+
22
+ logger = init_logger(__name__)
23
+
24
# TPU requires the head size to be a multiple of 128.
TPU_HEAD_SIZE_ALIGNMENT = 128

# Note: TPU can use fp8 as the storage dtype, but it doesn't support
# converting from uint8 to fp32 directly. That's why this dtype mapping
# differs from the one used on GPU.
TPU_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.half,
    "bfloat16": torch.bfloat16,
    "float": torch.float,
    "fp8": torch.float8_e4m3fn,
    "fp8_e4m3": torch.float8_e4m3fn,
    "fp8_e5m2": torch.float8_e5m2,
    "int8": torch.int8,
    "uint8": torch.uint8,
}
39
+
40
+
41
class PallasAttentionBackend(AttentionBackend):
    """TPU (Pallas) attention backend: static factory and sizing hooks."""

    @staticmethod
    def get_name() -> str:
        return "PALLAS_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["PallasAttentionBackendImpl"]:
        return PallasAttentionBackendImpl

    @staticmethod
    def get_metadata_cls() -> type["PallasMetadata"]:
        return PallasMetadata

    @staticmethod
    def get_state_cls() -> type["CommonAttentionState"]:
        return CommonAttentionState

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        # K and V share the head axis (factor 2); the head dim is padded up
        # to the TPU alignment requirement.
        padded_head_size = cdiv(
            head_size, TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT
        return (num_blocks, block_size, 2 * num_kv_heads, padded_head_size)

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: torch.Tensor,
    ) -> None:
        raise RuntimeError("swap_blocks is not used for the TPU backend.")

    # In recent TPU generations, up to v6e, the SMEM size is 1MB. The
    # block_tables within the PallasMetadata constitute almost the entire SMEM
    # requirement. Its size is max_num_seqs * num_page_per_seq * 4 (Int). Here
    # we simply make sure that the size is smaller than half of SMEM capacity.
    @staticmethod
    def get_min_page_size(vllm_config: VllmConfig) -> int:
        # Per-request budget: half of the 1MB SMEM, 4 bytes per entry,
        # split across max_num_seqs requests.
        page_budget = (1024 * 1024 // 2 //
                       vllm_config.scheduler_config.max_num_seqs // 4)
        raw_page_size = cdiv(vllm_config.model_config.max_model_len,
                             page_budget)
        # Round up to the next power of two.
        return 1 << (raw_page_size - 1).bit_length()

    @staticmethod
    def get_max_num_seqs(model_len: int, page_size: int) -> int:
        pages_per_req = cdiv(model_len, page_size)
        return 1024 * 1024 // 2 // pages_per_req // 4

    # TPU has limited SREGs (scalar registers), if page_size is too small, we
    # can spill SREGs easily which leads to bad performance. The strategy we
    # apply here is trying to split max-model-len to 16 pages which make the
    # spill less likely. Meanwhile we make sure the page size is in [16, 256].
    @staticmethod
    def get_page_size(vllm_config: VllmConfig) -> int:
        # TODO: This is a temporary fix for vmem OOM.
        # For long model length, we use 16 page-size to avoid too much
        # VMEM spill. A more robust solution should be implemented to
        # handle VREG spills.
        if vllm_config.model_config.max_model_len > 8192:
            return 16
        candidate = next_power_of_2(
            vllm_config.model_config.max_model_len) // 16
        # Clamp into the [16, 256] range.
        return min(max(candidate, 16), 256)
115
+
116
+
117
@dataclass
class PallasMetadata:
    """Per-step attention metadata consumed by PallasAttentionBackendImpl."""
    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ---------------------|
    #                                   |-- query_len ---|

    # Used in the PallasAttentionBackendImpl
    # Flat KV-cache slot index for each input token.
    slot_mapping: torch.Tensor
    # Per-request physical block indices.
    block_tables: torch.Tensor
    # Per-request context lengths.
    context_lens: torch.Tensor
    # Cumulative query-token offsets per request.
    query_start_loc: torch.Tensor
    # Number of active requests in the batch.
    num_seqs: torch.Tensor
    # Number of contiguous KV-cache slices written this step.
    num_kv_update_slices: torch.Tensor
    # Static tiling parameter for the kv_cache_update kernel.
    num_slices_per_kv_cache_update_block: int
135
+
136
+
137
class PallasAttentionBackendImpl(AttentionImpl):
    """TPU attention implementation using the ragged paged-attention kernel.

    ALiBi and non-decoder attention are rejected at construction time; a
    quantized KV-cache dtype is resolved via TPU_STR_DTYPE_TO_TORCH_DTYPE.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: str = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[int] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        self.sliding_window = sliding_window
        self.logits_soft_cap = logits_soft_cap
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name

        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        if alibi_slopes is not None:
            raise NotImplementedError("Alibi slopes is not supported.")

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "PallasAttentionBackendImpl")

        # None means the cache keeps the model's dtype ("auto").
        self.kv_cache_quantized_dtype = None
        if kv_cache_dtype != "auto":
            self.kv_cache_quantized_dtype = TPU_STR_DTYPE_TO_TORCH_DTYPE.get(
                kv_cache_dtype.lower().strip())

    def forward(
        self,
        layer: AttentionLayer,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: PallasMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with Pallas attention.

        Args:
            query: shape = [num_tokens, num_heads * head_size]
            key: shape = [num_tokens, num_kv_heads * head_size]
            value: shape = [num_tokens, num_kv_heads * head_size]
            kv_cache = [num_blocks, block_size, num_kv_heads * 2, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for PallasAttentionBackendImpl")

        # For determine_available_memory case.
        if kv_cache.numel() == 0:
            if output is None:
                output = torch.ones_like(query)
            return output

        num_tokens, hidden_size = query.shape
        query = query.view(num_tokens, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)
        # Zero-pad the head dim up to the TPU alignment; the padding is
        # stripped from the output again below.
        if self.head_size % TPU_HEAD_SIZE_ALIGNMENT != 0:
            padded_head_size = cdiv(
                self.head_size,
                TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT
            query = torch.nn.functional.pad(
                query, (0, padded_head_size - self.head_size), value=0.0)
            key = torch.nn.functional.pad(
                key, (0, padded_head_size - self.head_size), value=0.0)
            value = torch.nn.functional.pad(
                value, (0, padded_head_size - self.head_size), value=0.0)

        if self.kv_sharing_target_layer_name is None and kv_cache.numel() > 0:
            # Write input keys and values to the KV cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            slot_mapping = attn_metadata.slot_mapping
            write_to_kv_cache(
                key,
                value,
                kv_cache,
                slot_mapping,
                attn_metadata.num_slices_per_kv_cache_update_block,
                attn_metadata.num_kv_update_slices,
                self.kv_cache_quantized_dtype,
                layer._k_scale_float,
                layer._v_scale_float,
            )

        # Zero scales would corrupt the dequantization in the kernel.
        if self.kv_cache_quantized_dtype is not None and (
                layer._k_scale_float == 0.0 or layer._v_scale_float == 0.0):
            raise ValueError(
                "k_scale_float and v_scale_float must be non-zero")
        output = torch.ops.xla.ragged_paged_attention(
            query,
            kv_cache,
            attn_metadata.context_lens,
            attn_metadata.block_tables,
            attn_metadata.query_start_loc,
            attn_metadata.num_seqs,
            # By default, the system utilizes optimized block size and
            # vmem_limit_bytes parameters from the kernel repository. However,
            # these can be manually adjusted for debugging if necessary.
            num_kv_pages_per_block=None,
            num_queries_per_block=None,
            vmem_limit_bytes=None,
            use_kernel=True,
            sm_scale=self.scale,
            sliding_window=self.sliding_window,
            soft_cap=self.logits_soft_cap,
            k_scale=layer._k_scale_float,
            v_scale=layer._v_scale_float,
        )

        # Strip the head-dim padding added above.
        if self.head_size % TPU_HEAD_SIZE_ALIGNMENT != 0:
            output = output[:, :, :self.head_size]

        return output.reshape(num_tokens, hidden_size)
268
+
269
+
270
def write_to_kv_cache(
    key: torch.Tensor,
    value: torch.Tensor,
    kv_cache: torch.Tensor,
    slot_mapping: torch.Tensor,
    num_slices_per_kv_cache_update_block: int,
    num_kv_update_slices: torch.Tensor,
    kv_cache_quantized_dtype: Optional[torch.dtype] = None,
    k_scale: float = 1.0,
    v_scale: float = 1.0,
) -> None:
    """ Write the key and values to the KV cache.

    If a quantized cache dtype is given, key/value are scaled, clamped to
    the dtype's representable range, and cast before being written.

    Args:
        key: shape = [num_tokens, num_kv_heads, head_size]
        value: shape = [num_tokens, num_kv_heads, head_size]
        kv_cache = [num_blocks, block_size, num_kv_heads * 2, head_size]
        num_slices_per_kv_cache_update_block: int
    """
    _, page_size, num_combined_kv_heads, head_size = kv_cache.shape
    # Round the head size up to the TPU alignment the cache was shaped with.
    head_size = cdiv(head_size,
                     TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT

    if kv_cache_quantized_dtype is not None:
        dtype_info = torch.finfo(kv_cache_quantized_dtype)
        key = key.to(torch.float32) / k_scale
        # NOTE: clamp is added here to avoid out of range of quantized dtype
        key = torch.clamp(key, dtype_info.min, dtype_info.max)
        key = key.to(kv_cache_quantized_dtype)
        value = value.to(torch.float32) / v_scale
        value = torch.clamp(value, dtype_info.min, dtype_info.max)
        value = value.to(kv_cache_quantized_dtype)

    # Interleave K and V along the combined head axis to match the cache
    # layout [.., num_kv_heads * 2, head_size].
    kv = torch.cat([key, value], axis=-1).reshape(-1, num_combined_kv_heads,
                                                  head_size)

    # Allow XLA to reuse the cache buffer for the functional update below.
    torch.ops.xla.dynamo_set_buffer_donor_(kv_cache, True)

    kv_cache = kv_cache.flatten(0, 1)
    new_kv_cache = torch.ops.xla.kv_cache_update_op(
        kv, slot_mapping, kv_cache, num_kv_update_slices, page_size,
        num_slices_per_kv_cache_update_block)
    # NOTE: the in-place copy will be optimized away by XLA compiler.
    kv_cache.copy_(new_kv_cache)
314
+
315
+
316
@requires_jax
def kv_cache_update_op_impl(kv: torch.Tensor, slot_mapping: torch.Tensor,
                            kv_cache: torch.Tensor,
                            num_kv_update_slices: torch.Tensor, page_size: int,
                            num_slices_per_block: int):
    # Bridge into the JAX/Pallas kernel: xb.call_jax traces the JAX
    # kv_cache_update function into the surrounding XLA computation.
    # The import is local so the JAX dependency is only needed here.
    from vllm.attention.ops.pallas_kv_cache_update import kv_cache_update
    new_kv_cache = xb.call_jax(
        kv_cache_update, (kv, slot_mapping, kv_cache, num_kv_update_slices), {
            "page_size": page_size,
            "num_slices_per_block": num_slices_per_block
        })
    return new_kv_cache
328
+
329
+
330
# Declare the custom-op schema once; per-backend implementations are
# registered below via @impl for "XLA" and "CompositeExplicitAutograd".
XLA_LIB.define(
    "kv_cache_update_op(Tensor kv, Tensor slot_mapping, Tensor kv_cache," \
    "Tensor num_kv_update_slices, int page_size, int num_slices_per_block)" \
    "-> Tensor", )
334
+
335
+
336
@impl(XLA_LIB, "kv_cache_update_op", "XLA")
def kv_cache_update_op_xla(kv: torch.Tensor, slot_mapping: torch.Tensor,
                           kv_cache: torch.Tensor,
                           num_kv_update_slices: torch.Tensor, page_size: int,
                           num_slices_per_block: int) -> torch.Tensor:
    """XLA dispatch: delegate to the JAX-backed Pallas implementation."""
    return kv_cache_update_op_impl(kv, slot_mapping, kv_cache,
                                   num_kv_update_slices, page_size,
                                   num_slices_per_block)
345
+
346
+
347
@impl(XLA_LIB, "kv_cache_update_op", "CompositeExplicitAutograd")
def kv_cache_update_op_non_xla(kv: torch.Tensor, slot_mapping: torch.Tensor,
                               kv_cache: torch.Tensor,
                               num_kv_update_slices: torch.Tensor,
                               page_size: int,
                               num_slices_per_block: int) -> torch.Tensor:
    # Non-XLA registration returns the cache unchanged — presumably a
    # shape-preserving placeholder for tracing; the real update happens in
    # the XLA lowering above. TODO(review): confirm intent.
    return kv_cache
354
+
355
+
356
# We can move this function to a common utils file if it's also useful for
# other hardware.
def dtype_bits(dtype: torch.dtype) -> int:
    """Return the bit width of *dtype*.

    Floating, complex and ordinary integer dtypes are resolved through
    ``torch.finfo``/``torch.iinfo``; sub-byte or otherwise unsupported
    integer dtypes (e.g. ``torch.int4``, ``torch.uint2``) fall back to
    parsing the trailing digits of the dtype name.

    Raises:
        TypeError: if the bit width cannot be determined.
    """
    if dtype.is_floating_point:
        try:
            return torch.finfo(dtype).bits
        except TypeError:
            pass
    elif dtype.is_complex:
        if dtype is torch.complex32:
            return 32
        elif dtype is torch.complex64:
            return 64
        elif dtype is torch.complex128:
            return 128
    else:
        try:
            return torch.iinfo(dtype).bits
        # torch.iinfo cannot support int4, int2, bits8...
        except TypeError:
            pass
    str_dtype = str(dtype)
    # support torch.int4, torch.int5, torch.uint5... Parse *all* trailing
    # digits (not just the last character) so multi-digit widths such as
    # a hypothetical torch.uint16 fallback are handled correctly too.
    prefix = str_dtype.rstrip("0123456789")
    if prefix in ("torch.int", "torch.uint") and len(prefix) < len(str_dtype):
        return int(str_dtype[len(prefix):])
    raise TypeError(f"Getting the bit width of {dtype} is not supported")


def get_dtype_packing(dtype) -> int:
    """Return how many values of *dtype* pack into one 32-bit word.

    Raises:
        ValueError: if the dtype's bit width does not evenly divide 32.
    """
    bits = dtype_bits(dtype)
    if 32 % bits != 0:
        # NOTE: the message is a single f-string; previously the second
        # literal lacked the f prefix and printed "{dtype}" verbatim, and
        # the wording inverted the divisibility requirement.
        raise ValueError(
            f"The bit width must evenly divide 32, but got bits={bits}, "
            f"dtype={dtype}")
    return 32 // bits
391
+
392
+
393
def get_page_size_bytes(block_size: int, num_kv_heads: int, head_size: int,
                        kv_cache_dtype: torch.dtype) -> int:
    """Size in bytes of a single KV-cache page on TPU.

    Accounts for head-dim padding to the TPU alignment and for XLA's
    implicit padding of the combined K/V head axis up to a whole number
    of 32-bit words.
    """

    def _round_up(value: int, multiple: int) -> int:
        return cdiv(value, multiple) * multiple

    padded_head_dim = _round_up(head_size, TPU_HEAD_SIZE_ALIGNMENT)

    # K and V share the head axis, hence the factor of two; XLA implicitly
    # pads that axis to a multiple of the per-word packing factor.
    combined_heads = _round_up(num_kv_heads * 2,
                               get_dtype_packing(kv_cache_dtype))

    total_bits = (block_size * combined_heads * padded_head_dim *
                  dtype_bits(kv_cache_dtype))
    return total_bits // 8
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/rocm_aiter_fa.py ADDED
@@ -0,0 +1,546 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with AiterFlashAttention."""
4
+ from dataclasses import dataclass
5
+ from typing import ClassVar, Optional
6
+
7
+ import torch
8
+
9
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
10
+ AttentionMetadata, AttentionType)
11
+ from vllm.config import VllmConfig
12
+ from vllm.logger import init_logger
13
+ from vllm.platforms import current_platform
14
+ from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder,
15
+ CommonAttentionMetadata)
16
+ from vllm.v1.kv_cache_interface import AttentionSpec
17
+
18
# NOTE(review): appears to be the KV partition length for the ROCm
# split-KV attention kernels — confirm against the usages below.
_PARTITION_SIZE_ROCM = 256
19
+
20
+ if current_platform.is_rocm():
21
+ import aiter
22
+
23
+ from vllm.triton_utils import tl, triton
24
+ from vllm.utils import direct_register_custom_op
25
+
26
@triton.jit
def _vllm_layout_trans_kernel(
    k_buffer_ptr,
    v_buffer_ptr,
    k_values_ptr,
    v_values_ptr,
    b_query_lens_loc,
    b_seq_lens_loc,
    block_table,
    block_table_stride_0,
    k_scale,
    v_scale,
    output_dtype: tl.constexpr,
    E_DIM: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    # Grid: (num_sequences, max_blocks_per_sequence). Each program copies
    # one paged KV block of one sequence from the paged cache layout into
    # a contiguous [token, num_kv_heads * head_size] destination buffer.
    batch_idx = tl.program_id(0)
    block_idx = tl.program_id(1)

    # Load [start, end) for this sequence's query tokens from the
    # cumulative query-length array (two adjacent entries).
    batch_query_indexes = tl.load(b_query_lens_loc + batch_idx +
                                  tl.arange(0, 2))
    batch_query_start, batch_query_end = tl.split(batch_query_indexes)
    query_len = batch_query_end - batch_query_start

    # Sequences with a single query token are skipped by this transform
    # (they are not consumed by the varlen prefill path).
    if query_len <= 1:
        return

    # Load [start, end) for this sequence's KV tokens from the cumulative
    # sequence-length array.
    batch_token_indexes = tl.load(b_seq_lens_loc + batch_idx +
                                  tl.arange(0, 2))
    batch_token_start, batch_token_end = tl.split(batch_token_indexes)
    seq_len = batch_token_end - batch_token_start

    if block_idx * BLOCK_SIZE < seq_len:
        # Mask off the tail positions of a partial final block.
        block_mask = (block_idx * BLOCK_SIZE +
                      tl.arange(0, BLOCK_SIZE)[:, None]) < seq_len

        # Physical block id for this logical block, via the block table.
        kv_idx = tl.load(block_table + batch_idx * block_table_stride_0 +
                         block_idx).to(tl.int64)

        kv_buffer_off = kv_idx * BLOCK_SIZE * E_DIM + tl.arange(
            0, BLOCK_SIZE)[:, None] * E_DIM + tl.arange(0, E_DIM)[None, :]
        k_vals = tl.load(k_buffer_ptr + kv_buffer_off,
                         mask=block_mask,
                         other=0.0)
        # Dequantize fp8 cache entries with the per-layer scale.
        if k_vals.dtype.is_fp8():
            k_vals = (k_vals.to(tl.float32) *
                      tl.load(k_scale)).to(output_dtype)
        else:
            k_vals = k_vals.to(output_dtype)

        v_vals = tl.load(v_buffer_ptr + kv_buffer_off,
                         mask=block_mask,
                         other=0.0)
        if v_vals.dtype.is_fp8():
            v_vals = (v_vals.to(tl.float32) *
                      tl.load(v_scale)).to(output_dtype)
        else:
            v_vals = v_vals.to(output_dtype)
        # Destination offset: each sequence's tokens are contiguous,
        # starting at batch_token_start.
        kv_values_off = batch_token_start * E_DIM + \
            block_idx * BLOCK_SIZE * E_DIM + \
            tl.arange(0, BLOCK_SIZE)[:, None] * E_DIM + \
            tl.arange(0, E_DIM)[None, :]
        tl.store(k_values_ptr + kv_values_off, k_vals, mask=block_mask)
        tl.store(v_values_ptr + kv_values_off, v_vals, mask=block_mask)
90
+
91
def vllm_layout_trans(b_query_lens_loc, b_seq_lens_loc, block_table,
                      k_cache, v_cache, max_seq_len, k_scale, v_scale,
                      output_dtype, total_tokens):
    """Gather the paged K/V caches into contiguous tensors.

    Returns ``(k_values, v_values)``, each shaped
    ``[total_tokens, num_kv_heads, head_size]`` in *output_dtype*, with
    fp8 cache entries dequantized by ``k_scale`` / ``v_scale`` inside the
    Triton kernel.
    """
    # Per-cache layout: [num_blocks, BLOCK_SIZE, H_KV, D].
    H_KV = v_cache.shape[2]
    D = v_cache.shape[3]
    BLOCK_SIZE = v_cache.shape[1]

    k_values = torch.empty(
        (total_tokens, H_KV, D),
        dtype=output_dtype,
        device=k_cache.device,
    )
    v_values = torch.empty(
        (total_tokens, H_KV, D),
        dtype=output_dtype,
        device=v_cache.device,
    )

    # One program per (sequence, KV block).
    grid = (block_table.shape[0],
            (max_seq_len + BLOCK_SIZE - 1) // BLOCK_SIZE)

    # Map the torch dtype to its Triton equivalent for the kernel.
    if output_dtype == torch.float16:
        output_dtype = tl.float16
    elif output_dtype == torch.bfloat16:
        output_dtype = tl.bfloat16
    else:
        raise ValueError(f"Unsupported output dtype: {output_dtype}")

    _vllm_layout_trans_kernel[grid](k_cache,
                                    v_cache,
                                    k_values,
                                    v_values,
                                    b_query_lens_loc,
                                    b_seq_lens_loc,
                                    block_table,
                                    block_table.stride(0),
                                    k_scale,
                                    v_scale,
                                    output_dtype=output_dtype,
                                    E_DIM=H_KV * D,
                                    BLOCK_SIZE=BLOCK_SIZE)

    return k_values, v_values
134
+
135
def flash_attn_varlen_func_impl(
    q: torch.Tensor,
    k_cache: torch.Tensor,
    v_cache: torch.Tensor,
    out: torch.Tensor,
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    softmax_scale: float,
    window_size: Optional[list[int]],  # -1 means infinite context window
    alibi_slopes: Optional[list[float]],
    block_table: torch.Tensor,
    k_scale: torch.Tensor,
    v_scale: torch.Tensor,
    total_tokens: int = 0,
) -> torch.Tensor:
    """Varlen flash attention over the paged KV cache (ROCm aiter).

    First gathers K/V into contiguous tensors via ``vllm_layout_trans``,
    then runs ``aiter.flash_attn_varlen_func``, writing into *out*.
    """
    if total_tokens == 0:
        # Not precomputed by the metadata builder; derive from the
        # cumulative KV lengths (incurs a device sync via .item()).
        total_tokens = int(cu_seqlens_k[-1].item())
    k, v = vllm_layout_trans(cu_seqlens_q, cu_seqlens_k, block_table,
                             k_cache, v_cache, max_seqlen_k, k_scale,
                             v_scale, q.dtype, total_tokens)

    output = aiter.flash_attn_varlen_func(
        q=q,
        k=k,
        v=v,
        cu_seqlens_q=cu_seqlens_q,
        max_seqlen_q=max_seqlen_q,
        min_seqlen_q=1,
        cu_seqlens_k=cu_seqlens_k,
        max_seqlen_k=max_seqlen_k,
        softmax_scale=softmax_scale,
        causal=True,
        alibi_slopes=alibi_slopes,
        window_size=window_size,
        out=out,
    )
    return output
174
+
175
def flash_attn_varlen_func_fake(
    q: torch.Tensor,
    k_cache: torch.Tensor,
    v_cache: torch.Tensor,
    out: torch.Tensor,
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    softmax_scale: float,
    window_size: Optional[list[int]],  # -1 means infinite context window
    alibi_slopes: Optional[list[float]],
    block_table: torch.Tensor,
    k_scale: torch.Tensor,
    v_scale: torch.Tensor,
    total_tokens: int = 0,
) -> torch.Tensor:
    """Fake (meta) implementation used when tracing/compiling the op.

    The real implementation writes its result into ``out``, which has the
    same [num_tokens, num_heads, head_size] shape as ``q``, so the fake
    must advertise exactly that shape. The previous version returned
    ``v_cache.shape[-2]`` (num_kv_heads) as the last dimension, which is
    wrong whenever num_kv_heads != head_size and would mislead shape
    inference during compilation.
    """
    return torch.empty_like(q)
197
+
198
# Register the varlen attention entrypoint as a torch custom op so it can
# be traced/compiled; "out" is declared as a mutated argument, and the
# fake implementation above supplies shape inference.
direct_register_custom_op("flash_attn_varlen_func",
                          flash_attn_varlen_func_impl, ["out"],
                          flash_attn_varlen_func_fake,
                          dispatch_key=current_platform.dispatch_key)

logger = init_logger(__name__)
204
+
205
+
206
@dataclass
class AiterFlashAttentionMetadata:
    """Per-step attention metadata for the ROCm aiter FA backend."""
    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ---------------------|
    #                                   |-- query_len ---|

    num_actual_tokens: int  # Number of tokens excluding padding.
    # Total KV tokens gathered for the prefill path (0 when no prefill
    # is present in the batch).
    num_actual_kv_tokens: int
    max_query_len: int
    query_start_loc: torch.Tensor  # Cumulative query lengths, [B + 1].
    max_seq_len: int
    seq_lens: torch.Tensor  # Per-sequence KV lengths, [B].
    slot_mapping: torch.Tensor
    block_table: torch.Tensor
    # Cumulative KV sequence lengths; None when the batch is decode-only.
    cu_seq_lens: Optional[torch.Tensor]

    # For cascade attention.
    use_cascade: bool
    common_prefix_len: int
    # Worst-case KV token budget used during CUDA graph capture
    # (0 outside capture; see build_for_cudagraph_capture).
    total_tokens: int
230
+
231
+
232
class AiterFlashAttentionMetadataBuilder(
        AttentionMetadataBuilder[AiterFlashAttentionMetadata]):
    """Builds AiterFlashAttentionMetadata from common scheduler metadata."""
    full_cudagraph_supported: ClassVar[bool] = True

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.parallel_config = vllm_config.parallel_config
        self.cache_config = vllm_config.cache_config
        self.device = device

        self.num_heads_q = self.model_config.get_num_attention_heads(
            self.parallel_config)
        self.num_heads_kv = self.model_config.get_num_kv_heads(
            self.parallel_config)
        self.headdim = self.model_config.get_head_size()
        self.block_size = kv_cache_spec.block_size
        self.kv_cache_spec = kv_cache_spec
        # Sliding window size to be used with the AOT scheduler will be
        # populated on first build() call.
        self.aot_sliding_window: Optional[tuple[int, int]] = None
        # Non-zero only while building for CUDA graph capture; forwarded
        # into the metadata as the worst-case gathered-KV budget.
        self.total_tokens: int = 0

    def build_for_cudagraph_capture(
            self, common_attn_metadata: CommonAttentionMetadata):
        """Build metadata with a worst-case total_tokens so buffers sized
        during capture are large enough for any later replay."""
        self.total_tokens = self.model_config.max_model_len \
            * self.vllm_config.scheduler_config.max_num_partial_prefills
        res = self.build(common_prefix_len=0,
                         common_attn_metadata=common_attn_metadata)
        # Reset so regular (non-capture) builds compute it per batch.
        self.total_tokens = 0
        return res

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> 'AiterFlashAttentionMetadata':
        """Assemble per-step attention metadata.

        ``cu_seq_lens`` / ``num_actual_kv_tokens`` are only materialized
        when the batch contains a multi-token query (prefill); a
        decode-only batch leaves them as None / 0.
        """
        num_actual_tokens = common_attn_metadata.num_actual_tokens
        max_query_len = common_attn_metadata.max_query_len
        max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
        query_start_loc = common_attn_metadata.query_start_loc
        seq_lens = common_attn_metadata.seq_lens
        block_table_tensor = common_attn_metadata.block_table_tensor
        slot_mapping = common_attn_metadata.slot_mapping
        if max_query_len > 1:
            # We pre-compute cumulative seq len needed for prefill attention
            # here to avoid recomputing it for every layer
            cu_seq_lens = torch.zeros(seq_lens.shape[0] + 1,
                                      dtype=torch.int32,
                                      device=seq_lens.device)
            torch.cumsum(seq_lens,
                         dim=0,
                         dtype=cu_seq_lens.dtype,
                         out=cu_seq_lens[1:])
            # NOTE: .item() forces a device sync once per step.
            num_actual_kv_tokens = int(cu_seq_lens[-1].item())
        else:
            cu_seq_lens = None
            num_actual_kv_tokens = 0

        # Placeholder scheduler hook; no AOT schedule is produced.
        def schedule(batch_size, cu_query_lens, max_query_len, seqlens,
                     max_seq_len, causal):
            return None

        use_cascade = common_prefix_len > 0

        attn_metadata = AiterFlashAttentionMetadata(
            num_actual_tokens=num_actual_tokens,
            num_actual_kv_tokens=num_actual_kv_tokens,
            max_query_len=max_query_len,
            query_start_loc=query_start_loc,
            max_seq_len=max_seq_len,
            seq_lens=seq_lens,
            block_table=block_table_tensor,
            slot_mapping=slot_mapping,
            cu_seq_lens=cu_seq_lens,
            use_cascade=use_cascade,
            common_prefix_len=common_prefix_len,
            total_tokens=self.total_tokens,
        )
        return attn_metadata

    def use_cascade_attention(self, *args, **kwargs) -> bool:
        # Cascade attention is not supported by this backend.
        return False
316
+
317
+
318
class AiterFlashAttentionBackend(AttentionBackend):
    """Backend descriptor for ROCm aiter flash attention."""

    accept_output_buffer: bool = True

    @staticmethod
    def get_name() -> str:
        return "FLASH_ATTN_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["AiterFlashAttentionImpl"]:
        return AiterFlashAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return AiterFlashAttentionMetadata

    @staticmethod
    def get_builder_cls() -> type["AiterFlashAttentionMetadataBuilder"]:
        return AiterFlashAttentionMetadataBuilder

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        return [64, 128, 256]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError when *head_size* is not supported."""
        supported_head_sizes = cls.get_supported_head_sizes()
        if head_size in supported_head_sizes:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported_head_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        """K and V caches are stacked along a leading dim of size 2."""
        if block_size % 16:
            raise ValueError("Block size must be a multiple of 16.")
        return (2, num_blocks, block_size, num_kv_heads, head_size)
367
+
368
+
369
class AiterFlashAttentionImpl(AttentionImpl):
    """Decoder self-attention using ROCm aiter kernels."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        if sliding_window is None:
            # (-1, -1) means an infinite context window.
            self.sliding_window = [-1, -1]
        else:
            self.sliding_window = [sliding_window - 1, 0]
        self.kv_cache_dtype = kv_cache_dtype
        if logits_soft_cap is None:
            # In flash-attn, setting logits_soft_cap as 0 means no soft cap.
            logits_soft_cap = 0.
        self.logits_soft_cap = logits_soft_cap
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name

        assert self.num_heads % self.num_kv_heads == 0
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        AiterFlashAttentionBackend.validate_head_size(head_size)

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "FlashAttentionImpl")

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AiterFlashAttentionMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with AiterFlashAttention.

        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size]
            kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        NOTE: FP8 quantization, flash-attn expect the size of
        {q,k,v}_descale to be (num_sequences, num_kv_heads).
        We use torch's .expand() to avoid duplicating values
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for FlashAttentionImpl")

        if attn_metadata is None:
            # Profiling run.
            return output

        # IMPORTANT!
        # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in
        # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead
        # in this method. For example, `view` and `slice` (or `[:n]`) operations
        # are surprisingly slow even in the case they do not invoke any GPU ops.
        # Minimize the PyTorch ops in this method as much as possible.
        # Whenever making a change in this method, please benchmark the
        # performance to make sure it does not introduce any overhead.

        num_actual_tokens = attn_metadata.num_actual_tokens
        key_cache, value_cache = kv_cache.unbind(0)
        if self.kv_sharing_target_layer_name is None:
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            # NOTE(woosuk): Here, key and value are padded while slot_mapping is
            # not padded. However, we don't need to do key[:num_actual_tokens]
            # and value[:num_actual_tokens] because the reshape_and_cache_flash
            # op uses the slot_mapping's shape to determine the number of
            # actual tokens.
            torch.ops._C_cache_ops.reshape_and_cache_flash(
                key,
                value,
                key_cache,
                value_cache,
                attn_metadata.slot_mapping,
                self.kv_cache_dtype,
                layer._k_scale,
                layer._v_scale,
            )

        if self.kv_cache_dtype.startswith("fp8"):
            # Reinterpret the raw cache storage as ROCm fp8 (e4m3fnuz);
            # dequantization happens inside the gather/attention kernels.
            key_cache = key_cache.view(torch.float8_e4m3fnuz)
            value_cache = value_cache.view(torch.float8_e4m3fnuz)

        if not attn_metadata.use_cascade:
            cu_seqlens_q = attn_metadata.query_start_loc
            seqused_k = attn_metadata.seq_lens
            max_seqlen_q = attn_metadata.max_query_len
            max_seqlen_k = attn_metadata.max_seq_len
            block_table = attn_metadata.block_table

            if max_seqlen_q > 1:
                # Prefill path: varlen flash attention over gathered KV.
                torch.ops.vllm.flash_attn_varlen_func(
                    query[:num_actual_tokens],
                    key_cache,
                    value_cache,
                    out=output[:num_actual_tokens],
                    cu_seqlens_q=cu_seqlens_q,
                    max_seqlen_q=max_seqlen_q,
                    max_seqlen_k=max_seqlen_k,
                    softmax_scale=self.scale,
                    alibi_slopes=self.alibi_slopes,
                    window_size=self.sliding_window,
                    block_table=block_table,
                    cu_seqlens_k=attn_metadata.cu_seq_lens,
                    k_scale=layer._k_scale,
                    v_scale=layer._v_scale,
                    total_tokens=attn_metadata.num_actual_kv_tokens,
                )

            _, num_heads, head_size = query.shape
            nbytes_per_qo_elem = torch.finfo(query.dtype).bits // 8
            num_seqs = seqused_k.shape[0]
            max_num_partitions = (max_seqlen_k + _PARTITION_SIZE_ROCM -
                                  1) // _PARTITION_SIZE_ROCM

            # Scratch space: partitioned outputs plus (judging by the
            # 2 * ... * 4 term) two fp32 reduction buffers per
            # (seq, head, partition) -- confirm against the aiter kernel.
            workspace_buffer = torch.empty(
                (num_seqs * num_heads * max_num_partitions * head_size) *
                nbytes_per_qo_elem + 2 *
                (num_seqs * num_heads * max_num_partitions) * 4,
                dtype=torch.uint8,
                device=output.device,
            )

            # NOTE(review): invoked for the whole batch even after the
            # varlen prefill call above; presumably the paged kernel only
            # processes single-token queries -- confirm with aiter docs.
            torch.ops.aiter.paged_attention_v1(
                output[:num_actual_tokens],
                workspace_buffer,
                query[:num_actual_tokens],
                key_cache,
                value_cache,
                self.scale,
                block_table,
                cu_seqlens_q,
                seqused_k,
                max_seqlen_k,
                self.alibi_slopes,
                self.kv_cache_dtype,
                "NHD",
                self.logits_soft_cap,
                layer._k_scale,
                layer._v_scale,
                None,
                _PARTITION_SIZE_ROCM,
            )
            return output
        else:
            raise NotImplementedError(
                "Cascade attention is not implemented for ROCM AITER")
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/tree_attn.py ADDED
@@ -0,0 +1,447 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with TreeAttention."""
4
+
5
+ import ast
6
+ from dataclasses import dataclass
7
+ from typing import TYPE_CHECKING, Optional
8
+
9
+ import torch
10
+
11
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
12
+ AttentionMetadata, AttentionType)
13
+ from vllm.attention.ops.triton_unified_attention import unified_attention
14
+ from vllm.config import VllmConfig
15
+ from vllm.logger import init_logger
16
+ from vllm.v1.attention.backends.utils import (
17
+ AttentionMetadataBuilder, CommonAttentionMetadata,
18
+ reorder_batch_to_split_decodes_and_prefills, split_decodes_and_prefills)
19
+ from vllm.v1.kv_cache_interface import AttentionSpec
20
+
21
+ if TYPE_CHECKING:
22
+ from vllm.v1.core.sched.output import SchedulerOutput
23
+ from vllm.v1.worker.gpu_input_batch import InputBatch
24
+
25
+ from vllm import _custom_ops as ops
26
+
27
+ logger = init_logger(__name__)
28
+
29
+
30
class TreeAttentionBackend(AttentionBackend):
    """Backend descriptor for tree (speculative token tree) attention."""

    accept_output_buffer: bool = True

    @staticmethod
    def get_name() -> str:
        return "TREE_ATTN_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["TreeAttentionImpl"]:
        return TreeAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return TreeAttentionMetadata

    @staticmethod
    def get_builder_cls() -> type["TreeAttentionMetadataBuilder"]:
        return TreeAttentionMetadataBuilder

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        return [32, 64, 96, 128, 160, 192, 224, 256]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError when *head_size* is not supported."""
        supported_head_sizes = cls.get_supported_head_sizes()
        if head_size in supported_head_sizes:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported_head_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        """K and V caches are stacked along a leading dim of size 2."""
        if block_size % 16:
            raise ValueError("Block size must be a multiple of 16.")
        return (2, num_blocks, block_size, num_kv_heads, head_size)

    @staticmethod
    def use_cascade_attention(*args, **kwargs) -> bool:
        # Cascade attention is not supported by this backend.
        return False
83
+
84
+
85
@dataclass
class TreeAttentionMetadata:
    """Per-step metadata for tree attention, split into a decode segment
    (first num_decodes requests) followed by a prefill segment."""
    num_actual_tokens: int  # Number of tokens excluding padding.
    max_query_len: int
    query_start_loc: torch.Tensor  # Cumulative query lengths, [B + 1].
    max_seq_len: int
    seq_lens: torch.Tensor  # Per-sequence KV lengths, [B].
    block_table: torch.Tensor
    slot_mapping: torch.Tensor

    num_prefill_tokens: int = 0
    num_decode_tokens: int = 0
    num_prefills: int = 0
    num_decodes: int = 0

    # Additive bias applied among the speculative tree tokens (decode only).
    tree_attn_bias: Optional[torch.Tensor] = None

    # Cached Prefill/decode metadata.
    _cached_prefill_metadata: Optional["TreeAttentionMetadata"] = None
    _cached_decode_metadata: Optional["TreeAttentionMetadata"] = None

    @property
    def prefill_metadata(self) -> Optional["TreeAttentionMetadata"]:
        """Slice of this metadata covering only the prefill requests
        (the batch suffix after the first num_decodes requests)."""
        if self.num_prefills == 0:
            return None

        if self._cached_prefill_metadata is not None:
            # Recover cached prefill-phase attention
            # metadata structure
            return self._cached_prefill_metadata

        q_start_loc = self.query_start_loc[self.num_decodes:]
        q_seqlens = torch.diff(q_start_loc)
        kv_seqlens = self.seq_lens[self.num_decodes:]
        # Construct & cache prefill-phase attention metadata structure
        # NOTE: the .item() calls below each force a device sync.
        self._cached_prefill_metadata = TreeAttentionMetadata(
            num_actual_tokens=self.num_prefill_tokens,
            max_query_len=int(q_seqlens.max().item()),
            # Rebase cumulative query offsets so they start at 0.
            query_start_loc=q_start_loc - q_start_loc[0],
            max_seq_len=int(kv_seqlens.max().item()),
            seq_lens=kv_seqlens,
            block_table=self.block_table[self.num_decodes:],
            slot_mapping=self.slot_mapping[self.num_decode_tokens:],
        )
        return self._cached_prefill_metadata

    @property
    def decode_metadata(self) -> Optional["TreeAttentionMetadata"]:
        """Slice of this metadata covering only the decode requests
        (the batch prefix), carrying the tree attention bias."""
        if self.num_decode_tokens == 0:
            return None

        if self._cached_decode_metadata is not None:
            # Recover cached decode-phase attention
            # metadata structure
            return self._cached_decode_metadata

        q_start_loc = self.query_start_loc[:self.num_decodes + 1]
        q_seqlens = torch.diff(q_start_loc)
        kv_seqlens = self.seq_lens[:self.num_decodes]
        # Construct & cache decode-phase attention metadata structure
        self._cached_decode_metadata = TreeAttentionMetadata(
            num_actual_tokens=self.num_decode_tokens,
            max_query_len=int(q_seqlens.max().item()),
            query_start_loc=q_start_loc,
            max_seq_len=int(kv_seqlens.max().item()),
            seq_lens=kv_seqlens,
            block_table=self.block_table[:self.num_decodes],
            slot_mapping=self.slot_mapping[:self.num_decode_tokens],
            tree_attn_bias=self.tree_attn_bias,
        )
        return self._cached_decode_metadata
156
+
157
+
158
class TreeAttentionMetadataBuilder(
        AttentionMetadataBuilder[TreeAttentionMetadata]):
    """Builds TreeAttentionMetadata, including the static tree bias."""

    def __init__(
        self,
        kv_cache_spec: AttentionSpec,
        layer_names: list[str],
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        self.kv_cache_spec = kv_cache_spec
        self.block_size = kv_cache_spec.block_size

        spec_config = vllm_config.speculative_config
        # speculative_token_tree is a string literal such as
        # "[(0,), (0, 0)]"; parse it into a list of paths. Fall back to a
        # single-child tree [(0,)] when no tree is configured.
        spec_token_tree = (spec := spec_config) and spec.speculative_token_tree
        tree_choices: list[tuple[int,
                                 ...]] = (ast.literal_eval(spec_token_tree)
                                          if spec_token_tree is not None else
                                          [(0, )])
        # Construct the tree attention bias.
        depth_counts = _get_depth_counts(tree_choices)
        self.tree_attn_bias = _prepare_tree_attn_bias(
            tree_choices,
            depth_counts,
            dtype=torch.float32,
            device=device,
        )

    def reorder_batch(self, input_batch: "InputBatch",
                      scheduler_output: "SchedulerOutput") -> bool:
        # Requests with at most tree_len query tokens are treated as
        # decodes and moved to the front of the batch.
        return reorder_batch_to_split_decodes_and_prefills(
            input_batch,
            scheduler_output,
            decode_threshold=self.tree_attn_bias.shape[0])

    def build(
        self,
        common_prefix_len: int,
        common_attn_metadata: CommonAttentionMetadata,
        fast_build: bool = False,
    ) -> TreeAttentionMetadata:
        """Assemble per-step tree attention metadata."""
        decode_threshold = self.tree_attn_bias.shape[0]
        num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
            split_decodes_and_prefills(common_attn_metadata,
                                       decode_threshold=decode_threshold))

        num_actual_tokens = common_attn_metadata.num_actual_tokens
        q_start_loc = common_attn_metadata.query_start_loc
        max_query_len = common_attn_metadata.max_query_len
        kv_seqlens = common_attn_metadata.seq_lens
        max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
        block_table = common_attn_metadata.block_table_tensor
        slot_mapping = common_attn_metadata.slot_mapping

        return TreeAttentionMetadata(
            num_actual_tokens=num_actual_tokens,
            num_prefill_tokens=num_prefill_tokens,
            num_decode_tokens=num_decode_tokens,
            num_prefills=num_prefills,
            num_decodes=num_decodes,
            max_query_len=max_query_len,
            query_start_loc=q_start_loc,
            max_seq_len=max_seq_len,
            seq_lens=kv_seqlens,
            block_table=block_table,
            slot_mapping=slot_mapping,
            tree_attn_bias=self.tree_attn_bias,
        )

    def build_for_drafting(
        self,
        common_attn_metadata: CommonAttentionMetadata,
        draft_index: int,
    ) -> TreeAttentionMetadata:
        """Build metadata for one drafting step, temporarily swapping in
        the bias slice appropriate for *draft_index*."""
        # Cache the original tree attention bias.
        orig_tree_attn_bias = self.tree_attn_bias

        if draft_index == 0:
            # Use prefill for drafting at the root level.
            self.tree_attn_bias = torch.empty(0)
        else:
            # Slice the tree attention bias for drafting. Exclude
            # the root level.
            start, end = 1, 1 + common_attn_metadata.max_query_len
            self.tree_attn_bias = self.tree_attn_bias[start:end,
                                                      start:end].contiguous()

        # Build attention bias.
        attn_metadata = self.build(0, common_attn_metadata, fast_build=True)

        # Reset the tree attention bias to the original value.
        self.tree_attn_bias = orig_tree_attn_bias
        return attn_metadata
251
+
252
+
253
+ def _get_depth_counts(sorted_tree_choices: list[tuple[int, ...]]) -> list[int]:
254
+ # Count the number of choices at each depth of the tree.
255
+ depth_counts = []
256
+ prev_depth = 0
257
+ for path in sorted_tree_choices:
258
+ depth = len(path)
259
+ if depth != prev_depth:
260
+ depth_counts.append(0)
261
+ depth_counts[depth - 1] += 1
262
+ prev_depth = depth
263
+ return depth_counts
264
+
265
+
266
+ def _prepare_tree_attn_bias(
267
+ sorted_tree_choices: list[tuple[int, ...]],
268
+ depth_counts: list[int],
269
+ dtype: Optional[torch.dtype],
270
+ device: Optional[torch.device],
271
+ ) -> torch.Tensor:
272
+ # +1 comes from the additional root node.
273
+ tree_len = len(sorted_tree_choices) + 1
274
+ tree_attn_mask = torch.full((tree_len, tree_len),
275
+ -torch.inf,
276
+ device=device,
277
+ dtype=dtype)
278
+
279
+ # Set diagonal to all zeros. Each token should
280
+ # attend to itself.
281
+ mask_val = 0
282
+ for i in range(tree_len):
283
+ tree_attn_mask[i, i] = mask_val
284
+
285
+ # Set root to all zeros. All tokens attend to it.
286
+ tree_attn_mask[:, 0] = mask_val
287
+
288
+ # Set all ancestors to zeros.
289
+ start = 0
290
+ for i in range(len(depth_counts)):
291
+ for j in range(depth_counts[i]):
292
+ cur_tree_choice = sorted_tree_choices[start + j]
293
+ # Retrieve ancestor position.
294
+ if len(cur_tree_choice) == 1:
295
+ continue
296
+ ancestor_idx = []
297
+ for c in range(len(cur_tree_choice) - 1):
298
+ ancestor_idx.append(
299
+ sorted_tree_choices.index(cur_tree_choice[:c + 1]) + 1)
300
+ tree_attn_mask[j + start + 1, ancestor_idx] = mask_val
301
+ start += depth_counts[i]
302
+ return tree_attn_mask
303
+
304
+
305
class TreeAttentionImpl(AttentionImpl):
    """Decoder self-attention with a tree-structured bias on decode tokens."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        self.kv_cache_dtype = kv_cache_dtype
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        if logits_soft_cap is None:
            # Setting logits_soft_cap to 0 means no soft cap.
            logits_soft_cap = 0
        self.logits_soft_cap = logits_soft_cap
        if sliding_window is None:
            # (-1, -1) means an infinite context window.
            self.sliding_window = (-1, -1)
        else:
            self.sliding_window = (sliding_window - 1, 0)

        TreeAttentionBackend.validate_head_size(head_size)

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "TreeAttentionImpl.")

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: TreeAttentionMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with TreeAttention.

        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size]
            kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for TreeAttentionImpl")

        if attn_metadata is None:
            # Profiling run.
            return output

        # Cache the input KVs.
        key_cache, value_cache = kv_cache.unbind(0)
        if self.kv_sharing_target_layer_name is None:
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            # NOTE(woosuk): Here, key and value are padded while slot_mapping is
            # not padded. However, we don't need to do key[:num_actual_tokens]
            # and value[:num_actual_tokens] because the reshape_and_cache_flash
            # op uses the slot_mapping's shape to determine the number of
            # actual tokens.
            ops.reshape_and_cache_flash(
                key,
                value,
                key_cache,
                value_cache,
                attn_metadata.slot_mapping,
                self.kv_cache_dtype,
                layer._k_scale,
                layer._v_scale,
            )

        num_actual_tokens = attn_metadata.num_actual_tokens
        num_decode_tokens = attn_metadata.num_decode_tokens
        # NOTE(review): descale shape is derived from the FULL batch's
        # query_start_loc even though each call below covers only a
        # segment -- confirm this matches what unified_attention expects.
        descale_shape = (attn_metadata.query_start_loc.shape[0] - 1,
                         key.shape[1])
        # Prefill segment: plain causal attention, no tree bias.
        if prefill_meta := attn_metadata.prefill_metadata:
            unified_attention(
                q=query[num_decode_tokens:num_actual_tokens],
                k=key_cache,
                v=value_cache,
                out=output[num_decode_tokens:num_actual_tokens],
                cu_seqlens_q=prefill_meta.query_start_loc,
                max_seqlen_q=prefill_meta.max_query_len,
                seqused_k=prefill_meta.seq_lens,
                max_seqlen_k=prefill_meta.max_seq_len,
                softmax_scale=self.scale,
                causal=True,
                alibi_slopes=self.alibi_slopes,
                window_size=self.sliding_window,
                block_table=prefill_meta.block_table,
                softcap=self.logits_soft_cap,
                q_descale=None,  # Not supported
                k_descale=layer._k_scale.expand(descale_shape),
                v_descale=layer._v_scale.expand(descale_shape),
            )

        # Decode segment: causal attention plus the tree bias (qq_bias)
        # among the speculative tokens of each request.
        if decode_meta := attn_metadata.decode_metadata:
            unified_attention(
                q=query[:num_decode_tokens],
                k=key_cache,
                v=value_cache,
                out=output[:num_decode_tokens],
                cu_seqlens_q=decode_meta.query_start_loc,
                max_seqlen_q=decode_meta.max_query_len,
                seqused_k=decode_meta.seq_lens,
                max_seqlen_k=decode_meta.max_seq_len,
                softmax_scale=self.scale,
                causal=True,
                alibi_slopes=self.alibi_slopes,
                qq_bias=decode_meta.tree_attn_bias,
                window_size=self.sliding_window,
                block_table=decode_meta.block_table,
                softcap=self.logits_soft_cap,
                q_descale=None,  # Not supported
                k_descale=layer._k_scale.expand(descale_shape),
                v_descale=layer._v_scale.expand(descale_shape),
            )
        return output
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/triton_attn.py ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with PagedAttention and Triton prefix prefill."""
4
+ from dataclasses import dataclass
5
+ from functools import cache
6
+ from typing import ClassVar, Optional
7
+
8
+ import torch
9
+
10
+ from vllm import _custom_ops as ops
11
+ from vllm import envs
12
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
13
+ AttentionMetadata, AttentionType)
14
+ from vllm.attention.ops.chunked_prefill_paged_decode import (
15
+ chunked_prefill_paged_decode)
16
+ from vllm.attention.ops.paged_attn import PagedAttention
17
+ from vllm.config import VllmConfig
18
+ from vllm.logger import init_logger
19
+ from vllm.platforms import current_platform
20
+ from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
21
+ from vllm.v1.attention.backends.utils import (AttentionCGSupport,
22
+ AttentionMetadataBuilder,
23
+ CommonAttentionMetadata)
24
+ from vllm.v1.kv_cache_interface import AttentionSpec
25
+
26
+ logger = init_logger(__name__)
27
+
28
+
29
@dataclass
class TritonAttentionMetadata:
    """Per-batch attention metadata consumed by TritonAttentionImpl.forward."""

    # NOTE(sang): Definition of context_len, query_len, and seq_len.
    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ---------------------|
    #                                   |-- query_len ---|

    num_actual_tokens: int  # Number of tokens excluding padding.
    max_query_len: int  # Longest query in the batch.
    # (num_reqs + 1,) cumulative start offset of each request in the
    # flattened query tensor (from CommonAttentionMetadata.query_start_loc).
    query_start_loc: torch.Tensor
    max_seq_len: int  # Longest total sequence (context + new tokens).
    # (num_reqs,) total sequence length per request
    # (from CommonAttentionMetadata.seq_lens).
    seq_lens: torch.Tensor
    # Per-request KV-cache block table
    # (from CommonAttentionMetadata.block_table_tensor).
    block_table: torch.Tensor
    # Per-token KV-cache slot indices
    # (from CommonAttentionMetadata.slot_mapping).
    slot_mapping: torch.Tensor

    # For cascade attention. use_cascade is True iff common_prefix_len > 0;
    # the three tensors below are None when cascade is disabled.
    use_cascade: bool
    common_prefix_len: int
    cu_prefix_query_lens: Optional[torch.Tensor]
    prefix_kv_lens: Optional[torch.Tensor]
    suffix_kv_lens: Optional[torch.Tensor]

    # Optional aot scheduling
    scheduler_metadata: Optional[torch.Tensor] = None
    prefix_scheduler_metadata: Optional[torch.Tensor] = None
57
+
58
+
59
class TritonAttentionMetadataBuilder(
        AttentionMetadataBuilder[TritonAttentionMetadata]):
    """Builds TritonAttentionMetadata from per-batch CommonAttentionMetadata."""

    # Full cudagraph support, including mixed prefill/decode batches.
    cudagraph_support: ClassVar[AttentionCGSupport] = AttentionCGSupport.ALWAYS

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        self.device = device
        self.block_size = kv_cache_spec.block_size
        self.kv_cache_spec = kv_cache_spec

        # Head geometry is derived from the model config (TP-sharded counts).
        model_config = vllm_config.model_config
        self.num_heads_q = model_config.get_num_attention_heads(
            vllm_config.parallel_config)
        self.num_heads_kv = model_config.get_num_kv_heads(
            vllm_config.parallel_config)
        self.headdim = model_config.get_head_size()

    def build_for_cudagraph_capture(
        self, common_attn_metadata: CommonAttentionMetadata
    ) -> TritonAttentionMetadata:
        """Build metadata for a dummy batch used during CUDA graph capture."""
        attn_metadata = self.build(0, common_attn_metadata)
        # When doing full graph capture, setting seq_lens to
        # max_model_len will cause graph capture to be extremely
        # slow, so here we set it to 1.
        attn_metadata.seq_lens.fill_(1)
        return attn_metadata

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> TritonAttentionMetadata:
        """Build per-layer metadata from the shared per-batch metadata.

        Args:
            common_prefix_len: Length of the common prefix; > 0 enables
                cascade attention.
            common_attn_metadata: Shared per-batch metadata.
            fast_build: Unused here; part of the builder interface.
        """
        num_actual_tokens = common_attn_metadata.num_actual_tokens
        max_query_len = common_attn_metadata.max_query_len

        # Use the CPU copy to avoid a device sync when computing the max.
        max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
        query_start_loc = common_attn_metadata.query_start_loc
        seq_lens = common_attn_metadata.seq_lens
        block_table_tensor = common_attn_metadata.block_table_tensor
        slot_mapping = common_attn_metadata.slot_mapping

        use_cascade = common_prefix_len > 0

        if use_cascade:
            # Cascade attention: a single shared prefix of length
            # common_prefix_len plus per-request suffixes.
            cu_prefix_query_lens = torch.tensor([0, num_actual_tokens],
                                                dtype=torch.int32,
                                                device=self.device)
            prefix_kv_lens = torch.tensor([common_prefix_len],
                                          dtype=torch.int32,
                                          device=self.device)
            suffix_kv_lens = (common_attn_metadata.seq_lens_cpu -
                              common_prefix_len)
            suffix_kv_lens = suffix_kv_lens.to(self.device)
        else:
            cu_prefix_query_lens = None
            prefix_kv_lens = None
            suffix_kv_lens = None
            prefix_scheduler_metadata = None

        attn_metadata = TritonAttentionMetadata(
            num_actual_tokens=num_actual_tokens,
            max_query_len=max_query_len,
            query_start_loc=query_start_loc,
            max_seq_len=max_seq_len,
            seq_lens=seq_lens,
            block_table=block_table_tensor,
            slot_mapping=slot_mapping,
            use_cascade=use_cascade,
            common_prefix_len=common_prefix_len,
            cu_prefix_query_lens=cu_prefix_query_lens,
            prefix_kv_lens=prefix_kv_lens,
            suffix_kv_lens=suffix_kv_lens,
            prefix_scheduler_metadata=prefix_scheduler_metadata,
        )
        return attn_metadata
133
+
134
+
135
class TritonAttentionBackend(AttentionBackend):
    """Backend descriptor for the Triton-based v1 attention implementation."""

    accept_output_buffer: bool = True

    @staticmethod
    def get_name() -> str:
        return "TRITON_ATTN_VLLM_V1"

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        return [32, 64, 96, 128, 160, 192, 224, 256]

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        """Raise ValueError if `head_size` is not supported by this backend."""
        valid_sizes = cls.get_supported_head_sizes()
        if head_size in valid_sizes:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {valid_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_impl_cls() -> type["TritonAttentionImpl"]:
        return TritonAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return TritonAttentionMetadata

    @staticmethod
    def get_builder_cls() -> type["TritonAttentionMetadataBuilder"]:
        return TritonAttentionMetadataBuilder

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        """Return the (K/V, blocks, block, heads, head_dim) cache shape."""
        if block_size % 16:
            raise ValueError("Block size must be a multiple of 16.")
        return (2, num_blocks, block_size, num_kv_heads, head_size)

    @staticmethod
    def use_cascade_attention(*args, **kwargs) -> bool:
        # Cascade attention is not used by this backend.
        return False
188
+
189
+
190
@cache
def use_aiter_unified_attention() -> bool:
    """Check if aiter unified attention should be used."""
    # Both the global ROCm AITER switch and the unified-attention-specific
    # flag must be enabled; the result is computed once and cached.
    if not envs.VLLM_ROCM_USE_AITER:
        return False
    return envs.VLLM_USE_AITER_UNIFIED_ATTENTION
197
+
198
+
199
class TritonAttentionImpl(AttentionImpl):
    """Triton-based attention implementation.

    At construction time this picks one of two execution paths:
    - split prefill/decode kernels (chunked_prefill_paged_decode) when
      VLLM_V1_USE_PREFILL_DECODE_ATTENTION is set, or
    - a unified attention kernel (ROCm aiter or vLLM Triton) otherwise.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        # NOTE(review): annotated Optional[int] but the name suggests it
        # holds a layer *name* (likely str) — confirm against callers.
        kv_sharing_target_layer_name: Optional[int] = None,
        sinks: Optional[torch.Tensor] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        # (left, right) window in flash-attn convention; (-1, -1) disables
        # sliding-window attention.
        if sliding_window is None:
            self.sliding_window = (-1, -1)
        else:
            self.sliding_window = (sliding_window - 1, 0)
        self.kv_cache_dtype = kv_cache_dtype
        if logits_soft_cap is None:
            # In flash-attn, setting logits_soft_cap as 0 means no soft cap.
            logits_soft_cap = 0
        self.logits_soft_cap = logits_soft_cap
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name

        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        TritonAttentionBackend.validate_head_size(head_size)

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "TritonAttentionImpl")

        self.fp8_dtype = current_platform.fp8_dtype()
        self.force_prefill_decode_attn = \
            envs.VLLM_V1_USE_PREFILL_DECODE_ATTENTION

        if not self.force_prefill_decode_attn:
            # If not using prefill decode attention, we use the Triton
            # unified attention implementation.
            if use_aiter_unified_attention():
                logger.info_once(
                    "Using aiter unified attention for TritonAttentionImpl")
                from aiter.ops.triton.unified_attention import (
                    unified_attention)
                self.unified_attention = unified_attention
            else:
                logger.info_once(
                    "Using vllm unified attention for TritonAttentionImpl")
                from vllm.attention.ops.triton_unified_attention import (
                    unified_attention)
                self.unified_attention = unified_attention

        self.sinks = sinks
        if sinks is not None:
            # Attention sinks are per-head scalars; shape must match heads.
            assert sinks.shape[0] == num_heads, (
                "Sinks must have the same number of heads as the number of "
                f"heads in the layer. Sinks shape: {sinks.shape}, "
                f"num_heads: {num_heads}.")

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: TritonAttentionMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with Triton attention.

        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size]
            kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for TritonAttentionImpl")

        if attn_metadata is None:
            # Profiling run.
            return output

        # This backend never builds cascade metadata (see
        # TritonAttentionBackend.use_cascade_attention).
        assert attn_metadata.use_cascade is False

        # IMPORTANT!
        # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in
        # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead
        # in this method. For example, `view` and `slice` (or `[:n]`) operations
        # are surprisingly slow even in the case they do not invoke any GPU ops.
        # Minimize the PyTorch ops in this method as much as possible.
        # Whenever making a change in this method, please benchmark the
        # performance to make sure it does not introduce any overhead.

        use_prefill_decode_attn = self.force_prefill_decode_attn
        num_actual_tokens = attn_metadata.num_actual_tokens

        # The two paths use different KV-cache layouts.
        if use_prefill_decode_attn:
            key_cache, value_cache = PagedAttention.split_kv_cache(
                kv_cache, self.num_kv_heads, self.head_size)
        else:
            key_cache, value_cache = kv_cache.unbind(0)

        if self.kv_sharing_target_layer_name is None:
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            if use_prefill_decode_attn:
                PagedAttention.write_to_paged_cache(
                    key,
                    value,
                    key_cache,
                    value_cache,
                    attn_metadata.slot_mapping,
                    self.kv_cache_dtype,
                    layer._k_scale,
                    layer._v_scale,
                )
            else:
                torch.ops._C_cache_ops.reshape_and_cache_flash(
                    key,
                    value,
                    key_cache,
                    value_cache,
                    attn_metadata.slot_mapping,
                    self.kv_cache_dtype,
                    layer._k_scale,
                    layer._v_scale,
                )

        if self.kv_cache_dtype.startswith("fp8"):
            # Reinterpret the cache bytes as the platform fp8 dtype and
            # (on non-ROCm) quantize the query to fp8 as well.
            key_cache = key_cache.view(self.fp8_dtype)
            value_cache = value_cache.view(self.fp8_dtype)
            num_tokens, num_heads, head_size = query.shape
            assert layer._q_scale == 1.0, \
                "A non 1.0 q_scale is not currently supported."
            if not current_platform.is_rocm():
                # Skip Q quantization on ROCm, since dequantizing back to
                # f32 in the attention kernel is not supported.
                query, _ = ops.scaled_fp8_quant(
                    query.reshape(
                        (num_tokens, num_heads * head_size)).contiguous(),
                    layer._q_scale)
                query = query.reshape((num_tokens, num_heads, head_size))

        cu_seqlens_q = attn_metadata.query_start_loc
        seqused_k = attn_metadata.seq_lens
        max_seqlen_q = attn_metadata.max_query_len
        max_seqlen_k = attn_metadata.max_seq_len
        block_table = attn_metadata.block_table

        if use_prefill_decode_attn:
            # Compute attention and update output up to `num_actual_tokens`.
            chunked_prefill_paged_decode(
                query=query[:num_actual_tokens],
                key=key[:num_actual_tokens],
                value=value[:num_actual_tokens],
                output=output[:num_actual_tokens],
                kv_cache_dtype=self.kv_cache_dtype,
                key_cache=key_cache,
                value_cache=value_cache,
                block_table=block_table,
                query_start_loc=cu_seqlens_q,
                seq_lens=seqused_k,
                max_seq_len=max_seqlen_k,
                max_query_len=max_seqlen_q,
                k_scale=layer._k_scale,
                v_scale=layer._v_scale,
                alibi_slopes=self.alibi_slopes,
                sliding_window=self.sliding_window[0],
                sm_scale=self.scale,
                sinks=self.sinks,
            )

        else:
            # Per-request K/V descale tensors expanded from the scalar scales.
            descale_shape = (cu_seqlens_q.shape[0] - 1, key.shape[1])

            self.unified_attention(
                q=query[:num_actual_tokens],
                k=key_cache,
                v=value_cache,
                out=output[:num_actual_tokens],
                cu_seqlens_q=cu_seqlens_q,
                max_seqlen_q=max_seqlen_q,
                seqused_k=seqused_k,
                max_seqlen_k=max_seqlen_k,
                softmax_scale=self.scale,
                causal=True,
                alibi_slopes=self.alibi_slopes,
                window_size=self.sliding_window,
                block_table=block_table,
                softcap=self.logits_soft_cap,
                q_descale=None,  # Not supported
                k_descale=layer._k_scale.expand(descale_shape),
                v_descale=layer._v_scale.expand(descale_shape),
                sinks=self.sinks,
            )

        return output
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/utils.py ADDED
@@ -0,0 +1,715 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ import abc
4
+ import enum
5
+ import functools
6
+ from abc import abstractmethod
7
+ from dataclasses import dataclass, make_dataclass
8
+ from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Generic, Optional,
9
+ TypeVar)
10
+
11
+ import numpy as np
12
+ import torch
13
+
14
+ from vllm.config import VllmConfig, get_layers_from_vllm_config
15
+ from vllm.utils import cdiv
16
+
17
+ if TYPE_CHECKING:
18
+ from vllm.attention.backends.abstract import AttentionImpl
19
+ from vllm.v1.core.sched.output import SchedulerOutput
20
+ from vllm.v1.worker.gpu_input_batch import InputBatch
21
+
22
+ import vllm.envs as envs
23
+ from vllm.attention.backends.abstract import AttentionBackend
24
+ from vllm.attention.layer import Attention
25
+ from vllm.distributed.kv_transfer.kv_connector.utils import (
26
+ get_kv_connector_cache_layout)
27
+ from vllm.logger import init_logger
28
+ from vllm.v1.kv_cache_interface import AttentionSpec
29
+
30
+ logger = init_logger(__name__)
31
+ _KV_CACHE_LAYOUT_OVERRIDE = None
32
+
33
+
34
@dataclass
class CommonAttentionMetadata:
    """
    Per-batch attention metadata, shared across layers and backends.
    AttentionMetadataBuilder instances use it to construct per-layer metadata.

    For many of the tensors we keep both GPU and CPU versions; the CPU
    copies let builders compute scalars (e.g. max) without a device sync.
    """

    query_start_loc: torch.Tensor
    query_start_loc_cpu: torch.Tensor
    """(batch_size + 1,), the start location of each request in query Tensor"""

    seq_lens: torch.Tensor
    seq_lens_cpu: torch.Tensor
    """(batch_size,), the length of each request including both computed tokens
    and newly scheduled tokens"""

    num_computed_tokens_cpu: torch.Tensor
    """(batch_size,), the number of computed tokens for each request"""

    num_reqs: int
    """Number of requests"""
    num_actual_tokens: int
    """Total number of tokens in batch"""
    max_query_len: int
    """Longest query in batch"""

    # Per-request KV-cache block table and per-token slot indices.
    block_table_tensor: torch.Tensor
    slot_mapping: torch.Tensor

    # Whether attention for this batch is causal (default for decoders).
    causal: bool = True
66
+
67
+
68
@dataclass
class UbatchSlice:
    """Slices selecting one micro-batch out of a larger batch.

    request_slice indexes per-request tensors (e.g. seq_lens,
    block_table_tensor); token_slice indexes per-token tensors
    (e.g. slot_mapping).
    """
    request_slice: slice
    token_slice: slice
72
+
73
+
74
def slice_query_start_locs(
    query_start_loc: torch.Tensor,
    request_slice: slice,
) -> torch.Tensor:
    """
    Creates a new query_start_loc that corresponds to the requests in
    request_slice.

    Note: This function creates a new tensor to hold the new query_start_locs.
    This will break cudagraph compatibility.
    """
    # Take the (stop - start + 1) boundary entries for the selected requests
    # and rebase them so the first entry is 0.
    base = query_start_loc[request_slice.start]
    window = query_start_loc[request_slice.start:request_slice.stop + 1]
    return window - base
87
+
88
+
89
def _make_metadata_with_slice(
        ubatch_slice: UbatchSlice,
        attn_metadata: CommonAttentionMetadata) -> CommonAttentionMetadata:
    """
    Build a CommonAttentionMetadata restricted to the requests and tokens
    selected by ubatch_slice.
    """

    req_slice = ubatch_slice.request_slice
    tok_slice = ubatch_slice.token_slice

    qsl = slice_query_start_locs(attn_metadata.query_start_loc, req_slice)
    assert len(qsl) >= 2, (
        f"query_start_loc must have at least 2 elements, "
        f"got {len(qsl)}")
    qsl_cpu = slice_query_start_locs(attn_metadata.query_start_loc_cpu,
                                     req_slice)

    # Per-request tensors are indexed with the request slice;
    # per-token tensors with the token slice.
    seq_lens = attn_metadata.seq_lens[req_slice]
    seq_lens_cpu = attn_metadata.seq_lens_cpu[req_slice]
    num_computed_tokens_cpu = attn_metadata.num_computed_tokens_cpu[req_slice]
    block_table_tensor = attn_metadata.block_table_tensor[req_slice]
    slot_mapping = attn_metadata.slot_mapping[tok_slice]

    # Longest query among the sliced requests, from the rebased CPU offsets.
    max_query_len = int((qsl_cpu[1:] - qsl_cpu[:-1]).abs().max().item())

    return CommonAttentionMetadata(
        query_start_loc=qsl,
        query_start_loc_cpu=qsl_cpu,
        seq_lens=seq_lens,
        seq_lens_cpu=seq_lens_cpu,
        num_computed_tokens_cpu=num_computed_tokens_cpu,
        num_reqs=req_slice.stop - req_slice.start,
        num_actual_tokens=tok_slice.stop - tok_slice.start,
        max_query_len=max_query_len,
        block_table_tensor=block_table_tensor,
        slot_mapping=slot_mapping,
    )
134
+
135
+
136
def split_attn_metadata(
    ubatch_slices: list[UbatchSlice],
    common_attn_metadata: CommonAttentionMetadata,
) -> list[CommonAttentionMetadata]:
    """
    Creates a new CommonAttentionMetadata instance that corresponds to the
    requests for each UbatchSlice in ubatch_slices.

    Note: This function does not modify common_attn_metadata
    """
    return [
        _make_metadata_with_slice(ubatch_slice, common_attn_metadata)
        for ubatch_slice in ubatch_slices
    ]
151
+
152
+
153
+ M = TypeVar("M")
154
+
155
+
156
class AttentionCGSupport(enum.Enum):
    """ Constants for the cudagraph support of the attention backend
    Here we do not consider the cascade attention, as currently
    it is never cudagraph supported."""

    ALWAYS = 3
    """Cudagraph always supported; supports mixed-prefill-decode"""
    UNIFORM_BATCH = 2
    """Cudagraph supported for batches that only contain query lengths that
    are the same, this can be used for spec-decode
    i.e. "decodes" are 1 + num_speculative_tokens"""
    UNIFORM_SINGLE_TOKEN_DECODE = 1
    """Cudagraph supported for batches that only contain query_len==1
    decodes"""
    NEVER = 0
    """NO cudagraph support"""
171
+
172
+
173
class AttentionMetadataBuilder(abc.ABC, Generic[M]):
    """Abstract base for per-backend attention metadata builders."""

    # Does this backend/builder support CUDA Graphs for attention (default: no).
    cudagraph_support: ClassVar[AttentionCGSupport] = \
        AttentionCGSupport.NEVER
    # Does this backend/builder reorder the batch?
    # If not, set this to None. Otherwise set it to the query
    # length that will be pulled into the front of the batch.
    reorder_batch_threshold: ClassVar[Optional[int]] = None

    @abstractmethod
    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        self.kv_cache_spec = kv_cache_spec

    @abstractmethod
    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False) -> M:
        """
        Central method that builds attention metadata.
        Some builders (MLA) require reorder_batch to be called prior to build.

        Args:
            common_prefix_len: The length of the common prefix of the batch.
            common_attn_metadata: The common attention metadata.
            fast_build: The meta-data will prioritize speed of building over
                the speed at execution. Can be used for spec-decode where the
                result of a build call may only be used for few layers/iters.
        """
        raise NotImplementedError

    def build_for_cudagraph_capture(
            self, common_attn_metadata: CommonAttentionMetadata) -> M:
        """
        Build attention metadata for CUDA graph capture. Uses build by default.
        Subclasses that override this method should call self.build or
        super().build_for_cudagraph_capture.
        """
        return self.build(common_prefix_len=0,
                          common_attn_metadata=common_attn_metadata)

    def build_for_drafting(
        self,
        common_attn_metadata: CommonAttentionMetadata,
        draft_index: int,
    ) -> M:
        """
        Build attention metadata for draft model. Uses build by default.

        Args:
            common_attn_metadata: The common attention metadata.
            draft_index: The index of the current draft operation.
                When speculating a chain of tokens, this index refers to the
                draft attempt for the i-th token.
                For tree-based attention, this index instead refers to the
                draft attempt for the i-th level in the tree of tokens.
        """
        return self.build(common_prefix_len=0,
                          common_attn_metadata=common_attn_metadata,
                          fast_build=True)

    def use_cascade_attention(
        self,
        common_prefix_len: int,
        query_lens: np.ndarray,
        num_query_heads: int,
        num_kv_heads: int,
        use_alibi: bool,
        use_sliding_window: bool,
        use_local_attention: bool,
        num_sms: int,
    ) -> bool:
        # Backends opt in by overriding; default is no cascade attention.
        return False
247
+
248
+
249
@functools.lru_cache
def get_kv_cache_layout():
    """Resolve the KV cache layout string.

    Resolution order: programmatic override (set_kv_cache_layout), then the
    VLLM_KV_CACHE_LAYOUT environment variable, then the KV-connector default.

    NOTE: the result is cached for the process lifetime (lru_cache), so an
    override set after the first call has no effect.
    """
    # Format specified by the code.
    global _KV_CACHE_LAYOUT_OVERRIDE

    if _KV_CACHE_LAYOUT_OVERRIDE is not None:
        cache_layout = _KV_CACHE_LAYOUT_OVERRIDE
        logger.info_once("`_KV_CACHE_LAYOUT_OVERRIDE` variable detected. " \
            "Setting KV cache layout to %s.", cache_layout)
        return cache_layout

    # Format specified by the user.
    cache_layout = envs.VLLM_KV_CACHE_LAYOUT
    # When neither the user nor the override specified a layout, get default
    if cache_layout is None:
        cache_layout = get_kv_connector_cache_layout()
    else:
        logger.info_once("`VLLM_KV_CACHE_LAYOUT` environment variable " \
            "detected. Setting KV cache layout to %s.", cache_layout)
    return cache_layout
269
+
270
+
271
def set_kv_cache_layout(cache_layout: str):
    """Programmatically override the layout returned by get_kv_cache_layout().

    NOTE: get_kv_cache_layout() is lru_cache'd, so this only takes effect if
    called before its first invocation.
    """
    global _KV_CACHE_LAYOUT_OVERRIDE
    _KV_CACHE_LAYOUT_OVERRIDE = cache_layout
274
+
275
+
276
@dataclass
class PerLayerParameters:
    """
    Currently, FlashInfer backend only support models in which all layers share
    the same values for the following hyperparameters. Should not be used for
    trtllm-gen backend since it supports different values for the following
    hyperparameters.
    """

    # Left sliding-window size; -1 means no sliding window.
    window_left: int
    # Soft cap on attention logits; None means no cap.
    logits_soft_cap: Optional[float]
    # Softmax scale applied to QK^T.
    sm_scale: float
    # Whether the layer uses attention sinks.
    has_sinks: bool = False
289
+
290
+
291
def get_per_layer_parameters(
        vllm_config: VllmConfig, layer_names: list[str],
        cls_: type['AttentionImpl']) -> dict[str, PerLayerParameters]:
    """
    Scan layers in `layer_names` and determine some hyperparameters
    to use during `plan`.
    """

    layers = get_layers_from_vllm_config(vllm_config, Attention, layer_names)
    params_by_layer: dict[str, PerLayerParameters] = {}

    for name, layer in layers.items():
        impl = layer.impl
        assert isinstance(impl, cls_)

        # Read the hyperparameters off the attention implementation,
        # falling back to "disabled" values when an attribute is absent.
        sliding = getattr(impl, "sliding_window", None)
        params_by_layer[name] = PerLayerParameters(
            window_left=sliding[0] if sliding is not None else -1,
            logits_soft_cap=getattr(impl, "logits_soft_cap", None),
            sm_scale=impl.scale,
            has_sinks=getattr(impl, "sinks", None) is not None,
        )

    return params_by_layer
318
+
319
+
320
def infer_global_hyperparameters(
        per_layer_params: dict[str, PerLayerParameters]) -> PerLayerParameters:
    """
    Currently, FlashInfer backends other than trtllm-gen
    only support models in which all layers share
    the same values for the following hyperparameters:
    - `window_left`
    - `logits_soft_cap`
    - `sm_scale`

    So this function asserts that all layers share the same values for these
    hyperparameters and returns the global values.

    Raises:
        ValueError: if `window_left` differs across layers.
        AssertionError: if any other hyperparameter differs across layers.
    """

    assert len(per_layer_params) > 0, "No attention layers found in the model."

    param_sets = list(per_layer_params.values())
    global_params = param_sets[0]

    # trtllm attention doesn't need global hyper params so disable the check
    if not envs.VLLM_USE_TRTLLM_ATTENTION:
        for params in param_sets:
            if params.window_left != global_params.window_left:
                raise ValueError(
                    "Window left is not the same for all layers. " \
                    "One potential fix is to set disable_sliding_window=True")
            # BUGFIX: the original message concatenated its fragments without
            # separating spaces, rendering "...alllayers..." and
            # "...hyperparameters:`window_left`...".
            assert params == global_params, (
                "FlashInfer backend currently only supports models in which "
                "all layers share the same values for the following "
                "hyperparameters: `window_left`, `logits_soft_cap`, "
                "`sm_scale`.")

    return global_params
353
+
354
+
355
+ #
356
+ # Take in `query_start_loc_np` and `seq_lens_np` and break the sequences into
357
+ # local attention blocks, where each block is passed to the attention kernel
358
+ # as an independent local ("virtual") batch item.
359
+ #
360
+ # For example, if are performing a chunked prefill a batch of 3 sequences:
361
+ # q_seqlens = [4, 10, 5]
362
+ # kv_seqlens = [6, 17, 9]
363
+ # Then normally for regular attention we would compute with an attention mask
364
+ # for batch idx 0 (q_seqlens = 4, kv_seqlens = 6) like:
365
+ # batch idx: 0 (q_seqlens = 4, kv_seqlens = 6)
366
+ # k_toks > 0 1 2 3 4 5
367
+ # q_toks v _____________
368
+ # 0 | 1 1 1
369
+ # 1 | 1 1 1 1
370
+ # 2 | 1 1 1 1 1
371
+ # 3 | 1 1 1 1 1 1
372
+ #
373
+ # for local attention (with attn_chunk_size = 4) we would compute with an
374
+ # attention mask like:
375
+ # batch idx: 0 (q_seqlens = 4, kv_seqlens = 6, attn_chunk_size = 4)
376
+ # k_toks > 0 1 2 3 4 5
377
+ # q_toks v _____________
378
+ # 0 | 1 1 1
379
+ # 1 | 1 1 1 1
380
+ # 2 | 1
381
+ # 3 | 1 1
382
+ #
383
+ # We can simulate this mask using standard flash-attention by breaking the
384
+ # sequences into local ("virtual") batches, where each local batch item is a
385
+ # local attention block, so in this case batch idx 0 would be broken up into:
386
+ #
387
+ # local-batch idx: 0 (q_seqlens = 2, kv_seqlens = 4) (batch 0)
388
+ # k_toks > 0 1 2 3
389
+ # q_toks v _____________
390
+ # 0 | 1 1 1
391
+ # 1 | 1 1 1 1
392
+ # local-batch idx: 1 (q_seqlens = 2, kv_seqlens = 2) (batch 0)
393
+ # k_toks > 4 5
394
+ # q_toks v _____________
395
+ # 2 | 1
396
+ # 3 | 1 1
397
+ #
398
+ # e.g. if we have:
399
+ # attn_chunk_size = 4
400
+ # query_start_loc_np = [0, 4, 14, 19] (q_seqlens = [4, 10, 5])
401
+ # Then this function would return:
402
+ # __b0__ ______b1______ __b2__ < orig batch indices
403
+ # q_seqlens_local = [ 2, 2, 1, 4, 4, 1, 4, 1]
404
+ # cu_seqlens_q_local = [0, 4, 6, 10, 14, 18, 19, 23, 24]
405
+ # seqlens_k_local = [ 4, 2, 4, 4, 4, 1, 4, 1]
406
+ # block_table_local : shape[local_virtual_batches, pages_per_local_batch]
407
def make_local_attention_virtual_batches(
    attn_chunk_size: int,
    common_attn_metadata: CommonAttentionMetadata,
    block_size: int = 0,
) -> CommonAttentionMetadata:
    """Split requests into per-chunk "virtual" batches for local attention.

    Each local attention block (at most `attn_chunk_size` kv tokens) becomes
    an independent batch item so that a standard causal attention kernel can
    emulate chunked local attention (see the worked example in the comment
    above this function).

    Args:
        attn_chunk_size: Local attention window size; must be a positive
            multiple of `block_size`.
        common_attn_metadata: Metadata for the real (non-virtual) batch.
        block_size: KV-cache page size in tokens; must be positive.

    Returns:
        A new `CommonAttentionMetadata` describing the virtual batches.
    """
    # Validate up front: both values are used as divisors below, and a bare
    # ZeroDivisionError (block_size=0 is the default!) or a silent mis-split
    # would be much harder to diagnose at the point of use.
    assert block_size > 0, f"block_size must be positive, got {block_size}"
    assert attn_chunk_size % block_size == 0, \
        f"attn_chunk_size {attn_chunk_size} is not " \
        f"divisible by block_size {block_size}"

    query_start_loc_np = common_attn_metadata.query_start_loc_cpu.numpy()
    seq_lens_np = common_attn_metadata.seq_lens_cpu.numpy()
    block_table = common_attn_metadata.block_table_tensor
    device = common_attn_metadata.query_start_loc.device

    q_seqlens = query_start_loc_np[1:] - query_start_loc_np[:-1]
    actual_batch_size = seq_lens_np.shape[0]

    # Handle if we are starting in the middle of a local attention block,
    # we assume q_seqlens > 0 (for all elements), for each batch idx we compute
    # the number of tokens that are not in the first local attention block and
    # then we can simply use a cdiv for the rest.
    # For example if we have:
    #   attn_chunk_size = 4
    #   q_seqlens = [4, 10, 5]
    #   k_seqlens = [6, 17, 9]
    # Then we would get:
    #   new_tokens_in_first_block = [2, 1, 4]
    #   local_blocks = [2, 4, 2]
    q_tokens_in_first_block = np.minimum(
        attn_chunk_size - ((seq_lens_np - q_seqlens) % attn_chunk_size),
        q_seqlens).astype(np.int32)
    tokens_in_last_block = attn_chunk_size + (seq_lens_np % -attn_chunk_size)
    local_blocks = 1 + cdiv(q_seqlens - q_tokens_in_first_block,
                            attn_chunk_size)

    # Once we know the number of local blocks we can compute the request spans
    # for each batch idx, we can figure out the number of "virtual" requests we
    # have to make,
    # For the above example we would get:
    #   seqlens_q_local = [2, 2, 1, 4, 4, 1, 4, 1]
    #
    # First get a batched arange. (E.g., [2, 4, 2] -> [0, 1, 0, 1, 2, 3, 0, 1])
    # (TODO: make a utility to share this code with _prepare_inputs)
    # arange step 1. [2, 4, 2] -> [2, 6, 8]
    cu_num_blocks = np.cumsum(local_blocks)
    virtual_batches = cu_num_blocks[-1]
    # arange step 2. [2, 6, 8] -> [0, 0, 2, 2, 2, 2, 6, 6]
    block_offsets = np.repeat(cu_num_blocks - local_blocks, local_blocks)
    # arange step 3. [0, 1, 0, 1, 2, 3, 0, 1]
    arange = np.arange(virtual_batches, dtype=np.int32) - block_offsets
    # also compute reverse arange (i.e. [1, 0, 3, 2, 1, 0, 1, 0])
    rarange = np.repeat(local_blocks, local_blocks) - arange - 1
    # Then we can compute the seqlens_q_local, handling the fact that the
    # first and last blocks could be partial
    seqlens_q_local = \
        np.repeat(q_seqlens - q_tokens_in_first_block, local_blocks)
    # set the first block since this may be a partial block
    seqlens_q_local[arange == 0] = q_tokens_in_first_block
    # set the remaining blocks
    seqlens_q_local[arange > 0] = np.minimum(
        seqlens_q_local - attn_chunk_size * (arange - 1),
        attn_chunk_size)[arange > 0]

    # convert from q_seqlens to cu_seqlens_q
    cu_seqlens_q_local = np.pad(np.cumsum(seqlens_q_local), (1, 0))\
        .astype(np.int32)

    # compute the seqlens_k_local,
    # basically a full local attention block for all but the last block in each
    # batch
    # For our example this will be:
    #   seqlens_k_local = [4, 2, 4, 4, 4, 1, 4, 1]
    seqlens_k_local = np.full(cu_num_blocks[-1],
                              attn_chunk_size,
                              dtype=np.int32)
    seqlens_k_local[cu_num_blocks - 1] = tokens_in_last_block
    num_computed_tokens_local = seqlens_k_local - seqlens_q_local

    k_seqstarts_absolute = np.repeat(seq_lens_np, local_blocks) - \
        (rarange * attn_chunk_size +
         np.repeat(tokens_in_last_block, local_blocks))
    # For the example the local attention blocks start at:
    #                          _b0_  _____b1_____  _b2_
    #   k_seqstarts_absolute = [0, 4, 4, 8, 12, 16, 4, 8]
    block_starts = k_seqstarts_absolute // block_size
    pages_per_local_batch = attn_chunk_size // block_size

    # Create a block_table for the local attention blocks
    # For our example if we have a block-table like (assuming block_size=2):
    #   block_table = [
    #     [ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9], < batch 0
    #     [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], < batch 1
    #     [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], < batch 2
    #   ]
    # Then for the local batches we would want a block-table like
    #   block_table_local = [
    #     [ 0,  1 ], < local-batch 0, (batch 0, starting from k[0])
    #     [ 2,  3 ], < local-batch 1, (batch 0, starting from k[4])
    #     [ 12, 13 ], < local-batch 2, (batch 1, starting from k[4])
    #     [ 14, 15 ], < local-batch 3, (batch 1, starting from k[8])
    #     [ 16, 17 ], < local-batch 4, (batch 1, starting from k[12])
    #     [ 18, 19 ], < local-batch 5, (batch 1, starting from k[16])
    #     [ 22, 23 ], < local-batch 6, (batch 2, starting from k[4])
    #     [ 24, 25 ], < local-batch 7, (batch 2, starting from k[8])
    #   ]
    block_indices = np.broadcast_to(
        np.arange(pages_per_local_batch, dtype=np.int32),
        (virtual_batches, pages_per_local_batch)) \
        + np.expand_dims(block_starts, axis=1)
    block_indices = block_indices.flatten().clip(max=block_table.shape[1] - 1)
    batch_indices = np.repeat(np.arange(actual_batch_size, dtype=np.int32),
                              local_blocks * pages_per_local_batch)
    block_table_local = block_table[batch_indices, block_indices]\
        .view(virtual_batches, -1)

    query_start_loc_cpu = torch.from_numpy(cu_seqlens_q_local)
    seq_lens_cpu = torch.from_numpy(seqlens_k_local)

    return CommonAttentionMetadata(
        query_start_loc_cpu=query_start_loc_cpu,
        query_start_loc=query_start_loc_cpu.to(device=device,
                                               non_blocking=True),
        seq_lens_cpu=seq_lens_cpu,
        seq_lens=seq_lens_cpu.to(device=device, non_blocking=True),
        num_computed_tokens_cpu=torch.from_numpy(num_computed_tokens_local),
        num_reqs=len(seq_lens_cpu),
        num_actual_tokens=common_attn_metadata.num_actual_tokens,
        max_query_len=seqlens_q_local.max(),
        block_table_tensor=block_table_local,
        slot_mapping=common_attn_metadata.slot_mapping,
        causal=True,
    )
538
+
539
+
540
def subclass_attention_metadata_builder(
    name_prefix: str,
    builder_cls: type[AttentionMetadataBuilder[M]],
    build_preprocess_fn: Callable[[CommonAttentionMetadata],
                                  CommonAttentionMetadata],
) -> type[AttentionMetadataBuilder[M]]:
    """
    Return a new subclass of `builder_cls` whose .build(...) method
    first calls build_preprocess_fn(common_attn_metadata) on the metadata.
    """
    subclass_name: str = name_prefix + builder_cls.__name__  # type: ignore

    def build(self,
              common_prefix_len: int,
              common_attn_metadata: CommonAttentionMetadata,
              fast_build: bool = False):
        # Preprocess the metadata, then defer to the parent implementation.
        preprocessed = build_preprocess_fn(common_attn_metadata)
        return builder_cls.build(self, common_prefix_len, preprocessed,
                                 fast_build)

    # Assemble the subclass dynamically; everything except build() is
    # inherited unchanged from builder_cls.
    return type(subclass_name, (builder_cls, ), {"build": build})  # type: ignore
567
+
568
+
569
def subclass_attention_backend(
    name_prefix: str, attention_backend_cls: type[AttentionBackend],
    builder_cls: type[AttentionMetadataBuilder[M]]
) -> type[AttentionBackend]:
    """
    Return a new subclass where `get_builder_cls` returns `builder_cls`.
    """
    subclass_name: str = name_prefix + attention_backend_cls.__name__  # type: ignore
    # Only get_builder_cls is overridden; everything else is inherited.
    namespace = {"get_builder_cls": lambda: builder_cls}
    return type(subclass_name, (attention_backend_cls, ), namespace)
580
+
581
+
582
def split_decodes_and_prefills(
    common_attn_metadata: CommonAttentionMetadata,
    decode_threshold: int = 1,
) -> tuple[int, int, int, int]:
    """
    Assuming a reordered batch, finds the boundary between prefill and decode
    requests.

    Args:
        common_attn_metadata: CommonAttentionMetadata object containing the
            batch metadata.
        decode_threshold: The maximum query length to be considered a decode.

    Returns:
        num_decodes: The number of decode requests.
        num_prefills: The number of prefill requests.
        num_decode_tokens: The number of tokens in the decode requests.
        num_prefill_tokens: The number of tokens in the prefill requests.
    """
    max_query_len = common_attn_metadata.max_query_len
    num_reqs = common_attn_metadata.num_reqs
    num_tokens = common_attn_metadata.num_actual_tokens
    query_start_loc = common_attn_metadata.query_start_loc_cpu

    # Fast path: every request is short enough to count as a decode.
    if max_query_len <= decode_threshold:
        return num_reqs, 0, num_tokens, 0

    query_lens = torch.diff(query_start_loc)
    is_prefill = query_lens > decode_threshold
    if not is_prefill.any():
        return num_reqs, 0, num_tokens, 0

    # The batch is reordered, so the first prefill marks the boundary.
    first_prefill = int(is_prefill.int().argmax(dim=-1).item())
    assert torch.all(query_lens[first_prefill:] > decode_threshold)
    assert torch.all(query_lens[:first_prefill] <= decode_threshold)
    num_decodes = first_prefill
    num_decode_tokens = int(query_start_loc[first_prefill].item())
    return (num_decodes, num_reqs - num_decodes, num_decode_tokens,
            num_tokens - num_decode_tokens)
622
+
623
+
624
def reorder_batch_to_split_decodes_and_prefills(
    input_batch: "InputBatch",
    scheduler_output: "SchedulerOutput",
    decode_threshold: int = 1,
) -> bool:
    """
    Reorders the batch to split into prefill and decode requests; places all
    requests with <= decode_threshold tokens at the front of the batch.

    Returns:
        True if the batch was modified, False otherwise.
    """
    # "Decode" here loosely means requests whose attention is likely
    # memory-bound (short queries) and "prefill" those that are likely
    # compute-bound. We want decodes first while doing the fewest swaps
    # possible, since decodes tend to persist across iterations and new
    # requests are generally appended at the back already.
    decode_idxs: list[int] = []
    prefill_idxs: list[int] = []
    for batch_idx, req_id in enumerate(input_batch.req_ids):
        scheduled = scheduler_output.num_scheduled_tokens[req_id]
        # NOTE: a single scheduled token is treated as "decode" even if it
        # is not; some decode kernels only support num_tokens == 1.
        if scheduled <= decode_threshold:
            decode_idxs.append(batch_idx)
        else:
            prefill_idxs.append(batch_idx)

    num_decodes = len(decode_idxs)
    num_prefills = len(prefill_idxs)
    modified = False

    # Walk the decodes from the back and the prefills from the front
    # (both lists are already ascending): any decode that currently sits
    # past the decode region is swapped with the earliest prefill.
    for swap_count in range(1, min(num_decodes, num_prefills) + 1):
        decode_idx = decode_idxs[num_decodes - swap_count]
        if decode_idx < num_decodes:
            # Every remaining decode is already inside the decode region.
            break
        input_batch.swap_states(prefill_idxs[swap_count - 1], decode_idx)
        modified = True

    return modified
685
+
686
+
687
# Extra fields appended to an attention-metadata dataclass when the
# KV-sharing "fast prefill" path is enabled. Each entry is a
# (field_name, type, default) triple as accepted by
# dataclasses.make_dataclass (consumed via subclass_attention_metadata).
KV_SHARING_FAST_PREFILL_METADATA_FIELDS = [
    ('logits_indices_padded', Optional[torch.Tensor], None),
    ('num_logits_indices', int, 0),
]
691
+
692
+
693
def subclass_attention_metadata(
    name_prefix: str,
    metadata_cls: Any,
    fields: list[tuple[str, Any, Any]],
) -> Any:
    """
    Return a new subclass of `metadata_cls` with additional fields
    """
    subclass_name: str = name_prefix + metadata_cls.__name__  # type: ignore
    # make_dataclass handles field ordering/defaults and inheritance.
    return make_dataclass(subclass_name, fields, bases=(metadata_cls, ))
704
+
705
+
706
def make_kv_sharing_fast_prefill_attention_metadata(
        metadata_cls: Any) -> Any:
    """
    Return a new subclass of `metadata_cls` for fast prefill
    """
    # Delegate to the generic helper with the fast-prefill field set.
    return subclass_attention_metadata(
        metadata_cls=metadata_cls,
        name_prefix="KVSharingFastPrefill",
        fields=KV_SHARING_FAST_PREFILL_METADATA_FIELDS,
    )
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/xformers.py ADDED
@@ -0,0 +1,430 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
3
+ """Attention layer with XFormersAttention."""
4
+
5
+ from dataclasses import dataclass
6
+ from typing import TYPE_CHECKING, Optional
7
+
8
+ import torch
9
+
10
+ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
11
+ AttentionMetadata, AttentionType)
12
+ from vllm.attention.ops.triton_unified_attention import unified_attention
13
+ from vllm.config import VllmConfig
14
+ from vllm.logger import init_logger
15
+ from vllm.v1.attention.backends.utils import (
16
+ AttentionMetadataBuilder, CommonAttentionMetadata,
17
+ reorder_batch_to_split_decodes_and_prefills, split_decodes_and_prefills)
18
+ from vllm.v1.kv_cache_interface import AttentionSpec
19
+
20
+ try:
21
+ from xformers import ops as xops
22
+ from xformers.ops.fmha.attn_bias import (
23
+ AttentionBias, PagedBlockDiagonalCausalWithOffsetPaddedKeysMask)
24
+
25
+ XFORMERS_AVAILABLE = True
26
+ except ImportError:
27
+ XFORMERS_AVAILABLE = False
28
+
29
+ if TYPE_CHECKING:
30
+ from vllm.v1.core.sched.output import SchedulerOutput
31
+ from vllm.v1.worker.gpu_input_batch import InputBatch
32
+
33
+ from vllm import _custom_ops as ops
34
+
35
+ logger = init_logger(__name__)
36
+
37
+
38
class XFormersAttentionBackend(AttentionBackend):
    """Backend descriptor for the xFormers-based V1 attention path."""

    accept_output_buffer: bool = True

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        # Every multiple of 8 from 32 through 256 inclusive.
        return list(range(32, 257, 8))

    @classmethod
    def validate_head_size(cls, head_size: int) -> None:
        supported_head_sizes = cls.get_supported_head_sizes()
        if head_size in supported_head_sizes:
            return
        attn_type = cls.__name__.removesuffix("Backend")
        raise ValueError(
            f"Head size {head_size} is not supported by {attn_type}. "
            f"Supported head sizes are: {supported_head_sizes}. "
            "Set VLLM_ATTENTION_BACKEND=FLEX_ATTENTION to use "
            "FlexAttention backend which supports all head sizes.")

    @staticmethod
    def get_name() -> str:
        return "XFORMERS_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["XFormersAttentionImpl"]:
        return XFormersAttentionImpl

    @staticmethod
    def get_metadata_cls() -> type["AttentionMetadata"]:
        return XFormersAttentionMetadata

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> tuple[int, ...]:
        # Layout: (K/V, num_blocks, block_size, num_kv_heads, head_size).
        if block_size % 16 != 0:
            raise ValueError("Block size must be a multiple of 16.")
        return (2, num_blocks, block_size, num_kv_heads, head_size)

    @staticmethod
    def get_builder_cls() -> type["XFormersAttentionMetadataBuilder"]:
        return XFormersAttentionMetadataBuilder

    @staticmethod
    def use_cascade_attention(*args, **kwargs) -> bool:
        return False
121
+
122
+
123
@dataclass
class XFormersAttentionMetadata:
    """Attention metadata for the xFormers V1 backend.

    Layout assumption: the batch has been reordered so that the first
    `num_decodes` requests (and the first `num_decode_tokens` tokens) are
    decodes and the remainder are prefills, as produced by the builder's
    reorder_batch/split_decodes_and_prefills pipeline.
    """

    num_actual_tokens: int  # Number of tokens excluding padding.
    max_query_len: int
    query_start_loc: torch.Tensor
    max_seq_len: int
    seq_lens: torch.Tensor
    block_table: torch.Tensor
    slot_mapping: torch.Tensor

    num_prefill_tokens: int = 0
    num_decode_tokens: int = 0
    num_prefills: int = 0
    num_decodes: int = 0

    # Biases for different attention types.
    attn_bias: Optional["AttentionBias"] = None

    # Self-attention prefill/decode metadata cache
    _cached_prefill_metadata: Optional["XFormersAttentionMetadata"] = None
    _cached_decode_metadata: Optional["XFormersAttentionMetadata"] = None

    @property
    def prefill_metadata(self) -> Optional["XFormersAttentionMetadata"]:
        # Metadata restricted to the prefill requests (the batch tail),
        # or None if there are no prefills. Computed once, then cached.
        if self.num_prefills == 0:
            return None

        if self._cached_prefill_metadata is not None:
            # Recover cached prefill-phase attention
            # metadata structure
            return self._cached_prefill_metadata

        # Prefills occupy indices [num_decodes:] of the request dimension.
        q_start_loc = self.query_start_loc[self.num_decodes:]
        q_seqlens = torch.diff(q_start_loc)
        kv_seqlens = self.seq_lens[self.num_decodes:]
        # Construct & cache prefill-phase attention metadata structure.
        # query_start_loc is rebased to start at 0 for the sliced batch.
        # NOTE(review): attn_bias is not propagated here (unlike
        # decode_metadata) — presumably only the decode path consumes it;
        # confirm against XFormersAttentionImpl.forward.
        self._cached_prefill_metadata = XFormersAttentionMetadata(
            num_actual_tokens=self.num_prefill_tokens,
            max_query_len=int(q_seqlens.max().item()),
            query_start_loc=q_start_loc - q_start_loc[0],
            max_seq_len=int(kv_seqlens.max().item()),
            seq_lens=kv_seqlens,
            block_table=self.block_table[self.num_decodes:],
            slot_mapping=self.slot_mapping[self.num_decode_tokens:],
        )
        return self._cached_prefill_metadata

    @property
    def decode_metadata(self) -> Optional["XFormersAttentionMetadata"]:
        # Metadata restricted to the decode requests (the batch head),
        # or None if there are no decode tokens. Computed once, then cached.
        if self.num_decode_tokens == 0:
            return None

        if self._cached_decode_metadata is not None:
            # Recover cached decode-phase attention
            # metadata structure
            return self._cached_decode_metadata

        q_start_loc = self.query_start_loc
        q_seqlens = torch.diff(q_start_loc)
        decode_kv_seqlens = self.seq_lens[:self.num_decodes]
        # Construct & cache decode-phase attention metadata structure.
        # The slice [:num_decodes + 1] keeps the leading 0 boundary entry.
        self._cached_decode_metadata = XFormersAttentionMetadata(
            num_actual_tokens=self.num_decode_tokens,
            max_query_len=int(q_seqlens[:self.num_decodes].max().item()),
            query_start_loc=q_start_loc[:self.num_decodes + 1],
            max_seq_len=int(decode_kv_seqlens.max().item()),
            seq_lens=decode_kv_seqlens,
            block_table=self.block_table[:self.num_decodes],
            slot_mapping=self.slot_mapping[:self.num_decode_tokens],
            attn_bias=self.attn_bias,
        )
        return self._cached_decode_metadata
+
196
+
197
+ class XFormersAttentionMetadataBuilder(
198
+ AttentionMetadataBuilder[XFormersAttentionMetadata]):
199
+
200
+ def __init__(
201
+ self,
202
+ kv_cache_spec: AttentionSpec,
203
+ layer_names: list[str],
204
+ vllm_config: VllmConfig,
205
+ device: torch.device,
206
+ ):
207
+ assert XFORMERS_AVAILABLE
208
+ self.kv_cache_spec = kv_cache_spec
209
+ self.block_size = kv_cache_spec.block_size
210
+ self._num_decodes = 0
211
+ self._num_decode_tokens = 0
212
+
213
+ def reorder_batch(self, input_batch: "InputBatch",
214
+ scheduler_output: "SchedulerOutput") -> bool:
215
+ return reorder_batch_to_split_decodes_and_prefills(input_batch,
216
+ scheduler_output,
217
+ decode_threshold=1)
218
+
219
+ def build(
220
+ self,
221
+ common_prefix_len: int,
222
+ common_attn_metadata: CommonAttentionMetadata,
223
+ fast_build: bool = False,
224
+ ) -> XFormersAttentionMetadata:
225
+ num_decodes, num_prefills, num_decode_tokens, num_prefill_tokens = (
226
+ split_decodes_and_prefills(common_attn_metadata,
227
+ decode_threshold=1))
228
+
229
+ num_actual_tokens = common_attn_metadata.num_actual_tokens
230
+ q_start_loc = common_attn_metadata.query_start_loc
231
+ q_seqlens = torch.diff(q_start_loc)
232
+ max_query_len = common_attn_metadata.max_query_len
233
+ kv_seqlens = common_attn_metadata.seq_lens
234
+ max_seq_len = int(common_attn_metadata.seq_lens_cpu.max())
235
+ block_table = common_attn_metadata.block_table_tensor
236
+ slot_mapping = common_attn_metadata.slot_mapping
237
+
238
+ bias = None
239
+ if num_decodes > 0:
240
+ # Construct the decoder bias.
241
+ decode_q_seqlens = q_seqlens[:num_decodes]
242
+ decode_kv_seqlens = kv_seqlens[:num_decodes]
243
+ bias = (
244
+ PagedBlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(
245
+ q_seqlen=decode_q_seqlens.tolist(),
246
+ kv_seqlen=decode_kv_seqlens.tolist(),
247
+ page_size=self.block_size,
248
+ block_tables=block_table[:num_decodes],
249
+ device=block_table.device,
250
+ ))
251
+
252
+ return XFormersAttentionMetadata(
253
+ num_actual_tokens=num_actual_tokens,
254
+ num_prefill_tokens=num_prefill_tokens,
255
+ num_decode_tokens=num_decode_tokens,
256
+ num_prefills=num_prefills,
257
+ num_decodes=num_decodes,
258
+ max_query_len=max_query_len,
259
+ query_start_loc=q_start_loc,
260
+ max_seq_len=max_seq_len,
261
+ seq_lens=kv_seqlens,
262
+ block_table=block_table,
263
+ slot_mapping=slot_mapping,
264
+ attn_bias=bias,
265
+ )
266
+
267
+
268
class XFormersAttentionImpl(AttentionImpl):
    """xFormers-based attention implementation for the V1 engine.

    In forward(), prefill tokens are computed with the triton
    `unified_attention` kernel directly over the paged KV cache, while
    decode tokens use `xops.memory_efficient_attention_forward` with the
    paged block-diagonal causal bias built by the metadata builder.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        logits_soft_cap: Optional[float] = None,
        attn_type: AttentionType = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
    ) -> None:
        if kv_sharing_target_layer_name is not None:
            # NOTE(review): message says "V0" but this file is the V1
            # backend — likely stale wording; confirm upstream.
            raise NotImplementedError("KV sharing is not supported in V0.")
        if alibi_slopes is not None:
            raise NotImplementedError(
                "XFormers does not support alibi slopes yet.")
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        # Query heads per KV head (GQA/MQA group size).
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        self.kv_cache_dtype = kv_cache_dtype
        self.kv_sharing_target_layer_name = kv_sharing_target_layer_name
        # NOTE(review): unreachable in practice — non-None alibi_slopes was
        # rejected above, so this conversion never runs.
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        # Window as (left, right) token counts; (-1, -1) means unlimited.
        if sliding_window is None:
            self.sliding_window = (-1, -1)
        else:
            self.sliding_window = (sliding_window - 1, 0)
        if logits_soft_cap is None:
            # Setting logits_soft_cap to 0 means no soft cap.
            logits_soft_cap = 0
        self.logits_soft_cap = logits_soft_cap

        XFormersAttentionBackend.validate_head_size(head_size)

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "XFormersAttentionImpl.")

    def forward(
        self,
        layer: torch.nn.Module,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: XFormersAttentionMetadata,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with XFormers.

        Args:
            query: shape = [num_tokens, num_heads, head_size]
            key: shape = [num_tokens, num_kv_heads, head_size]
            value: shape = [num_tokens, num_kv_heads, head_size]
            kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        assert output is not None, "Output tensor must be provided."

        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for XFormersAttentionImpl")

        if attn_metadata is None:
            # Profiling run.
            return output

        # Cache the input KVs.
        key_cache, value_cache = kv_cache.unbind(0)
        if self.kv_sharing_target_layer_name is None:
            # Reshape the input keys and values and store them in the cache.
            # Skip this if sharing KV cache with an earlier attention layer.
            # NOTE(woosuk): Here, key and value are padded while slot_mapping is
            # not padded. However, we don't need to do key[:num_actual_tokens]
            # and value[:num_actual_tokens] because the reshape_and_cache_flash
            # op uses the slot_mapping's shape to determine the number of
            # actual tokens.
            ops.reshape_and_cache_flash(
                key,
                value,
                key_cache,
                value_cache,
                attn_metadata.slot_mapping,
                self.kv_cache_dtype,
                layer._k_scale,
                layer._v_scale,
            )

        num_actual_tokens = attn_metadata.num_actual_tokens
        num_decode_tokens = attn_metadata.num_decode_tokens
        if prefill_meta := attn_metadata.prefill_metadata:
            # Prefill tokens sit after the decode tokens in the batch; run
            # them through the triton unified-attention kernel.
            descale_shape = (prefill_meta.query_start_loc.shape[0] - 1,
                             key.shape[1])
            unified_attention(
                q=query[num_decode_tokens:num_actual_tokens],
                k=key_cache,
                v=value_cache,
                out=output[num_decode_tokens:num_actual_tokens],
                cu_seqlens_q=prefill_meta.query_start_loc,
                max_seqlen_q=prefill_meta.max_query_len,
                seqused_k=prefill_meta.seq_lens,
                max_seqlen_k=prefill_meta.max_seq_len,
                softmax_scale=self.scale,
                causal=True,
                alibi_slopes=self.alibi_slopes,
                window_size=self.sliding_window,
                block_table=prefill_meta.block_table,
                softcap=self.logits_soft_cap,
                q_descale=None,  # Not supported
                k_descale=layer._k_scale.expand(descale_shape),
                v_descale=layer._v_scale.expand(descale_shape),
            )

        if decode_meta := attn_metadata.decode_metadata:
            # Query for decode. KV is not needed because it is already cached.
            decode_query = query[:num_decode_tokens]
            # Reshape query to [1, B_T, G, H, D].
            q = decode_query.view(1, -1, self.num_kv_heads,
                                  self.num_queries_per_kv, self.head_size)
            # Reshape the k and v caches to [1, Bkv_T, G, H, D]; expand()
            # broadcasts each KV head across its query-head group without
            # copying data.
            cache_k = key_cache.view(1, -1, self.num_kv_heads, 1,
                                     self.head_size).expand(
                                         1,
                                         -1,
                                         self.num_kv_heads,
                                         self.num_queries_per_kv,
                                         self.head_size,
                                     )
            cache_v = value_cache.view(1, -1, self.num_kv_heads, 1,
                                       self.head_size).expand(
                                           1,
                                           -1,
                                           self.num_kv_heads,
                                           self.num_queries_per_kv,
                                           self.head_size,
                                       )

            # The paged block-diagonal causal bias encodes per-request
            # causal masking and the block-table indirection.
            attn_bias = decode_meta.attn_bias
            output[:
                   num_decode_tokens] = xops.memory_efficient_attention_forward(
                       q,
                       cache_k,
                       cache_v,
                       attn_bias=attn_bias,
                       p=0.0,
                       scale=self.scale,
                   ).view(decode_query.shape)

        # Reshape the output tensor.
        return output
+ return output