Add files using upload-large-folder tool
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/cpu_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flash_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flashinfer.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flex_attention.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/linear_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba1_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba2_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba_selectors.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/pallas.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/rocm_aiter_fa.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/tree_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/triton_attn.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/utils.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/xformers.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/__init__.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/common.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/cutlass_mla.cpython-312.pyc +0 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/rocm_aiter_mla.py +248 -0
- tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/triton_mla.py +173 -0
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (196 Bytes)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/cpu_attn.cpython-312.pyc
ADDED
Binary file (37.1 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flash_attn.cpython-312.pyc
ADDED
Binary file (28.4 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flashinfer.cpython-312.pyc
ADDED
Binary file (35.3 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/flex_attention.cpython-312.pyc
ADDED
Binary file (23.4 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/linear_attn.cpython-312.pyc
ADDED
Binary file (3.06 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba1_attn.cpython-312.pyc
ADDED
Binary file (3.4 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba2_attn.cpython-312.pyc
ADDED
Binary file (8.15 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/mamba_selectors.cpython-312.pyc
ADDED
Binary file (1.01 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/pallas.cpython-312.pyc
ADDED
Binary file (17.2 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/rocm_aiter_fa.cpython-312.pyc
ADDED
Binary file (21.6 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/tree_attn.cpython-312.pyc
ADDED
Binary file (18.2 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/triton_attn.cpython-312.pyc
ADDED
Binary file (16.5 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (23 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/__pycache__/xformers.cpython-312.pyc
ADDED
Binary file (16.6 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (200 Bytes)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/common.cpython-312.pyc
ADDED
Binary file (45.7 kB)

tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/__pycache__/cutlass_mla.cpython-312.pyc
ADDED
Binary file (12.4 kB)
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/rocm_aiter_mla.py
ADDED
@@ -0,0 +1,248 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from dataclasses import dataclass
from typing import ClassVar, Optional

import torch

import vllm.envs as envs
from vllm.attention.ops.rocm_aiter_mla import aiter_mla_decode_fwd
from vllm.config import VllmConfig
from vllm.utils import cdiv
# yapf conflicts with isort for this docstring
# yapf: disable
from vllm.v1.attention.backends.mla.common import (MLACommonBackend,
                                                   MLACommonDecodeMetadata,
                                                   MLACommonImpl,
                                                   MLACommonMetadata,
                                                   MLACommonMetadataBuilder)
from vllm.v1.attention.backends.utils import AttentionCGSupport
from vllm.v1.kv_cache_interface import AttentionSpec

# yapf: enable


def is_aiter_mla_enabled() -> bool:
    return envs.VLLM_ROCM_USE_AITER \
        and envs.VLLM_ROCM_USE_AITER_MLA


class AiterMLABackend(MLACommonBackend):

    @staticmethod
    def get_name() -> str:
        return "ROCM_AITER_MLA_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["AiterMLAImpl"]:
        return AiterMLAImpl

    @staticmethod
    def get_metadata_cls() -> type["AiterMLAMetadata"]:
        return AiterMLAMetadata

    @staticmethod
    def get_builder_cls() -> type["AiterMLAMetadataBuilder"]:
        return AiterMLAMetadataBuilder


@dataclass
class AiterMLADecodeMetadata(MLACommonDecodeMetadata):
    # The indptr of the paged kv cache, shape: [batch_size + 1]
    paged_kv_indptr: Optional[torch.Tensor] = None
    # The page indices of the paged kv cache
    paged_kv_indices: Optional[torch.Tensor] = None
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size]
    paged_kv_last_page_len: Optional[torch.Tensor] = None
    # The query indptr, shape : [num_decode + 1]
    qo_indptr: Optional[torch.Tensor] = None


class AiterMLAMetadata(MLACommonMetadata[AiterMLADecodeMetadata]):
    pass


class AiterMLAMetadataBuilder(MLACommonMetadataBuilder[AiterMLAMetadata]):
    # TODO(luka, lucas): audit this as part of:
    # https://github.com/vllm-project/vllm/issues/22945
    cudagraph_support: ClassVar[AttentionCGSupport] = \
        AttentionCGSupport.UNIFORM_SINGLE_TOKEN_DECODE

    def __init__(self, kv_cache_spec: AttentionSpec, layer_names: list[str],
                 vllm_config: VllmConfig, device: torch.device):
        super().__init__(kv_cache_spec, layer_names, vllm_config, device,
                         AiterMLAMetadata)
        assert self.kv_cache_spec.block_size == 1, "AITER MLA" \
            "only supports block size 1."

        self.compilation_config = vllm_config.compilation_config
        max_num_pages_per_req = cdiv(vllm_config.model_config.max_model_len,
                                     self.kv_cache_spec.block_size)
        max_num_reqs = vllm_config.scheduler_config.max_num_seqs
        max_num_pages = max_num_reqs * max_num_pages_per_req

        # Preparing persistent buffers
        # TODO: we can disambiguate between decode and mixed-prefill decode here
        # so we can only use the persistent buffer if a cudagraph is actually
        # being used.
        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
            self.paged_kv_indptr = torch.zeros(max_num_reqs + 1,
                                               dtype=torch.int32,
                                               device=device)
            self.paged_kv_indices = torch.zeros(max_num_pages,
                                                dtype=torch.int32,
                                                device=device)
            self.paged_kv_last_page_len = torch.zeros(max_num_reqs,
                                                      dtype=torch.int32,
                                                      device=device)

            self.qo_indptr = torch.arange(0,
                                          max_num_reqs + 1,
                                          dtype=torch.int32,
                                          device=device)

    def _build_decode(self, block_table_tensor: torch.Tensor,
                      seq_lens: torch.Tensor) -> AiterMLADecodeMetadata:
        page_size = self.kv_cache_spec.block_size
        block_table_bounds = (seq_lens + page_size - 1) // page_size
        device = self.device
        num_reqs = seq_lens.size(0)

        mask = (torch.arange(block_table_tensor.size(1),
                             dtype=block_table_tensor.dtype,
                             device=device).unsqueeze(0)
                < block_table_bounds.unsqueeze(1))
        paged_kv_indices = block_table_tensor[mask]

        paged_kv_last_page_len = seq_lens % page_size
        paged_kv_last_page_len = torch.where(paged_kv_last_page_len == 0,
                                             page_size, paged_kv_last_page_len)

        paged_kv_indptr = torch.cat([
            torch.zeros(1, dtype=block_table_bounds.dtype, device=device),
            block_table_bounds.cumsum(dim=0, dtype=torch.int32)
        ])

        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():

            num_actual_pages = paged_kv_indices.size(0)

            self.paged_kv_indices[:num_actual_pages].copy_(paged_kv_indices,
                                                           non_blocking=True)
            self.paged_kv_indices[num_actual_pages:].fill_(-1)
            paged_kv_indices = self.paged_kv_indices[:num_actual_pages]

            self.paged_kv_indptr[:1 + num_reqs].copy_(paged_kv_indptr,
                                                      non_blocking=True)
            self.paged_kv_indptr[1 + num_reqs:].fill_(paged_kv_indptr[-1])
            paged_kv_indptr = self.paged_kv_indptr[:1 + num_reqs]

            self.paged_kv_last_page_len[:num_reqs].copy_(
                paged_kv_last_page_len, non_blocking=True)
            self.paged_kv_last_page_len[num_reqs:].fill_(1)
            paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]

            qo_indptr = self.qo_indptr[:1 + num_reqs]

        else:
            qo_indptr = torch.arange(0,
                                     num_reqs + 1,
                                     step=1,
                                     dtype=torch.int32,
                                     device=device)

        attn_metadata = AiterMLADecodeMetadata(
            block_table=block_table_tensor,
            seq_lens=seq_lens,
            paged_kv_indptr=paged_kv_indptr,
            paged_kv_indices=paged_kv_indices,
            paged_kv_last_page_len=paged_kv_last_page_len,
            qo_indptr=qo_indptr)

        return attn_metadata


class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]):

    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)
        assert (num_heads == 16 or num_heads == 128), (
            f"Aiter MLA only supports 16 or 128 number of heads.\n"
            f"Provided {num_heads} number of heads.\n"
            "Try adjusting tensor_parallel_size value.")
        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "Aiter MLA does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap")

        from aiter import flash_attn_varlen_func
        self.flash_attn_varlen_func = flash_attn_varlen_func

    def _flash_attn_varlen_diff_headdims(self,
                                         q,
                                         k,
                                         v,
                                         return_softmax_lse=False,
                                         softmax_scale=None,
                                         **kwargs):
        output = self.flash_attn_varlen_func(
            q=q,
            k=k,
            v=v,
            softmax_scale=softmax_scale,
            return_lse=return_softmax_lse,
            **kwargs,
        )

        return output

    def _forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: AiterMLAMetadata,
    ) -> torch.Tensor:
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        B = q_nope.shape[0]

        q = torch.cat([q_nope, q_pe], dim=-1)
        o = torch.zeros(B,
                        self.num_heads,
                        self.kv_lora_rank,
                        dtype=q.dtype,
                        device=q.device)

        kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)

        # max_seqlen_qo must be 1 except for MTP
        # TODO: Find the best value for MTP
        max_seqlen_qo = 1
        aiter_mla_decode_fwd(q, kv_buffer, o, self.scale,
                             attn_metadata.decode.qo_indptr, max_seqlen_qo,
                             attn_metadata.decode.paged_kv_indptr,
                             attn_metadata.decode.paged_kv_indices,
                             attn_metadata.decode.paged_kv_last_page_len)

        return self._v_up_proj(o)
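The backend above is only reachable when both ROCm AITER flags read by is_aiter_mla_enabled() are set and the KV-cache block size is 1, as asserted in AiterMLAMetadataBuilder.__init__. A minimal sketch of satisfying those preconditions before building an engine; the engine construction shown in the trailing comments is an assumption about the standard vLLM entry point, not something taken from this commit:

import os

# Flags read by is_aiter_mla_enabled() via vllm.envs; both must be truthy.
os.environ["VLLM_ROCM_USE_AITER"] = "1"
os.environ["VLLM_ROCM_USE_AITER_MLA"] = "1"

# AiterMLAMetadataBuilder asserts kv_cache_spec.block_size == 1, so the engine
# would need a block size of 1 (argument name assumed here, not from the diff):
# from vllm import LLM
# llm = LLM(model="<an MLA-based checkpoint>", block_size=1)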
tool_server/.venv/lib/python3.12/site-packages/vllm/v1/attention/backends/mla/triton_mla.py
ADDED
@@ -0,0 +1,173 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Optional

import torch

from vllm import envs
from vllm.attention.backends.abstract import (AttentionType,
                                              is_quantized_kv_cache)
from vllm.attention.ops.triton_decode_attention import decode_attention_fwd
from vllm.attention.ops.triton_flash_attention import triton_attention
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.triton_utils import HAS_TRITON
from vllm.v1.attention.backends.mla.common import (MLACommonBackend,
                                                   MLACommonImpl,
                                                   MLACommonMetadata)

logger = init_logger(__name__)


class TritonMLABackend(MLACommonBackend):

    @staticmethod
    def get_name() -> str:
        return "TRITON_MLA_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["TritonMLAImpl"]:
        return TritonMLAImpl


class TritonMLAImpl(MLACommonImpl[MLACommonMetadata]):

    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)

        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "TritonMLAImpl does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap")

        if attn_type != AttentionType.DECODER:
            raise NotImplementedError("Encoder self-attention and "
                                      "encoder/decoder cross-attention "
                                      "are not implemented for "
                                      "TritonMLAImpl")

        if is_quantized_kv_cache(self.kv_cache_dtype):
            raise NotImplementedError(
                "TritonMLA V1 with FP8 KV cache not yet supported")

        self.use_triton_flash_attn = envs.VLLM_USE_TRITON_FLASH_ATTN
        self.triton_fa_func = triton_attention if HAS_TRITON else None

    def _flash_attn_varlen_diff_headdims_rocm(self,
                                              q,
                                              k,
                                              v,
                                              softmax_scale=None,
                                              **kwargs):
        assert self.triton_fa_func is not None

        # Triton Attention requires a padded V
        padded_v = torch.nn.functional.pad(v, [0, q.shape[-1] - v.shape[-1]],
                                           value=0)
        # The output of triton_attention is a tuple of
        # [output_tensor, encoded_softmax] where encoded_softmax is always None
        output_tensor, _ = self.triton_fa_func(
            q,
            k,
            padded_v,
            None,  # output
            kwargs["cu_seqlens_q"],
            kwargs["cu_seqlens_k"],
            kwargs["max_seqlen_q"],
            kwargs["max_seqlen_k"],
            kwargs["causal"],
            softmax_scale,
            None,  # bias
        )

        return output_tensor

    def _flash_attn_varlen_diff_headdims(self,
                                         q,
                                         k,
                                         v,
                                         return_softmax_lse=False,
                                         softmax_scale=None,
                                         **kwargs):
        if current_platform.is_rocm() \
            and self.use_triton_flash_attn \
            and not return_softmax_lse:
            return self._flash_attn_varlen_diff_headdims_rocm(
                q, k, v, softmax_scale=softmax_scale, **kwargs)
        else:
            return super()._flash_attn_varlen_diff_headdims(
                q,
                k,
                v,
                return_softmax_lse=return_softmax_lse,
                softmax_scale=softmax_scale,
                **kwargs)

    def _forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: MLACommonMetadata,
    ) -> torch.Tensor:
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        if self.kv_cache_dtype.startswith("fp8"):
            raise NotImplementedError("FP8 Triton MLA not yet supported")

        B = q_nope.shape[0]

        q = torch.cat([q_nope, q_pe], dim=-1)
        o = torch.zeros(B,
                        self.num_heads,
                        self.kv_lora_rank,
                        dtype=q.dtype,
                        device=q.device)

        num_kv_splits = 4  # TODO: heuristic

        # TODO(lucas) Allocate ahead of time
        attn_logits = torch.empty(
            (
                B,
                self.num_heads,
                num_kv_splits,
                # NOTE(lucas) idk why the +1 is here but sglang has it so we
                # just mirror that
                self.kv_lora_rank + 1,
            ),
            dtype=torch.float32,
            device=q.device,
        )

        # Add a head dim of 1
        kv_c_and_k_pe_cache = kv_c_and_k_pe_cache.unsqueeze(2)
        kv_c_cache = kv_c_and_k_pe_cache[..., :self.kv_lora_rank]
        PAGE_SIZE = kv_c_and_k_pe_cache.size(1)

        # Run MQA
        decode_attention_fwd(q, kv_c_and_k_pe_cache, kv_c_cache, o,
                             attn_metadata.decode.block_table,
                             attn_metadata.decode.seq_lens, attn_logits,
                             num_kv_splits, self.scale, PAGE_SIZE)

        return self._v_up_proj(o)
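The padding in _flash_attn_varlen_diff_headdims_rocm exists because MLA's query/key head dim is wider than the value head dim, while the Triton kernel expects matching head dims, so V is zero-padded on its last axis before the call. A small self-contained sketch of just that padding step; the 576/512 head sizes are illustrative MLA-style numbers, not values taken from this file:

import torch

# Illustrative shapes: (num_tokens, num_heads, head_dim). q/k carry extra
# rope dims that v does not have, hence the mismatch.
q = torch.randn(4, 16, 576)
v = torch.randn(4, 16, 512)

# Same call as in triton_mla.py: zero-pad V's last dim up to q's head dim.
padded_v = torch.nn.functional.pad(v, [0, q.shape[-1] - v.shape[-1]], value=0)

assert padded_v.shape == (4, 16, 576)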