sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
vllm-project/vllm:tests/distributed/test_eplb_fused_moe_layer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Test that the interaction between EPLB and FusedMoE Layer is okay
from dataclasses import dataclass
import pytest
import torch
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.distributed.eplb.rebalance_execute import rearrange_expert_weights_inplace
from vllm.distributed.parallel_state import (
ensure_model_parallel_initialized,
get_tp_group,
)
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from .eplb_utils import distributed_run, set_env_vars_and_device
@dataclass
class TestConfig:
    """Configuration for the EPLB + FusedMoE interaction test."""

    # Number of FusedMoE layers to construct and rebalance.
    num_layers: int
    # Total (global) number of experts across all EP ranks.
    num_experts: int
    # Number of experts held locally by each EP rank.
    num_local_experts: int
    # Number of experts each token is routed to (top-k).
    num_topk: int
    hidden_size: int
    intermediate_size: int
    # Dtype of the expert weight tensors.
    weight_dtype: torch.dtype
    # Dtype of the block-quant weight scales; None if scales are unused.
    weight_scale_dtype: torch.dtype | None
    # Whether the weight scales are stored transposed (column-major).
    column_major_scales: bool
def make_expert_weights(
    layer_idx: int,
    global_expert_idx: int,
    global_num_experts: int,
    tensor_shape: tuple[int, ...],
    tensor_dtype: torch.dtype,
    tensor_device: torch.device,
    is_column_major: bool,
) -> torch.Tensor:
    """Build a deterministic 2-D expert weight tensor.

    The tensor is filled with consecutive values whose starting offset is
    unique per (layer, global expert) pair, so after an EPLB shuffle each
    expert's data can be identified and checked. When ``is_column_major``
    is set, the data is materialized transposed and a non-contiguous
    transposed view with the requested shape is returned.
    """
    assert len(tensor_shape) == 2
    rows, cols = tensor_shape
    alloc_shape = (cols, rows) if is_column_major else (rows, cols)
    out = torch.empty(alloc_shape, dtype=tensor_dtype, device=tensor_device)
    # Unique starting value per (layer, expert) pair.
    start = (layer_idx * global_num_experts + global_expert_idx) * out.numel()
    fill = torch.arange(
        start, start + out.numel(), dtype=tensor_dtype, device=tensor_device
    )
    out.view(-1).copy_(fill)
    if not is_column_major:
        return out
    out = out.t()
    assert not out.is_contiguous()
    return out
def make_fused_moe_layer(
    rank: int,
    layer_idx: int,
    test_config: TestConfig,
) -> FusedMoE:
    """Construct a FusedMoE layer whose local expert weights (and mock
    block-quant scales) are filled with deterministic per-expert values.

    The deterministic fill (see make_expert_weights) lets the caller verify,
    after an EPLB rebalance, that each expert's data landed on the right
    rank with the right layout.
    """
    fml = FusedMoE(
        num_experts=test_config.num_experts,
        top_k=test_config.num_topk,
        hidden_size=test_config.hidden_size,
        intermediate_size=test_config.intermediate_size,
        prefix=f"dummy_layer_{layer_idx}",
        activation="silu",
        is_act_and_mul=True,
        params_dtype=test_config.weight_dtype,
    )
    device = torch.device(f"cuda:{rank}")
    from functools import partial

    # Bind the per-layer constants; only per-expert arguments vary below.
    _make_expert_weights = partial(
        make_expert_weights,
        layer_idx=layer_idx,
        global_num_experts=test_config.num_experts,
        tensor_device=device,
    )
    assert isinstance(fml.w13_weight.data, torch.Tensor)
    assert isinstance(fml.w2_weight.data, torch.Tensor)
    # Move the layer's weights onto this rank's GPU.
    fml.w13_weight.data = fml.w13_weight.data.to(device=device)
    fml.w2_weight.data = fml.w2_weight.data.to(device=device)
    w13_weight = fml.w13_weight.data
    w2_weight = fml.w2_weight.data
    assert w13_weight.size(0) == test_config.num_local_experts
    # Fill each local expert's weights with its globally-unique pattern.
    for i in range(test_config.num_local_experts):
        # Global index of the i-th local expert on this rank.
        g_i = rank * test_config.num_local_experts + i
        w13_weight_e = w13_weight[i]
        w2_weight_e = w2_weight[i]
        w13_weight_e.copy_(
            _make_expert_weights(
                global_expert_idx=g_i,
                tensor_shape=w13_weight_e.shape,
                tensor_dtype=w13_weight_e.dtype,
                is_column_major=False,
            )
        )
        w2_weight_e.copy_(
            _make_expert_weights(
                global_expert_idx=g_i,
                tensor_shape=w2_weight_e.shape,
                tensor_dtype=w2_weight_e.dtype,
                is_column_major=False,
            )
        )
    # Tile size for the (mock) block-quantization scales.
    block_size = 16

    def block_quant_scales_shape(
        shape: tuple[int, ...], is_column_major: bool
    ) -> tuple[int, ...]:
        # One scale per (block_size x block_size) tile of the weight; the
        # last two dims are swapped when stored column-major.
        assert len(shape) == 3
        if not is_column_major:
            return (shape[0], shape[1] // block_size, shape[2] // block_size)
        else:
            return (shape[0], shape[2] // block_size, shape[1] // block_size)

    is_column_major = test_config.column_major_scales
    # NOTE(review): scales use weight_dtype rather than weight_scale_dtype —
    # presumably intentional since both are bf16 in this test; confirm.
    w13_weight_scale_inv = torch.empty(
        block_quant_scales_shape(w13_weight.shape, is_column_major),
        dtype=test_config.weight_dtype,
        device=device,
    )
    w2_weight_scale_inv = torch.empty(
        block_quant_scales_shape(w2_weight.shape, is_column_major),
        dtype=test_config.weight_dtype,
        device=device,
    )
    for i in range(test_config.num_local_experts):
        g_i = rank * test_config.num_local_experts + i
        w13_s_e = w13_weight_scale_inv[i]
        w2_s_e = w2_weight_scale_inv[i]
        w13_s_e.copy_(
            _make_expert_weights(
                global_expert_idx=g_i,
                tensor_shape=w13_s_e.shape,
                tensor_dtype=w13_s_e.dtype,
                # Fill data in row-major and then
                # transpose if test_config requires col-major.
                is_column_major=False,
            )
        )
        w2_s_e.copy_(
            _make_expert_weights(
                global_expert_idx=g_i,
                tensor_shape=w2_s_e.shape,
                tensor_dtype=w2_s_e.dtype,
                is_column_major=False,
            )
        )
    if is_column_major:
        # Swap the per-expert scale dims so the stored layout is col-major.
        w13_weight_scale_inv = torch.transpose(w13_weight_scale_inv, 1, 2)
        w2_weight_scale_inv = torch.transpose(w2_weight_scale_inv, 1, 2)
        assert not w13_weight_scale_inv.is_contiguous()
        assert not w2_weight_scale_inv.is_contiguous()
    # Add scales to the parameter list
    fml.w13_weight_scale_inv = torch.nn.Parameter(
        w13_weight_scale_inv, requires_grad=False
    )
    fml.w2_weight_scale_inv = torch.nn.Parameter(
        w2_weight_scale_inv, requires_grad=False
    )
    return fml
def _test_eplb_fml(env, world_size: int, test_config: TestConfig):
    """Per-process body: build deterministic FusedMoE layers, shuffle the
    experts with rearrange_expert_weights_inplace, and verify every local
    expert slot ends up holding the weights of its newly assigned expert.
    """
    # Initialize model parallel (using tensor parallel as an entrypoint
    # to expert parallel)
    set_env_vars_and_device(env)
    vllm_config = VllmConfig()
    vllm_config.parallel_config.tensor_parallel_size = world_size
    vllm_config.parallel_config.enable_expert_parallel = True
    with set_current_vllm_config(vllm_config):
        ensure_model_parallel_initialized(
            tensor_model_parallel_size=world_size, pipeline_model_parallel_size=1
        )
        ep_group = get_tp_group().cpu_group
        ep_rank = torch.distributed.get_rank()
        # Layers whose expert weights are deterministic per (layer, expert).
        fml_layers = [
            make_fused_moe_layer(ep_rank, layer_idx, test_config)
            for layer_idx in range(test_config.num_layers)
        ]
        rank_expert_weights = [fml.get_expert_weights() for fml in fml_layers]
        # Old placement: identity mapping (expert e sits in slot e).
        indices = torch.zeros(
            test_config.num_layers, test_config.num_experts, dtype=torch.long
        )
        for lidx in range(test_config.num_layers):
            indices[lidx] = torch.Tensor(range(test_config.num_experts))
        # New placement: a random permutation per layer.
        shuffled_indices = torch.zeros_like(indices)
        for lidx in range(test_config.num_layers):
            shuffled_indices[lidx] = torch.randperm(test_config.num_experts)
        rearrange_expert_weights_inplace(
            indices,
            shuffled_indices,
            rank_expert_weights,
            ep_group,
            is_profile=False,
        )
        num_local_experts = test_config.num_local_experts
        num_global_experts = test_config.num_experts
        # Check values AND layout (shape/stride) of every rebalanced expert.
        for lidx, fml in enumerate(fml_layers):
            for name, w in fml.named_parameters():
                for e in range(num_local_experts):
                    # Expert the shuffle assigned to local slot e on this rank.
                    g_e = shuffled_indices[lidx][ep_rank * num_local_experts + e]
                    ref = make_expert_weights(
                        layer_idx=lidx,
                        global_expert_idx=int(g_e.item()),
                        global_num_experts=num_global_experts,
                        tensor_shape=w[e].shape,
                        tensor_dtype=w[e].dtype,
                        tensor_device=w[e].device,
                        is_column_major=not w[e].is_contiguous(),
                    )
                    assert w[e].shape == ref.shape and w[e].stride() == ref.stride(), (
                        f"w[{e}] {w[e].size()} {w[e].stride()} vs "
                        f"ref {ref.size()} {ref.stride()}"
                    )
                    torch.testing.assert_close(w[e], ref)
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize("num_layers", [4])
@pytest.mark.parametrize("num_experts", [16])
@pytest.mark.parametrize("hidden_size", [256])
@pytest.mark.parametrize("intermediate_size", [256])
@pytest.mark.parametrize("column_major_scales", [True, False])
def test_eplb_fml(
    world_size: int,
    num_layers: int,
    num_experts: int,
    hidden_size: int,
    intermediate_size: int,
    column_major_scales: bool,
):
    """Launch the EPLB/FusedMoE interaction test across `world_size` GPUs."""
    if torch.cuda.device_count() < world_size:
        pytest.skip(f"Need at least {world_size} GPUs to run the test")
    # The dtypes are fine as we are essentially just checking data-copies
    config = TestConfig(
        num_layers=num_layers,
        num_experts=num_experts,
        num_local_experts=num_experts // world_size,
        num_topk=4,
        hidden_size=hidden_size,
        intermediate_size=intermediate_size,
        weight_dtype=torch.bfloat16,
        weight_scale_dtype=torch.bfloat16,
        column_major_scales=column_major_scales,
    )
    distributed_run(
        _test_eplb_fml,
        world_size,
        config,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_eplb_fused_moe_layer.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/transformers_utils/test_config.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This test file includes some cases where it is inappropriate to
only get the `eos_token_id` from the tokenizer as defined by
`vllm.LLMEngine._get_eos_token_id`.
"""
from vllm.tokenizers import get_tokenizer
from vllm.transformers_utils.config import try_get_generation_config
def test_get_llama3_eos_token():
    """Llama-3.2-Instruct: the tokenizer exposes a single EOS id, while the
    generation config lists all three stop ids — so reading only the
    tokenizer's EOS would be incomplete."""
    model_name = "meta-llama/Llama-3.2-1B-Instruct"
    tokenizer = get_tokenizer(model_name)
    assert tokenizer.eos_token_id == 128009
    config = try_get_generation_config(model_name, trust_remote_code=False)
    assert config is not None
    assert config.eos_token_id == [128001, 128008, 128009]
def test_get_blip2_eos_token():
    """BLIP-2: the tokenizer's EOS id differs from the generation config's,
    so the tokenizer alone is not a reliable source for it."""
    model_name = "Salesforce/blip2-opt-2.7b"
    tokenizer = get_tokenizer(model_name)
    assert tokenizer.eos_token_id == 2
    config = try_get_generation_config(model_name, trust_remote_code=False)
    assert config is not None
    assert config.eos_token_id == 50118
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/transformers_utils/test_config.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/worker/gpu/async_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import numpy as np
import torch
from vllm.v1.outputs import AsyncModelRunnerOutput, LogprobsTensors, ModelRunnerOutput
from vllm.v1.worker.gpu.sample.output import SamplerOutput
class AsyncOutput(AsyncModelRunnerOutput):
    """Holds sampler results whose device-to-host copies are in flight.

    The copies are issued on `copy_stream` (after it waits on `main_stream`)
    so the main stream is not blocked; get_output() later synchronizes on
    the recorded event before reading the host-side buffers.
    """

    def __init__(
        self,
        model_runner_output: ModelRunnerOutput,
        sampler_output: SamplerOutput,
        num_sampled_tokens: torch.Tensor,
        main_stream: torch.cuda.Stream,
        copy_stream: torch.cuda.Stream,
        copy_event: torch.cuda.Event,
    ):
        # NOTE(woosuk): We must retain references to the GPU tensors,
        # as the copy operations are performed on a different CUDA stream than
        # the one where the tensors were created.
        self.model_runner_output = model_runner_output
        self.sampler_output = sampler_output
        self.num_sampled_tokens = num_sampled_tokens
        self.copy_event = copy_event
        with stream(copy_stream, main_stream):
            # Order the copies after all work already queued on main_stream.
            copy_stream.wait_stream(main_stream)
            self.sampled_token_ids = async_copy_to_np(sampler_output.sampled_token_ids)
            self.logprobs_tensors: LogprobsTensors | None = None
            if sampler_output.logprobs_tensors is not None:
                self.logprobs_tensors = (
                    sampler_output.logprobs_tensors.to_cpu_nonblocking()
                )
            self.num_nans: np.ndarray | None = None
            if sampler_output.num_nans is not None:
                self.num_nans = async_copy_to_np(sampler_output.num_nans)
            self.num_sampled_tokens_np = async_copy_to_np(num_sampled_tokens)
            self.prompt_logprobs_dict = {
                k: v.to_cpu_nonblocking() if v is not None else None
                for k, v in self.model_runner_output.prompt_logprobs_dict.items()
            }
            # Signaled once all the async copies above have completed.
            self.copy_event.record(copy_stream)

    def get_output(self) -> ModelRunnerOutput:
        """Block until the device-to-host copies finish, then finalize the
        model runner output with the host-side data."""
        self.copy_event.synchronize()
        # NOTE(woosuk): The following code is to ensure compatibility with
        # the existing model runner.
        # Going forward, we should keep the data structures as NumPy arrays
        # rather than Python lists.
        sampled_token_ids: list[list[int]] = self.sampled_token_ids.tolist()
        num_sampled_tokens: list[int] = self.num_sampled_tokens_np.tolist()
        for token_ids, num_tokens in zip(sampled_token_ids, num_sampled_tokens):
            # Trim each row to the number of tokens actually sampled.
            del token_ids[num_tokens:]
        self.model_runner_output.sampled_token_ids = sampled_token_ids
        if self.num_nans is not None:
            self.model_runner_output.num_nans_in_logits = dict(
                zip(self.model_runner_output.req_ids, self.num_nans.tolist())
            )
        if self.logprobs_tensors is not None:
            self.model_runner_output.logprobs = self.logprobs_tensors.tolists()
        self.model_runner_output.prompt_logprobs_dict = self.prompt_logprobs_dict
        return self.model_runner_output
class AsyncPoolingOutput(AsyncModelRunnerOutput):
    """Holds pooling results whose device-to-host copies are in flight.

    Mirrors AsyncOutput: the copies run on `copy_stream` after it waits on
    `main_stream`, and get_output() synchronizes on `copy_event` before
    reading the host-side buffers.
    """

    def __init__(
        self,
        model_runner_output: ModelRunnerOutput,
        pooler_output: torch.Tensor,
        is_valid: torch.Tensor | None,
        main_stream: torch.cuda.Stream,
        copy_stream: torch.cuda.Stream,
        copy_event: torch.cuda.Event,
    ):
        # Retain references to the GPU tensors while the async copies run
        # on the separate copy stream.
        self.model_runner_output = model_runner_output
        self.pooler_output = pooler_output
        self.is_valid = is_valid
        self.copy_event = copy_event
        with stream(copy_stream, main_stream):
            copy_stream.wait_stream(main_stream)
            self.pooler_output_cpu = self.pooler_output.to("cpu", non_blocking=True)
            if self.is_valid is not None:
                self.is_valid_cpu = self.is_valid.to("cpu", non_blocking=True)
            else:
                self.is_valid_cpu = None
            self.copy_event.record(copy_stream)

    def get_output(self) -> ModelRunnerOutput:
        """Block until the copies finish, then attach per-request pooler
        outputs (None for requests flagged invalid)."""
        self.copy_event.synchronize()
        # Fix: unbind() returns an immutable tuple, so the original code
        # raised TypeError on `pooler_output[i] = None` whenever an entry
        # was invalid. Convert to a list before overwriting entries.
        pooler_output: list[torch.Tensor | None] = list(
            self.pooler_output_cpu.unbind(dim=0)
        )
        if self.is_valid_cpu is not None:
            is_valid_cpu = self.is_valid_cpu.tolist()
            for i, is_valid in enumerate(is_valid_cpu):
                if not is_valid:
                    pooler_output[i] = None
        self.model_runner_output.pooler_output = pooler_output
        return self.model_runner_output
def async_copy_to_np(x: torch.Tensor) -> np.ndarray:
    """Start a non-blocking copy of `x` to host memory and return the
    resulting CPU tensor viewed as a NumPy array.

    The data is only safe to read after the issuing stream's copy event
    has been synchronized.
    """
    cpu_tensor = x.to("cpu", non_blocking=True)
    return cpu_tensor.numpy()
@contextlib.contextmanager
def stream(to_stream: torch.cuda.Stream, from_stream: torch.cuda.Stream):
    """Lightweight version of torch.cuda.stream() context manager which
    avoids current_stream and device lookups.
    """
    try:
        torch.cuda.set_stream(to_stream)
        yield
    finally:
        # Always restore the caller's stream, even if the body raises.
        torch.cuda.set_stream(from_stream)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/async_utils.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/attn_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
from typing import Any, cast
import torch
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
from vllm.v1.attention.backend import AttentionBackend, CommonAttentionMetadata
from vllm.v1.kv_cache_interface import (
AttentionSpec,
KVCacheConfig,
KVCacheSpec,
UniformTypeKVCacheSpecs,
)
from vllm.v1.worker.utils import AttentionGroup, bind_kv_cache
def get_kv_cache_spec(vllm_config: VllmConfig) -> dict[str, KVCacheSpec]:
    """Collect the KV-cache spec of every attention layer in the model.

    Layers whose `get_kv_cache_spec` returns a falsy value (e.g.
    encoder-only attention that needs no KV cache) are omitted.
    """
    layer_type = cast(type[Any], AttentionLayerBase)
    attn_layers = get_layers_from_vllm_config(vllm_config, layer_type)
    return {
        layer_name: spec
        for layer_name, attn_module in attn_layers.items()
        if (spec := attn_module.get_kv_cache_spec(vllm_config))
    }
def init_attn_backend(
    kv_cache_config: KVCacheConfig, vllm_config: VllmConfig, device: torch.device
):
    """Resolve the attention backend of every layer and group layers that
    share both a backend class and a per-layer KV-cache spec.

    Returns (attn_backends, attn_groups):
      * attn_backends: {layer_name: backend class}
      * attn_groups: one list of AttentionGroup per KV-cache group; each
        group gets a single metadata builder (num_metadata_builders=1).
    """
    attn_backends: dict[str, type[AttentionBackend]] = {}
    attn_groups: list[list[AttentionGroup]] = []
    # Workspace shared across builders: the first builder that exposes
    # `_get_workspace_buffer` provides it; later builders that expose
    # `set_workspace_buffer` reuse it.
    attn_backend_workspace: torch.Tensor | None = None
    for kv_cache_group_id, kv_cache_group_spec in enumerate(
        kv_cache_config.kv_cache_groups
    ):
        layer_names = kv_cache_group_spec.layer_names
        layer_type = cast(type[Any], AttentionLayerBase)
        attn_layers = get_layers_from_vllm_config(vllm_config, layer_type, layer_names)
        # Group layers by (backend class name, per-layer KV-cache spec),
        # preserving the order in which each key is first seen.
        group_map: dict[tuple[tuple[str, str], KVCacheSpec], AttentionGroup] = {}
        group_order: list[tuple[tuple[str, str], KVCacheSpec]] = []
        for layer_name in layer_names:
            attn_backend = attn_layers[layer_name].get_attn_backend()
            attn_backends[layer_name] = attn_backend
            layer_kv_cache_spec: KVCacheSpec = kv_cache_group_spec.kv_cache_spec
            if isinstance(layer_kv_cache_spec, UniformTypeKVCacheSpecs):
                # Uniform-type specs hold one spec per layer; pick this layer's.
                layer_kv_cache_spec = layer_kv_cache_spec.kv_cache_specs[layer_name]
            key = (attn_backend.full_cls_name(), layer_kv_cache_spec)
            if key not in group_map:
                group_map[key] = AttentionGroup(
                    attn_backend,
                    [layer_name],
                    layer_kv_cache_spec,
                    kv_cache_group_id,
                )
                group_order.append(key)
            else:
                group_map[key].layer_names.append(layer_name)
        groups = [group_map[key] for key in group_order]
        for group in groups:
            group.create_metadata_builders(
                vllm_config=vllm_config,
                device=device,
                kernel_block_size=None,
                num_metadata_builders=1,
            )
            builder = group.get_metadata_builder(0)
            if attn_backend_workspace is None:
                if hasattr(builder, "_get_workspace_buffer"):
                    attn_backend_workspace = builder._get_workspace_buffer()
            else:
                if hasattr(builder, "set_workspace_buffer"):
                    builder.set_workspace_buffer(attn_backend_workspace)
        attn_groups.append(groups)
    return attn_backends, attn_groups
def _allocate_kv_cache(kv_cache_config: KVCacheConfig, device: torch.device):
    """Allocate one raw int8 buffer per KV-cache tensor spec and map it to
    every layer listed in that spec's `shared_by`.

    Returns {layer_name: raw buffer}; layers sharing a spec share one buffer.
    """
    raw_tensors: dict[str, torch.Tensor] = {}
    for tensor_spec in kv_cache_config.kv_cache_tensors:
        buf = torch.zeros(tensor_spec.size, dtype=torch.int8, device=device)
        for name in tensor_spec.shared_by:
            raw_tensors[name] = buf
    # Sanity check: every layer of every KV-cache group received a buffer.
    grouped_layers = {
        name
        for group in kv_cache_config.kv_cache_groups
        for name in group.layer_names
    }
    assert grouped_layers == set(raw_tensors.keys()), (
        "Some layers are not correctly initialized"
    )
    return raw_tensors
def _reshape_kv_cache(
    kv_cache_config: KVCacheConfig,
    kv_cache_raw_tensors: dict[str, torch.Tensor],
    attn_backends: dict[str, AttentionBackend],
) -> dict[str, torch.Tensor]:
    """View each layer's raw int8 buffer in the backend's KV-cache layout.

    The buffer is reinterpreted with the spec's dtype, viewed in the
    backend's physical (stride-ordered) shape, then permuted back so the
    returned tensor has the backend's logical shape with the physical
    memory layout.
    """
    kv_caches: dict[str, torch.Tensor] = {}
    for kv_cache_group_spec in kv_cache_config.kv_cache_groups:
        kv_cache_spec = kv_cache_group_spec.kv_cache_spec
        assert isinstance(kv_cache_spec, AttentionSpec)
        for layer_name in kv_cache_group_spec.layer_names:
            raw_tensor = kv_cache_raw_tensors[layer_name]
            # The buffer must hold a whole number of pages.
            assert raw_tensor.numel() % kv_cache_spec.page_size_bytes == 0
            num_blocks = raw_tensor.numel() // kv_cache_spec.page_size_bytes
            attn_backend = attn_backends[layer_name]
            kv_cache_shape = attn_backend.get_kv_cache_shape(
                num_blocks,
                kv_cache_spec.block_size,
                kv_cache_spec.num_kv_heads,
                kv_cache_spec.head_size,
            )
            # FIXME(woosuk): Add kv_cache_stride_order to all attention backends.
            try:
                kv_cache_stride_order = attn_backend.get_kv_cache_stride_order()
                assert len(kv_cache_stride_order) == len(kv_cache_shape)
            except (AttributeError, NotImplementedError):
                # Backend declares no preferred order: use the identity.
                kv_cache_stride_order = tuple(range(len(kv_cache_shape)))
            # Physical shape = logical shape permuted by the stride order.
            kv_cache_shape = tuple(kv_cache_shape[i] for i in kv_cache_stride_order)
            # Inverse permutation restores the logical dimension order.
            inv_order = [
                kv_cache_stride_order.index(i)
                for i in range(len(kv_cache_stride_order))
            ]
            dtype = kv_cache_spec.dtype
            raw_tensor = raw_tensor.view(dtype)
            raw_tensor = raw_tensor.view(kv_cache_shape)
            kv_caches[layer_name] = raw_tensor.permute(*inv_order)
    return kv_caches
def init_kv_cache(
    runner_kv_caches: list[torch.Tensor],
    forward_context: dict[str, Any],
    kv_cache_config: KVCacheConfig,
    attn_backends: dict[str, AttentionBackend],
    device: torch.device,
) -> dict[str, torch.Tensor]:
    """Allocate, reshape, and bind the KV caches for all attention layers.

    Returns {layer_name: kv_cache tensor} after registering the caches with
    the forward context and the runner's cache list.
    """
    raw_tensors = _allocate_kv_cache(kv_cache_config, device)
    kv_caches = _reshape_kv_cache(kv_cache_config, raw_tensors, attn_backends)
    # Make the per-layer caches visible to the forward context and runner.
    bind_kv_cache(kv_caches, forward_context, runner_kv_caches)
    return kv_caches
def build_slot_mappings_by_layer(
    slot_mappings: torch.Tensor, kv_cache_config: KVCacheConfig
) -> dict[str, torch.Tensor]:
    """Fan out the per-group slot-mapping rows to every layer of each group.

    Row i of `slot_mappings` belongs to KV-cache group i; all layers in that
    group share the same row.
    """
    return {
        layer_name: slot_mapping
        for slot_mapping, kv_cache_group in zip(
            slot_mappings, kv_cache_config.kv_cache_groups
        )
        for layer_name in kv_cache_group.layer_names
    }
def build_attn_metadata(
    attn_groups: list[list[AttentionGroup]],
    num_reqs: int,
    num_tokens: int,
    query_start_loc_gpu: torch.Tensor,
    query_start_loc_cpu: torch.Tensor,
    max_query_len: int,
    seq_lens: torch.Tensor,
    max_seq_len: int,
    block_tables: Sequence[torch.Tensor],
    slot_mappings: torch.Tensor,
    kv_cache_config: KVCacheConfig,
    dcp_local_seq_lens: torch.Tensor | None = None,
) -> dict[str, Any]:
    """Build per-layer attention metadata for one forward pass.

    For every KV-cache group, a CommonAttentionMetadata is assembled from
    that group's block table and slot mapping; each attention group's
    builder turns it into backend-specific metadata, which is then fanned
    out to all layers of that group.
    """
    # Trim the per-request buffers to the active batch size.
    seq_lens = seq_lens[:num_reqs]
    if dcp_local_seq_lens is not None:
        dcp_local_seq_lens = dcp_local_seq_lens[:num_reqs]
    per_layer_metadata: dict[str, Any] = {}
    for group_idx in range(len(kv_cache_config.kv_cache_groups)):
        common_metadata = CommonAttentionMetadata(
            query_start_loc=query_start_loc_gpu,
            query_start_loc_cpu=query_start_loc_cpu,
            seq_lens=seq_lens,
            max_seq_len=max_seq_len,
            num_reqs=num_reqs,
            num_actual_tokens=num_tokens,
            max_query_len=max_query_len,
            block_table_tensor=block_tables[group_idx],
            slot_mapping=slot_mappings[group_idx],
            causal=True,
            dcp_local_seq_lens=dcp_local_seq_lens,
        )
        for attn_group in attn_groups[group_idx]:
            builder = attn_group.get_metadata_builder(0)
            metadata = builder.build(
                common_prefix_len=0, common_attn_metadata=common_metadata
            )
            for layer_name in attn_group.layer_names:
                per_layer_metadata[layer_name] = metadata
    return per_layer_metadata
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/attn_utils.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/block_table.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
import torch
from vllm.triton_utils import tl, triton
from vllm.utils.math_utils import cdiv
from vllm.v1.attention.backends.utils import PAD_SLOT_ID
from vllm.v1.worker.gpu.buffer_utils import StagedWriteTensor, UvaBackedTensor
class BlockTables:
    """Device-side block tables and slot mappings, one per KV-cache group.

    Scheduler writes are staged on the host and flushed in bulk via
    apply_staged_writes(); gather/compute operations run as Triton kernels
    that address all per-group tables through pointer arrays.
    """

    def __init__(
        self,
        block_sizes: list[int],
        max_num_reqs: int,
        max_num_batched_tokens: int,
        max_model_len: int,
        device: torch.device,
        cp_size: int = 1,
        cp_rank: int = 0,
        cp_interleave: int = 1,
    ):
        self.block_sizes = block_sizes
        self.max_num_reqs = max_num_reqs
        self.max_num_batched_tokens = max_num_batched_tokens
        self.max_model_len = max_model_len
        self.device = device
        # Context-parallel (DCP) topology; the defaults describe "no CP".
        self.cp_size = cp_size
        self.cp_rank = cp_rank
        self.cp_interleave = cp_interleave
        self.num_kv_cache_groups = len(self.block_sizes)
        # num_kv_cache_groups x [max_num_reqs, max_num_blocks]
        self.block_tables: list[StagedWriteTensor] = []
        for i in range(self.num_kv_cache_groups):
            block_size = self.block_sizes[i]
            # When using DCP, each request's KV cache is sharded among different ranks.
            # As a result, one block on the current rank covers `block_size * cp_size`
            # tokens in the full, global (unsharded) sequence.
            max_num_blocks = cdiv(self.max_model_len, block_size * self.cp_size)
            block_table = StagedWriteTensor(
                (self.max_num_reqs, max_num_blocks),
                dtype=torch.int32,
                device=device,
            )
            self.block_tables.append(block_table)
        # Per-group device pointers and strides, so a single kernel launch
        # can address every group's block table.
        self.block_table_ptrs = self._make_ptr_tensor(
            [b.gpu for b in self.block_tables]
        )
        self.block_table_strides = torch.tensor(
            [b.gpu.stride(0) for b in self.block_tables],
            dtype=torch.int64,
            device=self.device,
        )
        self.block_sizes_tensor = torch.tensor(
            self.block_sizes, dtype=torch.int32, device=self.device
        )
        # Number of valid blocks per (group, request).
        self.num_blocks = UvaBackedTensor(
            (self.num_kv_cache_groups, self.max_num_reqs),
            dtype=torch.int32,
        )
        # Block tables used for model's forward pass.
        # num_kv_cache_groups x [max_num_reqs, max_num_blocks]
        self.input_block_tables: list[torch.Tensor] = [
            torch.zeros_like(b.gpu) for b in self.block_tables
        ]
        self.input_block_table_ptrs = self._make_ptr_tensor(self.input_block_tables)
        # Per-group, per-token KV-cache slot IDs for the current batch.
        self.slot_mappings = torch.zeros(
            self.num_kv_cache_groups,
            self.max_num_batched_tokens,
            dtype=torch.int64,
            device=self.device,
        )

    def _make_ptr_tensor(self, x: Iterable[torch.Tensor]) -> torch.Tensor:
        """Pack the data pointers of the given tensors into a device tensor."""
        # NOTE(woosuk): Use uint64 instead of int64 to cover all possible addresses.
        return torch.tensor(
            [t.data_ptr() for t in x], dtype=torch.uint64, device=self.device
        )

    def append_block_ids(
        self,
        req_index: int,
        new_block_ids: tuple[list[int], ...],
        overwrite: bool,
    ) -> None:
        """Stage new block IDs for one request (one list per KV-cache group).

        With `overwrite`, the request's row is rewritten from position 0;
        otherwise the IDs are appended after its existing blocks. The staged
        data reaches the GPU only after apply_staged_writes().
        """
        for i in range(self.num_kv_cache_groups):
            start = self.num_blocks.np[i, req_index] if not overwrite else 0
            block_ids = new_block_ids[i]
            self.block_tables[i].stage_write(req_index, start, block_ids)
            self.num_blocks.np[i, req_index] = start + len(block_ids)

    def apply_staged_writes(self) -> None:
        """Flush staged block-table writes and the block counts to the GPU."""
        # TODO(woosuk): This can be inefficient since it launches one kernel per
        # block table. Implement a kernel to handle all block tables at once.
        for block_table in self.block_tables:
            block_table.apply_write()
        self.num_blocks.copy_to_uva()

    def gather_block_tables(
        self, idx_mapping: torch.Tensor
    ) -> tuple[torch.Tensor, ...]:
        """Gather each batched request's block-table row (indexed via
        `idx_mapping`) into the dense per-batch input tables and return
        per-group views trimmed to the batch size."""
        num_reqs = idx_mapping.shape[0]
        _gather_block_tables_kernel[(self.num_kv_cache_groups, num_reqs)](
            idx_mapping,
            self.block_table_ptrs,
            self.input_block_table_ptrs,
            self.block_table_strides,
            self.num_blocks.gpu,
            self.num_blocks.gpu.stride(0),
            BLOCK_SIZE=1024,  # type: ignore
        )
        return tuple(block_table[:num_reqs] for block_table in self.input_block_tables)

    def get_dummy_block_tables(self, num_reqs: int) -> tuple[torch.Tensor, ...]:
        """Return the persistent input tables without populating them."""
        # NOTE(woosuk): The output may be used for CUDA graph capture.
        # Therefore, this method must return the persistent tensor
        # with the same memory address as that used during the model's forward pass,
        # rather than allocating a new tensor.
        return tuple(block_table[:num_reqs] for block_table in self.input_block_tables)

    def compute_slot_mappings(
        self,
        idx_mapping: torch.Tensor,
        query_start_loc: torch.Tensor,
        positions: torch.Tensor,
    ) -> torch.Tensor:
        """Compute per-token KV-cache slot IDs for every group and return a
        view trimmed to the number of tokens in the batch."""
        num_reqs = idx_mapping.shape[0]
        num_tokens = positions.shape[0]
        num_groups = self.num_kv_cache_groups
        # One extra program per group pads the tail of the slot mapping.
        _compute_slot_mappings_kernel[(num_groups, num_reqs + 1)](
            num_tokens,
            self.max_num_batched_tokens,
            idx_mapping,
            query_start_loc,
            positions,
            self.block_table_ptrs,
            self.block_table_strides,
            self.block_sizes_tensor,
            self.slot_mappings,
            self.slot_mappings.stride(0),
            self.cp_rank,
            CP_SIZE=self.cp_size,
            CP_INTERLEAVE=self.cp_interleave,
            PAD_ID=PAD_SLOT_ID,
            TRITON_BLOCK_SIZE=1024,  # type: ignore
        )
        return self.slot_mappings[:, :num_tokens]

    def get_dummy_slot_mappings(self, num_tokens: int) -> torch.Tensor:
        """Return padding-only slot mappings (e.g. for CUDA graph capture)."""
        # Fill the entire slot_mappings tensor, not just the first `num_tokens` entries.
        # This is because the padding logic is complex and kernels may access beyond
        # the requested range.
        self.slot_mappings.fill_(PAD_SLOT_ID)
        # NOTE(woosuk): The output may be used for CUDA graph capture.
        # Therefore, this method must return the persistent tensor
        # with the same memory address as that used during the model's forward pass,
        # rather than allocating a new tensor.
        return self.slot_mappings[:, :num_tokens]
@triton.jit
def _gather_block_tables_kernel(
    batch_idx_to_req_idx,  # [batch_size]
    src_block_table_ptrs,  # [num_kv_cache_groups]
    dst_block_table_ptrs,  # [num_kv_cache_groups]
    block_table_strides,  # [num_kv_cache_groups]
    num_blocks_ptr,  # [num_kv_cache_groups, max_num_reqs]
    num_blocks_stride,
    BLOCK_SIZE: tl.constexpr,
):
    """Copy one request's block-table row from its persistent slot
    (req_idx) to its position in the batch (batch_idx).

    Grid: (num_kv_cache_groups, batch_size) — one program per row.
    """
    # kv cache group id
    group_id = tl.program_id(0)
    batch_idx = tl.program_id(1)
    req_idx = tl.load(batch_idx_to_req_idx + batch_idx)
    # Number of valid blocks for this (group, request); only those are copied.
    group_num_blocks_ptr = num_blocks_ptr + group_id * num_blocks_stride
    num_blocks = tl.load(group_num_blocks_ptr + req_idx)
    stride = tl.load(block_table_strides + group_id)
    src_block_table_ptr = _load_ptr(src_block_table_ptrs + group_id, tl.int32)
    src_row_ptr = src_block_table_ptr + req_idx * stride
    dst_block_table_ptr = _load_ptr(dst_block_table_ptrs + group_id, tl.int32)
    dst_row_ptr = dst_block_table_ptr + batch_idx * stride
    # Copy the row in BLOCK_SIZE-wide chunks.
    for i in tl.range(0, num_blocks, BLOCK_SIZE):
        offset = i + tl.arange(0, BLOCK_SIZE)
        block_ids = tl.load(src_row_ptr + offset, mask=offset < num_blocks)
        tl.store(dst_row_ptr + offset, block_ids, mask=offset < num_blocks)
@triton.jit
def _compute_slot_mappings_kernel(
    num_tokens,
    max_num_tokens,
    idx_mapping,  # [num_reqs]
    query_start_loc,  # [num_reqs + 1]
    pos,  # [num_tokens]
    block_table_ptrs,  # [num_kv_cache_groups]
    block_table_strides,  # [num_kv_cache_groups]
    block_sizes,  # [num_kv_cache_groups]
    slot_mappings_ptr,  # [num_kv_cache_groups, max_num_tokens]
    slot_mappings_stride,
    cp_rank,
    CP_SIZE: tl.constexpr,
    CP_INTERLEAVE: tl.constexpr,
    PAD_ID: tl.constexpr,
    TRITON_BLOCK_SIZE: tl.constexpr,
):
    """Map each token's position to its KV-cache slot for one
    (group, request) pair.

    Grid: (num_kv_cache_groups, num_reqs + 1). The last program along axis 1
    does no mapping — it only pads [num_tokens, max_num_tokens) with PAD_ID.
    """
    # kv cache group id
    group_id = tl.program_id(0)
    batch_idx = tl.program_id(1)
    slot_mapping_ptr = slot_mappings_ptr + group_id * slot_mappings_stride
    if batch_idx == tl.num_programs(1) - 1:
        # Pad remaining slots to -1. This is needed for CUDA graphs.
        for i in range(num_tokens, max_num_tokens, TRITON_BLOCK_SIZE):
            offset = i + tl.arange(0, TRITON_BLOCK_SIZE)
            tl.store(slot_mapping_ptr + offset, PAD_ID, mask=offset < max_num_tokens)
        return
    block_table_ptr = _load_ptr(block_table_ptrs + group_id, tl.int32)
    block_table_stride = tl.load(block_table_strides + group_id)
    block_size = tl.load(block_sizes + group_id)
    req_state_idx = tl.load(idx_mapping + batch_idx)
    # Token range owned by this request within the flattened batch.
    start_idx = tl.load(query_start_loc + batch_idx)
    end_idx = tl.load(query_start_loc + batch_idx + 1)
    for i in range(start_idx, end_idx, TRITON_BLOCK_SIZE):
        offset = i + tl.arange(0, TRITON_BLOCK_SIZE)
        positions = tl.load(pos + offset, mask=offset < end_idx, other=0)
        # With CP, one local block covers block_size * CP_SIZE global tokens.
        block_indices = positions // (block_size * CP_SIZE)
        block_offsets = positions % (block_size * CP_SIZE)
        block_numbers = tl.load(
            block_table_ptr + req_state_idx * block_table_stride + block_indices
        )
        if CP_SIZE == 1:
            # Common case: Context parallelism is not used.
            slot_ids = block_numbers * block_size + block_offsets
        else:
            # Context parallelism is used.
            # Tokens are assigned round-robin to ranks in chunks of
            # CP_INTERLEAVE; tokens not owned by this rank get PAD_ID.
            is_local = block_offsets // CP_INTERLEAVE % CP_SIZE == cp_rank
            rounds = block_offsets // (CP_INTERLEAVE * CP_SIZE)
            remainder = block_offsets % CP_INTERLEAVE
            local_offsets = rounds * CP_INTERLEAVE + remainder
            slot_ids = block_numbers * block_size + local_offsets
            slot_ids = tl.where(is_local, slot_ids, PAD_ID)
        tl.store(slot_mapping_ptr + offset, slot_ids, mask=offset < end_idx)
@triton.jit
def _load_ptr(ptr_to_ptr, elem_dtype):
    """Dereference a device address stored in memory and cast it to a typed
    pointer, annotated as 16-byte aligned to help Triton vectorize."""
    ptr = tl.load(ptr_to_ptr)
    ptr = tl.cast(ptr, tl.pointer_type(elem_dtype))
    return tl.multiple_of(ptr, 16)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/block_table.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/cudagraph_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from vllm.config import VllmConfig
from vllm.config.compilation import CUDAGraphMode
from vllm.distributed.parallel_state import graph_capture, is_global_first_rank
from vllm.forward_context import BatchDescriptor, set_forward_context
from vllm.model_executor.offloader.base import get_offloader
from vllm.utils.math_utils import cdiv
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.worker.gpu.attn_utils import (
build_attn_metadata,
build_slot_mappings_by_layer,
)
from vllm.v1.worker.gpu.block_table import BlockTables
from vllm.v1.worker.gpu.dp_utils import make_num_tokens_across_dp
from vllm.v1.worker.gpu.input_batch import InputBuffers
from vllm.v1.worker.gpu.model_states.interface import ModelState
from vllm.v1.worker.utils import AttentionGroup
class CudaGraphManager:
    """Captures and replays CUDA graphs for the GPU model runner.

    Two size tables are maintained: ``cudagraph_sizes`` for mixed
    prefill-decode batches, and ``uniform_decode_cudagraph_sizes`` for
    uniform-decode batches (only populated when the decode mode is FULL with
    a separate routine). Captured FULL graphs are stored in ``self.graphs``
    keyed by padded token count; PIECEWISE capture is delegated to the
    CUDAGraphWrapper inside torch.compile.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        use_aux_hidden_state_outputs: bool,
        device: torch.device,
    ):
        self.vllm_config = vllm_config
        self.scheduler_config = vllm_config.scheduler_config
        self.use_aux_hidden_state_outputs = use_aux_hidden_state_outputs
        self.device = device

        self.max_model_len = vllm_config.model_config.max_model_len
        self.max_num_reqs = self.scheduler_config.max_num_seqs
        self.max_num_tokens = self.scheduler_config.max_num_batched_tokens
        self.dp_size = vllm_config.parallel_config.data_parallel_size

        # Query length of one uniform decode step: 1 token plus any
        # speculative tokens.
        self.uniform_decode_query_len = 1
        spec_config = vllm_config.speculative_config
        if spec_config is not None:
            self.uniform_decode_query_len += spec_config.num_speculative_tokens

        self.compilation_config = vllm_config.compilation_config
        assert self.compilation_config is not None
        self.cudagraph_mode = self.compilation_config.cudagraph_mode
        use_uniform_decode_cudagraph = (
            self.cudagraph_mode.decode_mode() == CUDAGraphMode.FULL
            and self.cudagraph_mode.separate_routine()
        )
        self.cudagraph_sizes, self.uniform_decode_cudagraph_sizes = get_cudagraph_sizes(
            self.compilation_config.cudagraph_capture_sizes,
            self.max_num_reqs,
            self.max_num_tokens,
            self.cudagraph_mode,
            self.uniform_decode_query_len,
            use_uniform_decode_cudagraph,
        )

        # num_tokens -> captured FULL graph.
        self.graphs: dict[int, torch.cuda.CUDAGraph] = {}
        self.pool = None
        if self.cudagraph_mode != CUDAGraphMode.NONE:
            # Single memory pool shared by all captured graphs.
            self.pool = torch.cuda.graph_pool_handle()

        # Static output buffers: written inside the captured graph, read
        # (sliced) after each replay.
        self.hidden_states: torch.Tensor | None = None
        self.aux_hidden_states: list[torch.Tensor] = []

    def needs_capture(self) -> bool:
        """Return True if any cudagraph sizes are configured for capture."""
        return len(self.cudagraph_sizes) > 0

    def get_cudagraph_size(
        self, num_tokens: int, uniform_decode: bool = False
    ) -> int | None:
        """Return the padded capture size for `num_tokens`, or None if no
        captured size covers it."""
        if uniform_decode and self.uniform_decode_cudagraph_sizes:
            return self.uniform_decode_cudagraph_sizes.get(num_tokens)
        return self.cudagraph_sizes.get(num_tokens)

    def capture_graph(
        self,
        num_tokens: int,
        capture_cg_mode: CUDAGraphMode,
        model: nn.Module,
        model_state: ModelState,
        input_buffers: InputBuffers,
        block_tables: BlockTables,
        attn_groups: list[list[AttentionGroup]],
        kv_cache_config: KVCacheConfig,
        has_lora: bool = False,
        uniform_decode: bool = False,
    ) -> None:
        """Warm up the model once eagerly, then capture one graph of
        `num_tokens` tokens in the requested mode (PIECEWISE or FULL)."""
        # Select and check capture function.
        assert capture_cg_mode in [CUDAGraphMode.PIECEWISE, CUDAGraphMode.FULL], (
            f"Invalid capture_cudagraph_mode for capture: {capture_cg_mode}"
        )
        if capture_cg_mode == CUDAGraphMode.PIECEWISE:
            capture_fn = self._capture_piecewise_graph
        else:
            capture_fn = self._capture_full_graph

        # Prepare inputs. For uniform decode, each request contributes
        # uniform_decode_query_len tokens.
        if uniform_decode:
            num_reqs = min(
                cdiv(num_tokens, self.uniform_decode_query_len),
                self.max_num_reqs,
            )
        else:
            num_reqs = min(num_tokens, self.max_num_reqs)
        model_inputs = {
            "input_ids": input_buffers.input_ids[:num_tokens],
            "positions": input_buffers.positions[:num_tokens],
            # NOTE: Values returned by `prepare_dummy_inputs` will override the
            # default values above.
            **model_state.prepare_dummy_inputs(num_reqs, num_tokens),
        }
        attn_metadata, slot_mappings = prepare_inputs_to_capture(
            num_reqs,
            num_tokens,
            input_buffers,
            block_tables,
            attn_groups,
            self.max_model_len,
            kv_cache_config,
            uniform_decode_query_len=(
                self.uniform_decode_query_len if uniform_decode else 0
            ),
        )
        num_tokens_across_dp = make_num_tokens_across_dp(self.dp_size, num_tokens)

        # Warm up (eager run before capture).
        with set_forward_context(
            attn_metadata,
            self.vllm_config,
            num_tokens=num_tokens,
            cudagraph_runtime_mode=CUDAGraphMode.NONE,
            num_tokens_across_dp=num_tokens_across_dp,
            slot_mapping=slot_mappings,
        ):
            model_output = model(**model_inputs)
        if self.use_aux_hidden_state_outputs:
            hidden_states, aux_hidden_states = model_output
        else:
            hidden_states = model_output
            aux_hidden_states = None

        # Allocate output buffers if not already done.
        if self.hidden_states is None:
            self.hidden_states = torch.empty_like(hidden_states)
        if self.use_aux_hidden_state_outputs and not self.aux_hidden_states:
            self.aux_hidden_states = [torch.empty_like(x) for x in aux_hidden_states]

        capture_fn(
            num_tokens=num_tokens,
            num_reqs=num_reqs,
            model=model,
            model_inputs=model_inputs,
            num_tokens_across_dp=num_tokens_across_dp,
            attn_metadata=attn_metadata,
            slot_mappings=slot_mappings,
            has_lora=has_lora,
        )

    def _capture_full_graph(
        self,
        num_tokens: int,
        num_reqs: int,
        model: nn.Module,
        model_inputs: dict[str, torch.Tensor | None],
        num_tokens_across_dp: torch.Tensor,
        attn_metadata: dict[str, Any] | None,
        slot_mappings: dict[str, torch.Tensor] | None,
        has_lora: bool = False,
    ) -> None:
        """Capture the whole forward pass (including attention) into a
        single CUDA graph and store it in self.graphs[num_tokens]."""
        assert attn_metadata is not None
        # Capture the graph.
        assert num_tokens not in self.graphs
        graph = torch.cuda.CUDAGraph()
        # Sync offloader's copy stream before capture.
        # Ensure any pre-capture prefetches from offloader are complete.
        get_offloader().sync_prev_onload()
        with (
            set_forward_context(
                attn_metadata=attn_metadata,
                vllm_config=self.vllm_config,
                num_tokens=num_tokens,
                cudagraph_runtime_mode=CUDAGraphMode.NONE,
                num_tokens_across_dp=num_tokens_across_dp,
                slot_mapping=slot_mappings,
            ),
            torch.cuda.graph(graph, self.pool),
        ):
            model_output = model(**model_inputs)
            # Join offloader's copy stream after forward to avoid unjoined
            # stream error. The last layer's start_prefetch forks copy_stream,
            # but wait_prefetch only happens in the next forward pass.
            get_offloader().join_after_forward()
            if self.use_aux_hidden_state_outputs:
                hidden_states, aux_hidden_states = model_output
            else:
                hidden_states = model_output
                aux_hidden_states = None
            # Copy outputs to the static output buffers; these copies are part
            # of the captured graph, so each replay refreshes the buffers.
            assert self.hidden_states is not None
            self.hidden_states[:num_tokens] = hidden_states
            if self.use_aux_hidden_state_outputs:
                for i, aux_hidden in enumerate(aux_hidden_states):
                    self.aux_hidden_states[i][:num_tokens] = aux_hidden
        self.graphs[num_tokens] = graph

    def _capture_piecewise_graph(
        self,
        num_tokens: int,
        num_reqs: int,
        model: nn.Module,
        model_inputs: dict[str, torch.Tensor | None],
        num_tokens_across_dp: torch.Tensor,
        attn_metadata: dict[str, Any] | None,
        slot_mappings: dict[str, torch.Tensor] | None,
        has_lora: bool = False,
    ) -> None:
        """Trigger piecewise capture: the CUDAGraphWrapper inside
        torch.compile captures the graph pieces during this forward."""
        # Create batch descriptor for piecewise cudagraph dispatch key.
        batch_descriptor = BatchDescriptor(num_tokens=num_tokens, has_lora=has_lora)
        # Capture run - CUDAGraphWrapper inside torch.compile will auto capture.
        with set_forward_context(
            attn_metadata=None,  # piecewise no need attn_metadata
            vllm_config=self.vllm_config,
            num_tokens=num_tokens,
            cudagraph_runtime_mode=CUDAGraphMode.PIECEWISE,
            num_tokens_across_dp=num_tokens_across_dp,
            batch_descriptor=batch_descriptor,
            slot_mapping=slot_mappings,
        ):
            model(**model_inputs)

    @torch.inference_mode()
    def capture(
        self,
        model: nn.Module,
        model_state: ModelState,
        input_buffers: InputBuffers,
        block_tables: BlockTables,
        attn_groups: list[list[AttentionGroup]],
        kv_cache_config: KVCacheConfig,
        has_lora: bool = False,
    ) -> None:
        """Capture all configured graph sizes: first for mixed batches, then
        (if enabled) FULL graphs for uniform decode batches."""
        common_kwargs = dict(
            device=self.device,
            capture_fn=self.capture_graph,
            model=model,
            model_state=model_state,
            input_buffers=input_buffers,
            block_tables=block_tables,
            attn_groups=attn_groups,
            kv_cache_config=kv_cache_config,
            has_lora=has_lora,
        )
        # Phase 1: Capture for mixed prefill-decode batches if needed.
        mixed_mode = self.cudagraph_mode.mixed_mode()
        if mixed_mode != CUDAGraphMode.NONE:
            capture_graphs(
                cudagraph_sizes=self.cudagraph_sizes,
                capture_cudagraph_mode=mixed_mode,
                desc=f"Capturing CUDA graphs (mixed, {mixed_mode.name})",
                uniform_decode=False,
                **common_kwargs,
            )
        # Phase 2: Capture FULL graphs for uniform decode batches if needed.
        # This is only needed if we use a separate routine for decode batches
        # and the decode_mode is FULL.
        if self.uniform_decode_cudagraph_sizes:
            capture_graphs(
                cudagraph_sizes=self.uniform_decode_cudagraph_sizes,
                capture_cudagraph_mode=CUDAGraphMode.FULL,
                desc="Capturing CUDA graphs (decode, FULL)",
                uniform_decode=True,
                **common_kwargs,
            )

    def get_cudagraph_runtime_mode(
        self, num_reqs: int, num_tokens: int, max_query_len: int
    ) -> tuple[CUDAGraphMode, int | None]:
        """Decide which cudagraph mode (and padded size) to run this batch
        with; falls back to eager when no suitable graph exists."""
        is_uniform_decode = (max_query_len == self.uniform_decode_query_len) and (
            num_tokens == max_query_len * num_reqs
        )
        cudagraph_size = self.get_cudagraph_size(num_tokens, is_uniform_decode)
        if cudagraph_size is None:
            cudagraph_mode = CUDAGraphMode.NONE
        elif is_uniform_decode:
            cudagraph_mode = self.cudagraph_mode.decode_mode()
        else:
            cudagraph_mode = self.cudagraph_mode.mixed_mode()
        if (
            cudagraph_mode == CUDAGraphMode.FULL
            and cudagraph_size is not None
            and cudagraph_size not in self.graphs
        ):
            # If graph wasn't captured yet, fall back to eager.
            # This might happen when the dummy run is called before capture.
            cudagraph_mode = CUDAGraphMode.NONE
            cudagraph_size = None
        return cudagraph_mode, cudagraph_size

    def run_fullgraph(
        self, num_tokens: int
    ) -> torch.Tensor | tuple[torch.Tensor, list[torch.Tensor]]:
        """Replay the captured FULL graph for `num_tokens` tokens and return
        views into the static output buffers."""
        assert num_tokens in self.graphs, f"No cudagraph for {num_tokens} tokens"
        # Sync offloader before replay - needed when transitioning from
        # eager/piecewise to full cudagraph (e.g., prefill → decode).
        # The previous eager iteration's start_prefetch may have queued
        # H2D copies on copy_stream that the graph's captured events
        # cannot see. Without this, replay could overwrite static buffers
        # while those copies are still in flight.
        get_offloader().sync_prev_onload()
        self.graphs[num_tokens].replay()
        assert self.hidden_states is not None
        hidden_states = self.hidden_states[:num_tokens]
        if not self.use_aux_hidden_state_outputs:
            return hidden_states
        return hidden_states, [x[:num_tokens] for x in self.aux_hidden_states]
def get_cudagraph_sizes(
capture_sizes: list[int] | None,
max_num_reqs: int,
max_num_tokens: int,
cudagraph_mode: CUDAGraphMode,
uniform_decode_query_len: int = 1,
uniform_decode_cudagraph: bool = False,
) -> tuple[dict[int, int], dict[int, int]]:
# Support both FULL and PIECEWISE cudagraph modes
if cudagraph_mode == CUDAGraphMode.NONE:
return {}, {}
if not capture_sizes:
return {}, {}
capture_sizes = sorted(capture_sizes)
if not capture_sizes:
return {}, {}
cudagraph_sizes: dict[int, int] = {}
for i in range(1, capture_sizes[-1] + 1):
for x in capture_sizes:
if i <= x:
cudagraph_sizes[i] = x
break
uniform_decode_cudagraph_sizes: dict[int, int] = {}
if uniform_decode_cudagraph:
max_num_tokens = max_num_reqs * uniform_decode_query_len
uniform_decode_cudagraph_sizes = {
k: v
for k, v in cudagraph_sizes.items()
if v <= max_num_tokens and v >= uniform_decode_query_len
}
return cudagraph_sizes, uniform_decode_cudagraph_sizes
def capture_graphs(
    cudagraph_sizes: dict[int, int],
    device: torch.device,
    capture_fn: Callable,
    capture_cudagraph_mode: CUDAGraphMode,
    desc: str = "Capturing CUDA graphs",
    **capture_kwargs,
) -> None:
    """Invoke `capture_fn` once per distinct capture size.

    Sizes are visited largest-first; a progress bar is shown only on the
    global first rank.
    """
    unique_sizes = sorted(set(cudagraph_sizes.values()), reverse=True)
    size_iter = (
        tqdm(unique_sizes, desc=desc) if is_global_first_rank() else unique_sizes
    )
    with graph_capture(device=device):
        for capture_size in size_iter:
            capture_fn(capture_size, capture_cudagraph_mode, **capture_kwargs)
def prepare_inputs_to_capture(
    num_reqs: int,
    num_tokens: int,
    input_buffers: InputBuffers,
    block_tables: BlockTables,
    attn_groups: list[list[AttentionGroup]],
    max_model_len: int,
    kv_cache_config: KVCacheConfig,
    uniform_decode_query_len: int = 0,
) -> tuple[dict[str, Any], dict[str, torch.Tensor]]:
    """Populate the input buffers with a dummy batch and build the attention
    metadata / per-layer slot mappings used during graph capture."""
    tokens_per_req = (
        uniform_decode_query_len
        if uniform_decode_query_len > 0
        else num_tokens // num_reqs
    )

    # Evenly spaced query boundaries; the final boundary is clamped so the
    # total matches num_tokens exactly.
    qsl_np = np.arange(num_reqs + 1, dtype=np.int32) * tokens_per_req
    qsl_np[-1] = num_tokens
    qsl_cpu = torch.from_numpy(qsl_np)
    input_buffers.query_start_loc[: num_reqs + 1] = qsl_cpu
    input_buffers.query_start_loc[num_reqs + 1 :] = num_tokens
    qsl_gpu = input_buffers.query_start_loc[: num_reqs + 1]

    # HACK(woosuk): For faster warmup, we set seq_lens (GPU) to num_tokens
    # rather than max_model_len.
    input_buffers.seq_lens[:num_reqs] = num_tokens
    input_buffers.seq_lens[num_reqs:] = 0
    input_buffers.dcp_local_seq_lens[:num_reqs] = num_tokens
    input_buffers.dcp_local_seq_lens[num_reqs:] = 0

    dummy_block_tables = block_tables.get_dummy_block_tables(num_reqs)
    dummy_slot_mappings = block_tables.get_dummy_slot_mappings(num_tokens)
    slot_mappings_by_layer = build_slot_mappings_by_layer(
        dummy_slot_mappings, kv_cache_config
    )
    attn_metadata = build_attn_metadata(
        attn_groups=attn_groups,
        num_reqs=num_reqs,
        num_tokens=num_tokens,
        query_start_loc_gpu=qsl_gpu,
        query_start_loc_cpu=qsl_cpu,
        max_query_len=tokens_per_req,
        seq_lens=input_buffers.seq_lens,
        max_seq_len=max_model_len,
        block_tables=dummy_block_tables,
        slot_mappings=dummy_slot_mappings,
        kv_cache_config=kv_cache_config,
        dcp_local_seq_lens=input_buffers.dcp_local_seq_lens,
    )
    return attn_metadata, slot_mappings_by_layer
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/cudagraph_utils.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/dp_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.distributed as dist
from vllm.distributed.parallel_state import get_dp_group
def make_num_tokens_across_dp(dp_size: int, num_tokens: int) -> torch.Tensor | None:
if dp_size == 1:
return None
return torch.full((dp_size,), num_tokens, dtype=torch.int32, device="cpu")
def get_batch_metadata_across_dp(
    num_tokens: int,
    cudagraph_size: int,
    cudagraph_runtime_mode: int,
    dp_size: int,
    dp_rank: int,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Exchange (num_tokens, cudagraph_size, cudagraph_runtime_mode) across
    all DP ranks via a sum all-reduce and return the three gathered rows."""
    assert dp_size > 1
    # Use the CPU process group so no CPU-GPU synchronization is needed.
    cpu_group = get_dp_group().cpu_group
    meta = torch.zeros(3, dp_size, dtype=torch.int32, device="cpu")
    # Each rank fills only its own column; summing across ranks therefore
    # gathers every rank's values.
    meta[0, dp_rank] = num_tokens
    meta[1, dp_rank] = cudagraph_size
    meta[2, dp_rank] = cudagraph_runtime_mode
    dist.all_reduce(meta, group=cpu_group)
    return meta[0], meta[1], meta[2]
def get_cudagraph_and_dp_padding(
num_tokens: int,
cudagraph_size: int | None,
cudagraph_runtime_mode: int,
dp_size: int,
dp_rank: int,
) -> tuple[int, torch.Tensor | None, int]:
if dp_size == 1:
if cudagraph_size is not None:
return cudagraph_size, None, cudagraph_runtime_mode
else:
return num_tokens, None, cudagraph_runtime_mode
# Convert None to -1 for sync (indicates no cudagraph available)
if num_tokens == 0:
cudagraph_size = 0
elif cudagraph_size is None:
cudagraph_size = -1
num_tokens_across_dp, cudagraph_size_across_dp, cudagraph_mode_across_dp = (
get_batch_metadata_across_dp(
num_tokens, cudagraph_size, cudagraph_runtime_mode, dp_size, dp_rank
)
)
if torch.all(num_tokens_across_dp == 0).item():
# All ranks have zero tokens to run.
return 0, None, 0
# Synchronize cudagraph_runtime_mode across ranks by taking the minimum.
synced_cudagraph_mode = int(cudagraph_mode_across_dp.min().item())
# Check if all ranks have valid cudagraph_size.
all_have_cudagraph = torch.all(cudagraph_size_across_dp != -1).item()
if synced_cudagraph_mode != 0 and all_have_cudagraph:
# All ranks use cudagraph. Pad to max cudagraph_size.
max_cudagraph_size = int(cudagraph_size_across_dp.max().item())
num_tokens_across_dp[:] = max_cudagraph_size
return max_cudagraph_size, num_tokens_across_dp, synced_cudagraph_mode
else:
# Fall back to eager mode (no cudagraph).
# Either some rank doesn't have cudagraph size or mode is NONE.
synced_cudagraph_mode = 0
num_tokens_across_dp = torch.clamp(num_tokens_across_dp, min=1)
num_tokens_after_padding = int(num_tokens_across_dp[dp_rank].item())
return num_tokens_after_padding, num_tokens_across_dp, synced_cudagraph_mode
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/dp_utils.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/input_batch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
import numpy as np
import torch
from vllm.triton_utils import tl, triton
from vllm.utils import random_uuid
class InputBuffers:
    """Pre-allocated device buffers reused across model-runner iterations."""

    def __init__(
        self,
        max_num_reqs: int,
        max_num_tokens: int,
        device: torch.device,
    ):
        self.max_num_reqs = max_num_reqs
        self.max_num_tokens = max_num_tokens
        self.device = device

        def _zeros(numel: int, dtype: torch.dtype) -> torch.Tensor:
            return torch.zeros(numel, dtype=dtype, device=device)

        # Flat per-token buffers.
        self.input_ids = _zeros(max_num_tokens, torch.int32)
        self.positions = _zeros(max_num_tokens, torch.int64)
        # Per-request buffers; query_start_loc has one extra slot for the
        # cumulative-sum prefix.
        self.query_start_loc = _zeros(max_num_reqs + 1, torch.int32)
        self.seq_lens = _zeros(max_num_reqs, torch.int32)
        # DCP: per-request local seq_lens buffer.
        self.dcp_local_seq_lens = _zeros(max_num_reqs, torch.int32)
@dataclass
class InputBatch:
    """Flattened view of one scheduling step's batch, plus the GPU tensors
    (views into InputBuffers) the model consumes."""

    # batch_idx -> req_id
    req_ids: list[str]
    num_reqs: int
    # batch_idx -> req_state_idx
    idx_mapping: torch.Tensor
    idx_mapping_np: np.ndarray
    # Identical to idx_mapping except for spec decoding.
    expanded_idx_mapping: torch.Tensor
    # [total_num_logits] position within request for each logit
    expanded_local_pos: torch.Tensor
    # [num_reqs]
    # batch_idx -> num_scheduled_tokens
    num_scheduled_tokens: np.ndarray
    # sum(num_scheduled_tokens)
    num_tokens: int
    num_tokens_after_padding: int
    num_draft_tokens: int
    # [num_reqs + 1]
    query_start_loc: torch.Tensor
    query_start_loc_np: np.ndarray
    # [num_reqs]
    seq_lens: torch.Tensor
    # [num_reqs]
    dcp_local_seq_lens: torch.Tensor | None
    # [num_tokens_after_padding]
    input_ids: torch.Tensor
    # [num_tokens_after_padding]
    positions: torch.Tensor
    # [total_num_logits]
    logits_indices: torch.Tensor
    # [num_reqs + 1]
    cu_num_logits: torch.Tensor
    cu_num_logits_np: np.ndarray
    # Whether any requests in batch use structured output.
    has_structured_output_reqs: bool

    @classmethod
    def make_dummy(
        cls,
        num_reqs: int,
        num_tokens: int,
        input_buffers: InputBuffers,
        device: torch.device,
    ) -> "InputBatch":
        """Build a synthetic batch of `num_reqs` requests totaling
        `num_tokens` tokens (e.g. for warmup), writing into input_buffers."""
        assert 0 < num_reqs <= num_tokens
        req_ids = [f"req_{i}_{random_uuid()}" for i in range(num_reqs)]
        idx_mapping_np = np.arange(num_reqs, dtype=np.int32)
        idx_mapping = torch.arange(num_reqs, dtype=torch.int32, device=device)
        expanded_idx_mapping = idx_mapping
        expanded_local_pos = torch.zeros(num_reqs, dtype=torch.int32, device=device)
        # Spread tokens evenly; the last request absorbs the remainder.
        num_scheduled_tokens = np.full(num_reqs, num_tokens // num_reqs, dtype=np.int32)
        num_scheduled_tokens[-1] += num_tokens % num_reqs
        assert int(num_scheduled_tokens.sum()) == num_tokens
        # seq_len equals to query_len (no previously computed tokens).
        input_buffers.seq_lens[:num_reqs] = num_tokens // num_reqs
        input_buffers.seq_lens[num_reqs - 1] += num_tokens % num_reqs
        # Pad for full CUDA graph mode.
        input_buffers.seq_lens[num_reqs:] = 0
        seq_lens = input_buffers.seq_lens[:num_reqs]
        query_start_loc_np = np.empty(num_reqs + 1, dtype=np.int32)
        query_start_loc_np[0] = 0
        np.cumsum(num_scheduled_tokens, out=query_start_loc_np[1:])
        input_buffers.query_start_loc[:1] = 0
        torch.cumsum(
            seq_lens, dim=0, out=input_buffers.query_start_loc[1 : num_reqs + 1]
        )
        # Pad for full CUDA graph mode.
        input_buffers.query_start_loc[num_reqs + 1 :] = num_tokens
        query_start_loc = input_buffers.query_start_loc[: num_reqs + 1]
        input_ids = input_buffers.input_ids[:num_tokens].zero_()
        positions = input_buffers.positions[:num_tokens].zero_()
        # One logit per request, taken at each request's last token.
        logits_indices = query_start_loc[1:] - 1
        cu_num_logits = torch.arange(num_reqs + 1, device=device, dtype=torch.int32)
        cu_num_logits_np = np.arange(num_reqs + 1, dtype=np.int32)
        return cls(
            req_ids=req_ids,
            num_reqs=num_reqs,
            idx_mapping=idx_mapping,
            idx_mapping_np=idx_mapping_np,
            expanded_idx_mapping=expanded_idx_mapping,
            expanded_local_pos=expanded_local_pos,
            num_scheduled_tokens=num_scheduled_tokens,
            num_tokens=num_tokens,
            num_tokens_after_padding=num_tokens,
            num_draft_tokens=0,
            query_start_loc=query_start_loc,
            query_start_loc_np=query_start_loc_np,
            seq_lens=seq_lens,
            dcp_local_seq_lens=None,
            input_ids=input_ids,
            positions=positions,
            logits_indices=logits_indices,
            cu_num_logits=cu_num_logits,
            cu_num_logits_np=cu_num_logits_np,
            has_structured_output_reqs=False,
        )
@triton.jit
def _prepare_prefill_inputs_kernel(
    input_ids_ptr,
    next_prefill_tokens_ptr,
    idx_mapping_ptr,
    query_start_loc_ptr,
    all_token_ids_ptr,
    all_token_ids_stride,
    prefill_lens_ptr,
    num_computed_tokens_ptr,
    BLOCK_SIZE: tl.constexpr,
):
    # One program per request: copy this step's scheduled prompt tokens into
    # input_ids, and stash the first token of the next chunk (if any).
    batch_idx = tl.program_id(0)
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    prefill_len = tl.load(prefill_lens_ptr + req_state_idx)
    num_computed = tl.load(num_computed_tokens_ptr + req_state_idx)
    if num_computed >= prefill_len:
        # Not prefill.
        return
    query_start = tl.load(query_start_loc_ptr + batch_idx)
    query_end = tl.load(query_start_loc_ptr + batch_idx + 1)
    query_len = query_end - query_start
    # Base of this request's row in the [max_num_reqs, max_model_len] table.
    request_ptr = all_token_ids_ptr + req_state_idx * all_token_ids_stride
    # Copy tokens [num_computed, num_computed + query_len) into the flat
    # input_ids at [query_start, query_end), BLOCK_SIZE at a time.
    for i in range(0, query_len, BLOCK_SIZE):
        block = i + tl.arange(0, BLOCK_SIZE)
        mask = block < query_len
        tokens = tl.load(request_ptr + num_computed + block, mask=mask)
        tl.store(input_ids_ptr + query_start + block, tokens, mask=mask)
    next_pos = num_computed + query_len
    if next_pos < prefill_len:
        # Chunked prefill: record the next chunk's first token so the next
        # step can read it without touching the full token table.
        next_token = tl.load(request_ptr + next_pos)
        tl.store(next_prefill_tokens_ptr + req_state_idx, next_token)
def prepare_prefill_inputs(
    input_ids: torch.Tensor,
    next_prefill_tokens: torch.Tensor,
    idx_mapping: torch.Tensor,
    query_start_loc: torch.Tensor,
    all_token_ids: torch.Tensor,
    prefill_len: torch.Tensor,
    num_computed_tokens: torch.Tensor,
) -> None:
    """Launch one kernel program per request to copy its scheduled prefill
    tokens into the flat input_ids buffer (no-op for non-prefill requests)."""
    grid = (idx_mapping.shape[0],)
    _prepare_prefill_inputs_kernel[grid](
        input_ids,
        next_prefill_tokens,
        idx_mapping,
        query_start_loc,
        all_token_ids,
        all_token_ids.stride(0),
        prefill_len,
        num_computed_tokens,
        BLOCK_SIZE=1024,
    )
@triton.jit
def _prepare_pos_seq_lens_kernel(
    pos_ptr,
    seq_lens_ptr,
    idx_mapping_ptr,
    query_start_loc_ptr,
    num_computed_tokens_ptr,
    max_num_reqs,
    BLOCK_SIZE: tl.constexpr,
):
    # Launched with grid (num_reqs + 1,): one program per request, plus one
    # extra program that zero-pads the unused tail of seq_lens.
    req_id = tl.program_id(0)
    num_reqs = tl.num_programs(0) - 1
    if req_id == num_reqs:
        # Pad unused seq_lens as 0 for full CUDA graphs.
        for i in tl.range(num_reqs, max_num_reqs, BLOCK_SIZE):
            block = i + tl.arange(0, BLOCK_SIZE)
            mask = block < max_num_reqs
            tl.store(seq_lens_ptr + block, 0, mask=mask)
        return
    req_state_idx = tl.load(idx_mapping_ptr + req_id)
    num_computed_tokens = tl.load(num_computed_tokens_ptr + req_state_idx)
    start = tl.load(query_start_loc_ptr + req_id)
    end = tl.load(query_start_loc_ptr + req_id + 1)
    query_len = end - start
    # seq_len = previously computed tokens + tokens scheduled this step.
    seq_len = num_computed_tokens + query_len
    tl.store(seq_lens_ptr + req_id, seq_len)
    # Token positions for this request are consecutive, starting at
    # num_computed_tokens.
    for i in tl.range(0, query_len, BLOCK_SIZE):
        block = i + tl.arange(0, BLOCK_SIZE)
        mask = block < query_len
        pos = num_computed_tokens + block
        tl.store(pos_ptr + start + block, pos, mask=mask)
def prepare_pos_seq_lens(
    idx_mapping: torch.Tensor,
    query_start_loc: torch.Tensor,
    num_computed_tokens: torch.Tensor,
    pos: torch.Tensor,
    seq_lens: torch.Tensor,
) -> None:
    """Fill per-token positions and per-request seq_lens on the GPU."""
    batch_size = idx_mapping.shape[0]
    # NOTE(woosuk): We do +1 because the last thread block is used
    # to pad unused seq_lens as 0 for full CUDA graphs.
    grid = (batch_size + 1,)
    _prepare_pos_seq_lens_kernel[grid](
        pos,
        seq_lens,
        idx_mapping,
        query_start_loc,
        num_computed_tokens,
        seq_lens.shape[0],
        BLOCK_SIZE=1024,
    )
@triton.jit
def _combine_sampled_and_draft_tokens_kernel(
    input_ids_ptr,
    idx_mapping_ptr,
    last_sampled_tokens_ptr,
    query_start_loc_ptr,
    seq_lens_ptr,
    prefill_len_ptr,
    draft_tokens_ptr,
    draft_tokens_stride,
    cu_num_logits_ptr,
    logits_indices_ptr,
    BLOCK_SIZE: tl.constexpr,
):
    # One program per request: compute the request's logits indices, and for
    # decode requests write the last sampled token plus any draft tokens into
    # the tail of its input_ids slice.
    batch_idx = tl.program_id(0)
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    # Get the number of logits and draft tokens.
    cu_num_logits_start = tl.load(cu_num_logits_ptr + batch_idx)
    cu_num_logits_end = tl.load(cu_num_logits_ptr + batch_idx + 1)
    num_logits = cu_num_logits_end - cu_num_logits_start
    num_draft_tokens = num_logits - 1
    # Compute the logits indices: the last num_logits token slots of this
    # request's query.
    block = tl.arange(0, BLOCK_SIZE)
    query_end = tl.load(query_start_loc_ptr + batch_idx + 1)
    logits_start = query_end - num_logits
    tl.store(
        logits_indices_ptr + cu_num_logits_start + block,
        logits_start + block,
        mask=block < num_logits,
    )
    seq_len = tl.load(seq_lens_ptr + batch_idx)
    prefill_len = tl.load(prefill_len_ptr + req_state_idx)
    if seq_len <= prefill_len:
        # Handling prefill tokens. No sampled or draft tokens.
        return
    # Write the last sampled token ID to input_ids.
    last_token_id = tl.load(last_sampled_tokens_ptr + req_state_idx)
    tl.store(input_ids_ptr + query_end - num_logits, last_token_id)
    # Write the draft tokens (if any) to input_ids.
    if num_draft_tokens > 0:
        mask = block < num_draft_tokens
        draft_tokens = tl.load(
            draft_tokens_ptr + req_state_idx * draft_tokens_stride + block,
            mask=mask,
        )
        tl.store(
            input_ids_ptr + query_end - num_draft_tokens + block,
            draft_tokens,
            mask=mask,
        )
def combine_sampled_and_draft_tokens(
    input_ids: torch.Tensor,
    idx_mapping: torch.Tensor,
    last_sampled_tokens: torch.Tensor,
    query_start_loc: torch.Tensor,
    seq_lens: torch.Tensor,
    prefill_len: torch.Tensor,
    draft_tokens: torch.Tensor,
    cu_num_logits: torch.Tensor,
    num_logits: int,
) -> torch.Tensor:
    """Write the last sampled token and draft tokens of each decode request
    into input_ids, and return the flat logits indices for the batch."""
    logits_indices = torch.empty(
        num_logits,
        dtype=torch.int64,
        device=input_ids.device,
    )
    # NOTE(woosuk): Add 1 to ensure the block can cover the last sampled token
    # in addition to all draft tokens.
    block_size = triton.next_power_of_2(draft_tokens.shape[-1] + 1)
    grid = (seq_lens.shape[0],)
    _combine_sampled_and_draft_tokens_kernel[grid](
        input_ids,
        idx_mapping,
        last_sampled_tokens,
        query_start_loc,
        seq_lens,
        prefill_len,
        draft_tokens,
        draft_tokens.stride(0),
        cu_num_logits,
        logits_indices,
        BLOCK_SIZE=block_size,
    )
    return logits_indices
@triton.jit
def _get_num_sampled_and_rejected_kernel(
    num_sampled_ptr,
    num_rejected_ptr,
    seq_lens_ptr,
    cu_num_logits_ptr,
    idx_mapping_ptr,
    prefill_len_ptr,
):
    # One program per request: zero out num_sampled/num_rejected for
    # requests still in chunked prefill, and derive num_rejected from the
    # logit count otherwise.
    batch_idx = tl.program_id(0)
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    seq_len = tl.load(seq_lens_ptr + batch_idx)
    prefill_len = tl.load(prefill_len_ptr + req_state_idx)
    # Still mid-prefill: nothing is sampled or rejected this step.
    is_chunked_prefilling = seq_len < prefill_len
    num_sampled = tl.load(num_sampled_ptr + batch_idx)
    num_sampled = tl.where(is_chunked_prefilling, 0, num_sampled)
    # num_sampled is updated in place.
    tl.store(num_sampled_ptr + batch_idx, num_sampled)
    logits_start = tl.load(cu_num_logits_ptr + batch_idx)
    logits_end = tl.load(cu_num_logits_ptr + batch_idx + 1)
    num_logits = logits_end - logits_start
    # Every logit that did not yield an accepted token is a rejection.
    num_rejected = num_logits - num_sampled
    num_rejected = tl.where(is_chunked_prefilling, 0, num_rejected)
    tl.store(num_rejected_ptr + batch_idx, num_rejected)
def get_num_sampled_and_rejected(
    num_sampled: torch.Tensor,
    seq_lens: torch.Tensor,
    cu_num_logits: torch.Tensor,
    idx_mapping: torch.Tensor,
    prefill_len: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Normalize per-request accepted-token counts (num_sampled is updated
    in place) and compute the matching rejected-token counts."""
    num_rejected = torch.empty_like(num_sampled)
    grid = (idx_mapping.shape[0],)
    _get_num_sampled_and_rejected_kernel[grid](
        num_sampled,
        num_rejected,
        seq_lens,
        cu_num_logits,
        idx_mapping,
        prefill_len,
    )
    return num_sampled, num_rejected
@triton.jit
def _post_update_kernel(
    idx_mapping_ptr,
    num_computed_tokens_ptr,
    last_sampled_tokens_ptr,
    output_bin_counts_ptr,
    output_bin_counts_stride,
    sampled_tokens_ptr,
    sampled_tokens_stride,
    num_sampled_ptr,
    num_rejected_ptr,
    query_start_loc_ptr,
    all_token_ids_ptr,
    all_token_ids_stride,
    total_len_ptr,
    ):
    # One program per request: fold this step's accepted tokens back into the
    # persistent request state (token history, bin counts, lengths).
    req_id = tl.program_id(0)
    req_state_idx = tl.load(idx_mapping_ptr + req_id)
    total_len = tl.load(total_len_ptr + req_state_idx)
    num_sampled = tl.load(num_sampled_ptr + req_id)
    if num_sampled > 0:
        # Remember the newest accepted token; it seeds the next decode step.
        token_id = tl.load(
            sampled_tokens_ptr + req_id * sampled_tokens_stride + num_sampled - 1
        )
        tl.store(last_sampled_tokens_ptr + req_state_idx, token_id)
        tl.store(total_len_ptr + req_state_idx, total_len + num_sampled)
    # Append each accepted token to the history and bump its bin count.
    for i in range(num_sampled):
        token_id = tl.load(sampled_tokens_ptr + req_id * sampled_tokens_stride + i)
        token_ptr = (
            output_bin_counts_ptr + req_state_idx * output_bin_counts_stride + token_id
        )
        count = tl.load(token_ptr)
        count += 1
        tl.store(token_ptr, count)
        tl.store(
            all_token_ids_ptr + req_state_idx * all_token_ids_stride + total_len + i,
            token_id,
        )
    query_start = tl.load(query_start_loc_ptr + req_id)
    query_end = tl.load(query_start_loc_ptr + req_id + 1)
    query_len = query_end - query_start
    num_rejected = tl.load(num_rejected_ptr + req_id)
    # Rejected draft tokens were not actually computed; exclude them.
    num_computed = tl.load(num_computed_tokens_ptr + req_state_idx)
    num_computed += query_len - num_rejected
    tl.store(num_computed_tokens_ptr + req_state_idx, num_computed)
def post_update(
    # [num_reqs]
    idx_mapping: torch.Tensor,
    # [max_num_reqs]
    num_computed_tokens: torch.Tensor,
    # [max_num_reqs]
    last_sampled_tokens: torch.Tensor,
    # [max_num_reqs, vocab_size]
    output_bin_counts: torch.Tensor,
    # [num_reqs, num_speculative_steps + 1]
    sampled_tokens: torch.Tensor,
    # [num_reqs]
    num_sampled: torch.Tensor,
    # [num_reqs]
    num_rejected: torch.Tensor,
    # [num_reqs + 1]
    query_start_loc: torch.Tensor,
    # [max_num_reqs, max_model_len]
    all_token_ids: torch.Tensor,
    # [max_num_reqs]
    total_len: torch.Tensor,
) -> None:
    """Fold this step's sampling results back into the persistent per-request
    state, one kernel program per request."""
    grid = (idx_mapping.shape[0],)
    _post_update_kernel[grid](
        idx_mapping,
        num_computed_tokens,
        last_sampled_tokens,
        output_bin_counts,
        output_bin_counts.stride(0),
        sampled_tokens,
        sampled_tokens.stride(0),
        num_sampled,
        num_rejected,
        query_start_loc,
        all_token_ids,
        all_token_ids.stride(0),
        total_len,
        num_warps=1,
    )
@triton.jit
def _post_update_pool_kernel(
    idx_mapping_ptr,
    num_computed_tokens_ptr,
    query_start_loc_ptr,
):
    # One program per request: advance num_computed_tokens by this step's
    # query length (pooling models sample no tokens, so nothing else to do).
    batch_id = tl.program_id(0)
    query_start = tl.load(query_start_loc_ptr + batch_id)
    query_end = tl.load(query_start_loc_ptr + batch_id + 1)
    query_len = query_end - query_start
    req_state_idx = tl.load(idx_mapping_ptr + batch_id)
    num_computed = tl.load(num_computed_tokens_ptr + req_state_idx)
    tl.store(num_computed_tokens_ptr + req_state_idx, num_computed + query_len)
def post_update_pool(
    # [num_reqs]
    idx_mapping: torch.Tensor,
    # [max_num_reqs]
    num_computed_tokens: torch.Tensor,
    # [num_reqs + 1]
    query_start_loc: torch.Tensor,
) -> None:
    """Advance each request's num_computed_tokens by its query length
    (post-step update for pooling models)."""
    grid = (idx_mapping.shape[0],)
    _post_update_pool_kernel[grid](
        idx_mapping,
        num_computed_tokens,
        query_start_loc,
    )
@triton.jit
def _expand_idx_mapping_kernel(
    idx_mapping_ptr,
    expanded_idx_mapping_ptr,
    expanded_local_pos_ptr,
    cu_num_logits_ptr,
    BLOCK_SIZE: tl.constexpr,
):
    # One program per request: repeat the request's state index once per
    # logit, and record each logit's local position (0..num_tokens-1).
    req_idx = tl.program_id(0)
    start_idx = tl.load(cu_num_logits_ptr + req_idx)
    end_idx = tl.load(cu_num_logits_ptr + req_idx + 1)
    num_tokens = end_idx - start_idx
    # BLOCK_SIZE must cover the largest per-request logit count; one block
    # handles the whole request.
    block = tl.arange(0, BLOCK_SIZE)
    mask = block < num_tokens
    req_state_idx = tl.load(idx_mapping_ptr + req_idx)
    tl.store(expanded_idx_mapping_ptr + start_idx + block, req_state_idx, mask=mask)
    tl.store(expanded_local_pos_ptr + start_idx + block, block, mask=mask)
def expand_idx_mapping(
    idx_mapping: torch.Tensor,
    total_num_logits: int,
    cu_num_logits: torch.Tensor,
    max_expand_len: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Expand the per-request idx_mapping to one entry per logit, returning
    the expanded mapping and each logit's local position in its request."""
    expanded_idx_mapping = idx_mapping.new_empty(total_num_logits)
    expanded_local_pos = torch.empty(
        total_num_logits, dtype=torch.int32, device=idx_mapping.device
    )
    grid = (idx_mapping.shape[0],)
    _expand_idx_mapping_kernel[grid](
        idx_mapping,
        expanded_idx_mapping,
        expanded_local_pos,
        cu_num_logits,
        BLOCK_SIZE=triton.next_power_of_2(max_expand_len),
    )
    return expanded_idx_mapping, expanded_local_pos
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/input_batch.py",
"license": "Apache License 2.0",
"lines": 500,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/model_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
NOTE: Coding style guide for this file:
This model runner is shared by all models: text and multimodal, generative
and embedding, public and private. As a result, this file must only contain
code that is common to every model. Model-specific behavior belongs in the
appropriate model-specific files.
In other words:
* Be paranoid about changing this file. It should remain stable.
* Be even more paranoid about adding new lines. It should remain minimal.
Even for shared features (for example, different parallelism modes), keep the
complexity out of this path. The less common the feature, the more it should be
hidden. Prefer utility functions defined elsewhere and call them from here,
instead of embedding feature-specific logic directly.
"""
import functools
import gc
import time
from copy import deepcopy
import numpy as np
import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.config.compilation import CUDAGraphMode
from vllm.distributed.parallel_state import (
get_dcp_group,
get_pp_group,
prepare_communication_buffer_for_model,
)
from vllm.forward_context import BatchDescriptor, set_forward_context
from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model_loader
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.sequence import IntermediateTensors
from vllm.tasks import SupportedTask
from vllm.utils.mem_utils import DeviceMemoryProfiler, format_gib
from vllm.utils.torch_utils import STR_DTYPE_TO_TORCH_DTYPE
from vllm.v1.core.sched.output import GrammarOutput, SchedulerOutput
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput
from vllm.v1.worker.cp_utils import check_attention_cp_compatibility
from vllm.v1.worker.gpu.async_utils import AsyncOutput, AsyncPoolingOutput
from vllm.v1.worker.gpu.attn_utils import (
build_slot_mappings_by_layer,
get_kv_cache_spec,
init_attn_backend,
init_kv_cache,
)
from vllm.v1.worker.gpu.block_table import BlockTables
from vllm.v1.worker.gpu.buffer_utils import async_copy_to_gpu
from vllm.v1.worker.gpu.cp_utils import prepare_dcp_local_seq_lens
from vllm.v1.worker.gpu.cudagraph_utils import CudaGraphManager
from vllm.v1.worker.gpu.dp_utils import (
get_cudagraph_and_dp_padding,
make_num_tokens_across_dp,
)
from vllm.v1.worker.gpu.input_batch import (
InputBatch,
InputBuffers,
combine_sampled_and_draft_tokens,
expand_idx_mapping,
get_num_sampled_and_rejected,
post_update,
post_update_pool,
prepare_pos_seq_lens,
prepare_prefill_inputs,
)
from vllm.v1.worker.gpu.kv_connector import (
NO_OP_KV_CONNECTOR,
KVConnector,
get_kv_connector,
)
from vllm.v1.worker.gpu.lora_utils import LoraState
from vllm.v1.worker.gpu.mm.encoder_cache import EncoderCache
from vllm.v1.worker.gpu.model_states import init_model_state
from vllm.v1.worker.gpu.pool.pooling_runner import PoolingRunner
from vllm.v1.worker.gpu.pp_utils import pp_broadcast, pp_receive
from vllm.v1.worker.gpu.sample.output import SamplerOutput
from vllm.v1.worker.gpu.sample.prompt_logprob import PromptLogprobsWorker
from vllm.v1.worker.gpu.sample.sampler import Sampler
from vllm.v1.worker.gpu.spec_decode import init_speculator
from vllm.v1.worker.gpu.spec_decode.eagle.eagle3_utils import (
set_eagle3_aux_hidden_state_layers,
)
from vllm.v1.worker.gpu.spec_decode.rejection_sample import rejection_sample
from vllm.v1.worker.gpu.spec_decode.utils import DraftTokensHandler
from vllm.v1.worker.gpu.states import RequestState
from vllm.v1.worker.gpu.structured_outputs import StructuredOutputsWorker
from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin
logger = init_logger(__name__)
class GPUModelRunner(LoRAModelRunnerMixin):
    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        """Initialize per-worker runner state from the engine configuration.

        Sets up config shortcuts, persistent request/input buffers, the
        sampler, CUDA-graph and structured-output workers, and (optionally)
        speculative-decoding state. Model and KV cache are initialized later
        by `load_model` / `initialize_kv_cache`.

        Args:
            vllm_config: Full vLLM configuration for this engine instance.
            device: CUDA device this runner executes on.
        """
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.cache_config = vllm_config.cache_config
        self.compilation_config = vllm_config.compilation_config
        self.lora_config = vllm_config.lora_config
        self.load_config = vllm_config.load_config
        self.parallel_config = vllm_config.parallel_config
        self.scheduler_config = vllm_config.scheduler_config
        self.speculative_config = vllm_config.speculative_config
        self.observability_config = vllm_config.observability_config
        self.device = device
        self.dtype = self.model_config.dtype
        self.kv_cache_dtype = self.dtype
        if self.cache_config.cache_dtype != "auto":
            # Quantized KV cache.
            self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[
                self.cache_config.cache_dtype
            ]
        self.vocab_size = self.model_config.get_vocab_size()
        self.max_model_len = self.model_config.max_model_len
        self.max_num_tokens = self.scheduler_config.max_num_batched_tokens
        self.max_num_reqs = self.scheduler_config.max_num_seqs
        self.use_async_scheduling = self.scheduler_config.async_scheduling
        # NOTE(review): dedicated stream + event for output copies —
        # presumably to overlap D2H transfers with compute; confirm.
        self.output_copy_stream = torch.cuda.Stream(self.device)
        self.output_copy_event = torch.cuda.Event()
        # Pipeline parallelism.
        self.pp_size = self.parallel_config.pipeline_parallel_size
        self.use_pp = self.pp_size > 1
        if self.use_pp:
            self.is_first_pp_rank = get_pp_group().is_first_rank
            self.is_last_pp_rank = get_pp_group().is_last_rank
        else:
            self.is_first_pp_rank = True
            self.is_last_pp_rank = True
        # Decode context parallelism.
        self.dcp_size = self.parallel_config.decode_context_parallel_size
        self.use_dcp = self.dcp_size > 1
        self.dcp_rank = get_dcp_group().rank_in_group if self.use_dcp else 0
        self.cp_interleave = self.parallel_config.cp_kv_cache_interleave_size
        # Multimodal
        self.mm_registry = MULTIMODAL_REGISTRY
        self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs(
            self.model_config
        )
        # Encoder cache exists only on the first PP rank of multimodal models.
        self.encoder_cache = None
        if self.supports_mm_inputs and self.is_first_pp_rank:
            self.encoder_cache = EncoderCache()
        # Speculative decoding.
        self.speculator = None
        self.num_speculative_steps = 0
        self.use_aux_hidden_state_outputs = False
        if self.speculative_config is not None:
            self.num_speculative_steps = self.speculative_config.num_speculative_tokens
            if self.is_last_pp_rank:
                self.speculator = init_speculator(self.vllm_config, self.device)
            if self.speculative_config.method == "eagle3":
                # EAGLE3 may require auxiliary hidden states from target model outputs.
                self.use_aux_hidden_state_outputs = True
                if self.pp_size > 1:
                    raise ValueError("EAGLE3 with pipeline parallel is not supported.")
        # Draft tokens propagation - for spec-dec + struct outputs.
        self.draft_tokens_handler = DraftTokensHandler(self.device)
        self.req_states = RequestState(
            max_num_reqs=self.max_num_reqs,
            max_model_len=self.max_model_len,
            max_num_batched_tokens=self.max_num_tokens,
            num_speculative_steps=self.num_speculative_steps,
            vocab_size=self.vocab_size,
            device=self.device,
        )
        self.input_buffers = InputBuffers(
            max_num_reqs=self.max_num_reqs,
            max_num_tokens=self.max_num_tokens,
            device=self.device,
        )
        self.sampler = Sampler(
            max_num_reqs=self.max_num_reqs,
            vocab_size=self.vocab_size,
            device=self.device,
            req_states=self.req_states,
            logprobs_mode=self.model_config.logprobs_mode,
            num_speculative_tokens=self.num_speculative_steps + 1,
        )
        self.prompt_logprobs_worker = PromptLogprobsWorker(self.max_num_reqs)
        # CUDA graphs.
        self.cudagraph_manager = CudaGraphManager(
            self.vllm_config,
            self.use_aux_hidden_state_outputs,
            self.device,
        )
        # Structured outputs worker.
        # Sized for the worst case: every request sampling all spec tokens.
        self.structured_outputs_worker = StructuredOutputsWorker(
            max_num_logits=self.max_num_reqs * (self.num_speculative_steps + 1),
            vocab_size=self.vocab_size,
            device=self.device,
        )
        # LoRA-related workers.
        self.lora_state = LoraState(max_num_reqs=self.max_num_reqs)
        # KV Connector if configured.
        self.kv_connector: KVConnector = NO_OP_KV_CONNECTOR
        # Pooling models.
        self.is_pooling_model = self.model_config.runner_type == "pooling"
        self.pooling_runner: PoolingRunner | None = None
        # For transferring state from execute_model to subsequent sample_tokens call.
        self.execute_model_state: tuple | None = None
def update_max_model_len(self, max_model_len: int) -> None:
self.max_model_len = max_model_len
self.req_states.max_model_len = max_model_len
def get_supported_tasks(self) -> tuple[SupportedTask, ...]:
tasks: list[SupportedTask] = []
if self.model_config.runner_type == "generate":
tasks.append("generate")
if self.pooling_runner is not None:
tasks.extend(self.pooling_runner.get_supported_pooling_tasks())
return tuple(tasks)
    def load_model(self, *args, **kwargs) -> None:
        """Load the model (plus LoRA and speculator, if configured).

        Also initializes the components that need the loaded model: the
        communication buffers, the model state, and the pooling runner for
        pooling models. Records memory consumed by loading in
        `self.model_memory_usage`.
        """
        time_before_load = time.perf_counter()
        with DeviceMemoryProfiler() as m:
            model_loader = get_model_loader(self.vllm_config.load_config)
            logger.info("Loading model from scratch...")
            self.model = model_loader.load_model(
                vllm_config=self.vllm_config,
                model_config=self.vllm_config.model_config,
            )
            if self.lora_config:
                # Wrap the model with LoRA support.
                self.model = self.load_lora_model(
                    self.model, self.vllm_config, self.device
                )
            if self.use_aux_hidden_state_outputs:
                # EAGLE3: register which target layers expose aux hidden states.
                assert self.speculative_config is not None
                set_eagle3_aux_hidden_state_layers(self.model, self.speculative_config)
            if self.speculator is not None:
                self.speculator.load_model(self.model)
        time_after_load = time.perf_counter()
        self.model_memory_usage = m.consumed_memory
        logger.info(
            "Model loading took %s GiB and %.6f seconds",
            format_gib(m.consumed_memory),
            time_after_load - time_before_load,
        )
        prepare_communication_buffer_for_model(self.model)
        if self.speculator is not None:
            prepare_communication_buffer_for_model(self.speculator)
        # Initialize the components that require the model.
        self.model_state = init_model_state(
            self.vllm_config, self.model, self.encoder_cache, self.device
        )
        if self.is_pooling_model:
            self.pooling_runner = PoolingRunner(self.model)
def get_model(self) -> nn.Module:
return self.model
    @functools.cached_property
    def main_stream(self) -> torch.cuda.Stream:
        """The default CUDA stream of this runner's device (cached)."""
        # Cache the default CUDA stream to avoid lookup overhead.
        return torch.cuda.current_stream(self.device)
def get_kv_cache_spec(self):
return get_kv_cache_spec(self.vllm_config)
    def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
        """Allocate the KV cache and set up attention backends and block tables.

        Args:
            kv_cache_config: KV cache layout decided by the engine; deep-copied
                so later local mutation cannot affect the caller's copy.
        """
        kv_cache_config = deepcopy(kv_cache_config)
        self.kv_cache_config = kv_cache_config
        block_sizes = [
            kv_cache_group.kv_cache_spec.block_size
            for kv_cache_group in kv_cache_config.kv_cache_groups
        ]
        self.block_tables = BlockTables(
            block_sizes=block_sizes,
            max_num_reqs=self.max_num_reqs,
            max_num_batched_tokens=self.max_num_tokens,
            max_model_len=self.max_model_len,
            device=self.device,
            cp_size=self.dcp_size,
            cp_rank=self.dcp_rank,
            cp_interleave=self.cp_interleave,
        )
        self.attn_backends, self.attn_groups = init_attn_backend(
            self.kv_cache_config, self.vllm_config, self.device
        )
        check_attention_cp_compatibility(self.vllm_config)
        if self.speculator is not None:
            # HACK(woosuk)
            # Share attention structures with the speculator.
            self.speculator.set_attn(
                self.kv_cache_config,
                self.attn_groups,
                self.block_tables,
            )
        self.kv_caches: list[torch.Tensor] = []
        kv_caches_dict = init_kv_cache(
            self.kv_caches,
            self.compilation_config.static_forward_context,
            self.kv_cache_config,
            self.attn_backends,
            self.device,
        )
        # The KV connector (if configured) needs the allocated caches.
        self.kv_connector = get_kv_connector(self.vllm_config, kv_caches_dict)
    @torch.inference_mode()
    def _dummy_run(
        self, num_tokens: int, *args, skip_attn: bool = True, **kwargs
    ) -> tuple[torch.Tensor | None, torch.Tensor | None]:
        """Run the model once on a synthetic batch (for profiling/warmup).

        Splits `num_tokens` as evenly as possible across dummy requests and
        executes `execute_model` with `dummy_run=True`.

        Returns:
            `(hidden_states, sample_hidden_states)` on the last PP rank,
            `(None, None)` on other ranks.
        """
        # Create a dummy scheduler output.
        num_reqs = min(num_tokens, self.max_num_reqs)
        num_tokens_per_request = [num_tokens // num_reqs] * num_reqs
        # Give the remainder to the last request so the sum is exact.
        num_tokens_per_request[-1] += num_tokens % num_reqs
        assert sum(num_tokens_per_request) == num_tokens
        num_scheduled_tokens = {
            f"_dummy_req_{i}": n for i, n in enumerate(num_tokens_per_request)
        }
        dummy_scheduler_output = SchedulerOutput.make_empty()
        dummy_scheduler_output.total_num_scheduled_tokens = num_tokens
        dummy_scheduler_output.num_scheduled_tokens = num_scheduled_tokens
        # Disable any use of KVConnector for dummy runs.
        self.kv_connector.set_disabled(True)
        # For non-first PP ranks, create dummy intermediate_tensors.
        intermediate_tensors = None
        if not self.is_first_pp_rank:
            intermediate_tensors = self.model.make_empty_intermediate_tensors(
                batch_size=num_tokens,
                dtype=self.model_config.dtype,
                device=self.device,
            )
        # Execute the model.
        self.execute_model(
            dummy_scheduler_output,
            intermediate_tensors=intermediate_tensors,
            dummy_run=True,
            skip_attn_for_dummy_run=skip_attn,
        )
        self.kv_connector.set_disabled(False)
        # Non-last PP ranks don't produce output for sampling.
        if not self.is_last_pp_rank:
            return None, None
        # Consume the state stashed by execute_model.
        assert self.execute_model_state is not None
        input_batch, _, _, _, hidden_states, _, _ = self.execute_model_state
        self.execute_model_state = None
        assert hidden_states is not None  # Last PP rank always has hidden_states
        sample_hidden_states = hidden_states[input_batch.logits_indices]
        return hidden_states, sample_hidden_states
@torch.inference_mode()
def _dummy_sampler_run(self, hidden_states: torch.Tensor) -> None:
num_reqs = hidden_states.shape[0]
logits = self.model.compute_logits(hidden_states)
idx_mapping = torch.arange(num_reqs, dtype=torch.int32, device=self.device)
idx_mapping_np = np.arange(num_reqs, dtype=np.int32)
pos = torch.zeros(num_reqs, dtype=torch.int64, device=self.device)
dummy_input_ids = torch.zeros(num_reqs, dtype=torch.int32, device=self.device)
expanded_local_pos = torch.zeros(
num_reqs, dtype=torch.int32, device=self.device
)
# NOTE(woosuk): During the initial memory profiling, the sampler may skip
# top_k, top_p, and logprobs, using less GPU memory than what is possible
# during actual execution.
self.sampler(
logits,
idx_mapping,
idx_mapping_np,
idx_mapping_np,
pos,
dummy_input_ids,
expanded_local_pos,
)
@torch.inference_mode()
def _dummy_pooler_run(self, hidden_states: torch.Tensor) -> None:
assert self.pooling_runner is not None
self.pooling_runner.dummy_pooler_run(hidden_states)
    @torch.inference_mode()
    def profile_run(self) -> None:
        """Execute a worst-case-sized forward pass to profile peak GPU memory.

        Runs the model on `max_num_tokens`, then the sampler or pooler (last
        PP rank only), then the speculator's model if present, before freeing
        the temporaries.
        """
        hidden_states, sample_hidden_states = self._dummy_run(
            self.max_num_tokens, skip_attn=True
        )
        # Only run sampler/pooler on last PP rank (non-last ranks return None).
        if self.is_last_pp_rank:
            assert sample_hidden_states is not None
            if self.pooling_runner is None:
                self._dummy_sampler_run(sample_hidden_states)
            else:
                self._dummy_pooler_run(hidden_states)
        if self.speculator is not None:
            num_tokens_across_dp = make_num_tokens_across_dp(
                self.parallel_config.data_parallel_size, self.max_num_tokens
            )
            self.speculator.run_model(
                self.max_num_tokens,
                attn_metadata=None,
                slot_mappings=None,
                num_tokens_across_dp=num_tokens_across_dp,
            )
        # Ensure all work is done before releasing the profiling tensors.
        torch.cuda.synchronize()
        del hidden_states, sample_hidden_states
        gc.collect()
def reset_mm_cache(self) -> None:
if self.encoder_cache is not None:
self.encoder_cache.reset_mm_cache()
def reset_encoder_cache(self) -> None:
if self.encoder_cache is not None:
self.encoder_cache.reset_encoder_cache()
def _get_num_input_tokens(self, num_scheduled_tokens: int) -> int:
# SP is not supported yet.
return num_scheduled_tokens
@torch.inference_mode()
def capture_model(self) -> int:
if not self.cudagraph_manager.needs_capture():
logger.warning(
"Skipping CUDA graph capture. To turn on CUDA graph capture, "
"ensure `cudagraph_mode` was not manually set to `NONE`"
)
return 0
# TODO (zhanqiu): support CUDA graph for PP.
if self.use_pp:
logger.warning_once(
"Skipping CUDA graph capture because pipeline parallel is "
"enabled. Pipeline parallel is currently eager-only.",
)
return 0
start_time = time.perf_counter()
gc.collect()
torch.cuda.empty_cache()
start_free_gpu_memory = torch.cuda.mem_get_info()[0]
with self.maybe_setup_dummy_loras(self.lora_config):
self.cudagraph_manager.capture(
model=self.model,
model_state=self.model_state,
input_buffers=self.input_buffers,
block_tables=self.block_tables,
attn_groups=self.attn_groups,
kv_cache_config=self.kv_cache_config,
has_lora=self.lora_config is not None,
)
if self.speculator is not None:
self.speculator.capture_model()
end_time = time.perf_counter()
end_free_gpu_memory = torch.cuda.mem_get_info()[0]
elapsed_time = end_time - start_time
cuda_graph_size = start_free_gpu_memory - end_free_gpu_memory
# This usually takes 5~20 seconds.
logger.info(
"Graph capturing finished in %.0f secs, took %.2f GiB",
elapsed_time,
cuda_graph_size / (1 << 30),
)
return cuda_graph_size
def warmup_for_prefill(self) -> None:
# For FlashInfer, we would like to execute a dummy prefill run
# to trigger JIT compilation.
if all("FLASHINFER" in b.get_name() for b in self.attn_backends.values()):
self._dummy_run(self.max_num_tokens, skip_attn=False)
torch.cuda.synchronize()
def finish_requests(self, scheduler_output: SchedulerOutput) -> None:
finished_req_ids = scheduler_output.finished_req_ids
preempted_req_ids = scheduler_output.preempted_req_ids
if preempted_req_ids:
finished_req_ids = finished_req_ids.union(preempted_req_ids)
for req_id in finished_req_ids:
self.req_states.remove_request(req_id)
if self.encoder_cache is not None:
self.encoder_cache.remove_request(req_id)
self.prompt_logprobs_worker.remove_request(req_id)
self.lora_state.remove_request(req_id)
def free_states(self, scheduler_output: SchedulerOutput) -> None:
if self.encoder_cache is not None:
for mm_hash in scheduler_output.free_encoder_mm_hashes:
self.encoder_cache.free_encoder_cache(mm_hash)
    def add_requests(self, scheduler_output: SchedulerOutput) -> None:
        """Register newly scheduled requests with all per-request components.

        Adds each new request to the request states, encoder cache, model
        state, block tables, LoRA state, and (for generative requests) the
        sampler and prompt-logprobs worker, then flushes the staged writes.
        """
        for new_req_data in scheduler_output.scheduled_new_reqs:
            assert new_req_data.prompt_token_ids is not None
            assert new_req_data.prefill_token_ids is not None
            req_id = new_req_data.req_id
            prompt_len = len(new_req_data.prompt_token_ids)
            self.req_states.add_request(
                req_id=req_id,
                prompt_len=prompt_len,
                all_token_ids=new_req_data.prefill_token_ids,
                num_computed_tokens=new_req_data.num_computed_tokens,
            )
            # Index assigned by req_states; used by all index-keyed components.
            req_index = self.req_states.req_id_to_index[req_id]
            if self.encoder_cache is not None:
                self.encoder_cache.add_request(req_id, new_req_data.mm_features)
            self.model_state.add_request(req_index, new_req_data)
            self.block_tables.append_block_ids(
                req_index, new_req_data.block_ids, overwrite=True
            )
            self.lora_state.add_request(req_id, req_index, new_req_data.lora_request)
            # sampling_params is None for non-sampling (e.g. pooling) requests.
            if new_req_data.sampling_params is not None:
                self.sampler.add_request(
                    req_index, prompt_len, new_req_data.sampling_params
                )
                self.prompt_logprobs_worker.add_request(
                    req_id, req_index, new_req_data.sampling_params
                )
        if scheduler_output.scheduled_new_reqs:
            self.req_states.apply_staged_writes()
            self.sampler.apply_staged_writes()
            self.model_state.apply_staged_writes()
def update_requests(self, scheduler_output: SchedulerOutput) -> None:
# Add new blocks for the existing requests.
reqs = scheduler_output.scheduled_cached_reqs
for req_new_block_ids, req_id in zip(reqs.new_block_ids, reqs.req_ids):
if req_new_block_ids is not None:
req_index = self.req_states.req_id_to_index[req_id]
self.block_tables.append_block_ids(
req_index, req_new_block_ids, overwrite=False
)
    def prepare_inputs(
        self, scheduler_output: SchedulerOutput, num_tokens_after_padding: int
    ) -> InputBatch:
        """Assemble the InputBatch for one model step.

        Sorts requests so decodes come before prefills, builds the batch->state
        index mapping, the per-logit expansion for speculative decoding, the
        query_start_loc / positions / seq_lens buffers, and the logits indices.
        Copies everything into the persistent GPU input buffers.

        Args:
            scheduler_output: This step's scheduling decision (non-empty).
            num_tokens_after_padding: Token count after CUDA-graph/DP padding.
        """
        num_tokens = scheduler_output.total_num_scheduled_tokens
        assert num_tokens > 0
        num_tokens_per_req = scheduler_output.num_scheduled_tokens
        num_reqs = len(num_tokens_per_req)
        # Decode first, then prefill.
        # batch_idx -> req_id
        req_ids = sorted(num_tokens_per_req, key=num_tokens_per_req.get)  # type: ignore[arg-type]
        numtoks_iter = map(num_tokens_per_req.get, req_ids)
        num_scheduled_tokens = np.fromiter(numtoks_iter, dtype=np.int32, count=num_reqs)
        # batch_idx -> index into the persistent request state arrays.
        idx_mapping_iter = map(self.req_states.req_id_to_index.get, req_ids)
        idx_mapping_np = np.fromiter(idx_mapping_iter, dtype=np.int32, count=num_reqs)
        idx_mapping = async_copy_to_gpu(idx_mapping_np, device=self.device)
        # Get the number of draft tokens for each request.
        draft_tokens = scheduler_output.scheduled_spec_decode_tokens
        if not draft_tokens:
            # No draft token scheduled (common case): one logit per request.
            total_num_draft_tokens = 0
            total_num_logits = num_reqs
            cu_num_logits_np = np.arange(num_reqs + 1, dtype=np.int32)
            cu_num_logits = torch.arange(
                num_reqs + 1, device=self.device, dtype=torch.int32
            )
            expanded_idx_mapping = idx_mapping
            expanded_local_pos = torch.zeros(
                num_reqs, dtype=torch.int32, device=self.device
            )
        else:
            num_draft_tokens = np.array(
                [len(draft_tokens.get(req_id, ())) for req_id in req_ids],
                dtype=np.int32,
            )
            total_num_draft_tokens = int(num_draft_tokens.sum())
            total_num_logits = num_reqs + total_num_draft_tokens
            # Each request samples one logit per draft token plus one.
            num_logits = num_draft_tokens + 1
            cu_num_logits_np = np.empty(num_reqs + 1, dtype=np.int32)
            cu_num_logits_np[0] = 0
            np.cumsum(num_logits, out=cu_num_logits_np[1:])
            cu_num_logits = async_copy_to_gpu(cu_num_logits_np, device=self.device)
            max_expand_len = self.num_speculative_steps + 1
            expanded_idx_mapping, expanded_local_pos = expand_idx_mapping(
                idx_mapping, total_num_logits, cu_num_logits, max_expand_len
            )
        # Get query_start_loc.
        query_start_loc_np = np.empty(self.max_num_reqs + 1, dtype=np.int32)
        query_start_loc_np[0] = 0
        np.cumsum(num_scheduled_tokens, out=query_start_loc_np[1 : num_reqs + 1])
        # Pad for full CUDA graph mode.
        # Some attention backends like FA3 require query_start_loc to be non-decreasing.
        query_start_loc_np[num_reqs + 1 :] = num_tokens
        async_copy_to_gpu(query_start_loc_np, out=self.input_buffers.query_start_loc)
        query_start_loc_np = query_start_loc_np[: num_reqs + 1]
        query_start_loc = self.input_buffers.query_start_loc[: num_reqs + 1]
        # Get prefill tokens if any.
        if self.req_states.any_prefills(idx_mapping_np):
            prepare_prefill_inputs(
                self.input_buffers.input_ids,
                self.req_states.next_prefill_tokens,
                idx_mapping,
                query_start_loc,
                self.req_states.all_token_ids.gpu,
                self.req_states.prefill_len.gpu,
                self.req_states.num_computed_tokens.gpu,
            )
        # Prepare positions and seq_lens.
        prepare_pos_seq_lens(
            idx_mapping,
            query_start_loc,
            self.req_states.num_computed_tokens.gpu,
            self.input_buffers.positions,
            self.input_buffers.seq_lens,
        )
        seq_lens = self.input_buffers.seq_lens[:num_reqs]
        dcp_local_seq_lens = None
        if self.use_dcp:
            # Prepare dcp local seq_lens.
            prepare_dcp_local_seq_lens(
                self.input_buffers.dcp_local_seq_lens,
                self.input_buffers.seq_lens,
                num_reqs,
                self.dcp_size,
                self.dcp_rank,
                self.cp_interleave,
            )
            dcp_local_seq_lens = self.input_buffers.dcp_local_seq_lens[:num_reqs]
        # Some input token ids are directly read from the last sampled tokens
        # and draft tokens. Also, get the logits indices to sample tokens from.
        logits_indices = combine_sampled_and_draft_tokens(
            self.input_buffers.input_ids,
            idx_mapping,
            self.req_states.last_sampled_tokens,
            query_start_loc,
            seq_lens,
            self.req_states.prefill_len.gpu,
            self.req_states.draft_tokens,
            cu_num_logits,
            total_num_logits,
        )
        return InputBatch(
            req_ids=req_ids,
            num_reqs=num_reqs,
            idx_mapping=idx_mapping,
            idx_mapping_np=idx_mapping_np,
            expanded_idx_mapping=expanded_idx_mapping,
            expanded_local_pos=expanded_local_pos,
            num_scheduled_tokens=num_scheduled_tokens,
            num_tokens=num_tokens,
            num_tokens_after_padding=num_tokens_after_padding,
            num_draft_tokens=total_num_draft_tokens,
            query_start_loc=query_start_loc,
            query_start_loc_np=query_start_loc_np,
            seq_lens=seq_lens,
            dcp_local_seq_lens=dcp_local_seq_lens,
            input_ids=self.input_buffers.input_ids[:num_tokens_after_padding],
            positions=self.input_buffers.positions[:num_tokens_after_padding],
            logits_indices=logits_indices,
            cu_num_logits=cu_num_logits,
            cu_num_logits_np=cu_num_logits_np,
            has_structured_output_reqs=scheduler_output.has_structured_output_requests,
        )
def prepare_attn(
self, input_batch: InputBatch
) -> tuple[tuple[torch.Tensor, ...], torch.Tensor]:
# Block tables: num_kv_cache_groups x [num_reqs, max_num_blocks]
block_tables = self.block_tables.gather_block_tables(input_batch.idx_mapping)
# Compute slot mappings: [num_kv_cache_groups, num_tokens]
slot_mappings = self.block_tables.compute_slot_mappings(
input_batch.idx_mapping,
input_batch.query_start_loc,
input_batch.positions,
)
return block_tables, slot_mappings
def prepare_dummy_attn(
self, input_batch: InputBatch
) -> tuple[tuple[torch.Tensor, ...], torch.Tensor]:
block_tables = self.block_tables.get_dummy_block_tables(input_batch.num_reqs)
slot_mappings = self.block_tables.get_dummy_slot_mappings(
input_batch.num_tokens
)
return block_tables, slot_mappings
    def sample(
        self,
        hidden_states: torch.Tensor,
        input_batch: InputBatch,
        grammar_output: GrammarOutput | None,
    ) -> tuple[SamplerOutput, torch.Tensor, torch.Tensor]:
        """Sample next tokens from the model's hidden states.

        Applies the grammar bitmask for structured outputs (if any), runs the
        sampler, and performs rejection sampling when draft tokens are present.

        Returns:
            `(sampler_output, num_sampled, num_rejected)` where the two
            tensors give the per-request accepted / rejected token counts.
        """
        sample_hidden_states = hidden_states[input_batch.logits_indices]
        sample_pos = input_batch.positions[input_batch.logits_indices]
        input_ids = input_batch.input_ids[input_batch.logits_indices]
        logits = self.model.compute_logits(sample_hidden_states)
        if grammar_output is not None:
            # Apply grammar bitmask to the logits in-place.
            self.structured_outputs_worker.apply_grammar_bitmask(
                logits,
                input_batch,
                grammar_output.structured_output_request_ids,
                grammar_output.grammar_bitmask,
            )
        # Sample tokens and compute logprobs (if needed).
        sampler_output = self.sampler(
            logits,
            input_batch.expanded_idx_mapping,
            input_batch.idx_mapping_np,
            input_batch.cu_num_logits_np,
            sample_pos,
            input_ids,
            input_batch.expanded_local_pos,
        )
        if input_batch.num_draft_tokens == 0:
            # No draft tokens (common case): exactly one token per request.
            num_sampled = torch.ones(
                input_batch.num_reqs, dtype=torch.int32, device=self.device
            )
        else:
            # Rejection sampling for spec decoding.
            sampled_tokens, num_sampled = rejection_sample(
                sampler_output.sampled_token_ids,
                input_ids,
                input_batch.cu_num_logits,
                self.num_speculative_steps,
            )
            sampler_output.sampled_token_ids = sampled_tokens
        # Get the number of sampled and rejected tokens.
        # For chunked prefills, num_sampled and num_rejected are both 0.
        num_sampled, num_rejected = get_num_sampled_and_rejected(
            num_sampled,
            input_batch.seq_lens,
            input_batch.cu_num_logits,
            input_batch.idx_mapping,
            self.req_states.prefill_len.gpu,
        )
        return sampler_output, num_sampled, num_rejected
    def postprocess(
        self,
        input_batch: InputBatch,
        sampled_tokens: torch.Tensor,
        num_sampled: torch.Tensor,
        num_rejected: torch.Tensor,
    ) -> None:
        """Fold this step's sampling results back into the request states.

        Updates computed-token counters, last sampled tokens, penalty bin
        counts, and the per-request computed-prefill progress (capped at the
        prefill length).
        """
        # Update the number of computed tokens.
        post_update(
            input_batch.idx_mapping,
            self.req_states.num_computed_tokens.gpu,
            self.req_states.last_sampled_tokens,
            self.sampler.penalties_state.output_bin_counts,
            sampled_tokens,
            num_sampled,
            num_rejected,
            input_batch.query_start_loc,
            self.req_states.all_token_ids.gpu,
            self.req_states.total_len.gpu,
        )
        # Update the number of computed prefill tokens.
        idx_mapping_np = input_batch.idx_mapping_np
        computed_prefill = self.req_states.num_computed_prefill_tokens
        computed_prefill[idx_mapping_np] += input_batch.num_scheduled_tokens
        # Clamp: a request's prefill progress can never exceed its prefill len.
        np.minimum(
            computed_prefill, self.req_states.prefill_len.np, out=computed_prefill
        )
    @torch.inference_mode()
    def execute_model(
        self,
        scheduler_output: SchedulerOutput,
        intermediate_tensors: IntermediateTensors | None = None,
        dummy_run: bool = False,
        skip_attn_for_dummy_run: bool = False,
    ) -> ModelRunnerOutput | IntermediateTensors | None:
        """Run one forward pass for this step's scheduled batch.

        Updates request state, prepares inputs/attention metadata, handles
        CUDA-graph replay and DP padding, then runs the model. The results
        are stashed in `self.execute_model_state` for the subsequent
        `sample_tokens` call.

        Returns:
            The KV-connector empty output when there is nothing to run;
            `IntermediateTensors` on non-last PP ranks; otherwise None.
        """
        if not dummy_run:
            # Update the request states.
            self.finish_requests(scheduler_output)
            self.free_states(scheduler_output)
            self.add_requests(scheduler_output)
            self.update_requests(scheduler_output)
            self.block_tables.apply_staged_writes()
            if scheduler_output.total_num_scheduled_tokens == 0:
                # No need to run the model.
                empty_output = self.kv_connector.no_forward(scheduler_output)
                return empty_output
        # Get local cudagraph mode and size.
        local_cudagraph_mode, local_cudagraph_size = (
            self.cudagraph_manager.get_cudagraph_runtime_mode(
                num_reqs=len(scheduler_output.num_scheduled_tokens),
                num_tokens=scheduler_output.total_num_scheduled_tokens,
                max_query_len=max(scheduler_output.num_scheduled_tokens.values()),
            )
        )
        # DP sync: num_tokens + cudagraph_size + cudagraph_mode
        num_tokens_after_padding, num_tokens_across_dp, synced_cudagraph_mode = (
            get_cudagraph_and_dp_padding(
                scheduler_output.total_num_scheduled_tokens,
                local_cudagraph_size,
                local_cudagraph_mode.value,
                self.parallel_config.data_parallel_size,
                self.parallel_config.data_parallel_rank,
            )
        )
        cudagraph_runtime_mode = CUDAGraphMode(synced_cudagraph_mode)
        if num_tokens_after_padding == 0:
            # All DP ranks have zero tokens to run.
            empty_output = self.kv_connector.no_forward(scheduler_output)
            return empty_output
        if not dummy_run:
            # Common case.
            # Prepare all the inputs and copy to the input buffers.
            input_batch = self.prepare_inputs(
                scheduler_output, num_tokens_after_padding
            )
            block_tables, slot_mappings = self.prepare_attn(input_batch)
            if self.lora_config:
                # Activate LoRA adapters.
                lora_inputs = self.lora_state.make_lora_inputs(
                    input_batch.req_ids,
                    input_batch.idx_mapping_np,
                    input_batch.num_scheduled_tokens,
                )
                self._set_active_loras(*lora_inputs)
        else:
            # No actual tokens to run. A dummy run for DP or memory profiling.
            num_reqs = min(num_tokens_after_padding, self.max_num_reqs)
            input_batch = InputBatch.make_dummy(
                num_reqs=num_reqs,
                num_tokens=num_tokens_after_padding,
                input_buffers=self.input_buffers,
                device=self.device,
            )
            if not skip_attn_for_dummy_run:
                block_tables, slot_mappings = self.prepare_dummy_attn(input_batch)
            else:
                block_tables = None
                slot_mappings = None
            # FIXME(woosuk): Fix warmup for LoRA.
        attn_metadata = None
        slot_mappings_by_layer = None
        if not (dummy_run and skip_attn_for_dummy_run):
            assert slot_mappings is not None
            slot_mappings_by_layer = build_slot_mappings_by_layer(
                slot_mappings, self.kv_cache_config
            )
            assert block_tables is not None
            attn_metadata = self.model_state.prepare_attn(
                input_batch,
                block_tables,
                slot_mappings,
                self.attn_groups,
                self.kv_cache_config,
            )
        inputs_embeds = None
        if self.supports_mm_inputs and self.is_first_pp_rank and not dummy_run:
            # Run MM encoder (if needed) and get multimodal embeddings.
            # Only first PP rank prepares multimodal embeddings.
            inputs_embeds = self.model_state.get_mm_embeddings(
                scheduler_output.scheduled_encoder_inputs,
                input_batch,
                self.req_states,
            )
        model_inputs = {
            "input_ids": input_batch.input_ids,
            "positions": input_batch.positions,
            "inputs_embeds": inputs_embeds,
            # NOTE: Values returned by `prepare_inputs` will override the default
            # values above.
            **self.model_state.prepare_inputs(input_batch, self.req_states),
        }
        if not self.is_first_pp_rank:
            # Update for non-first PP ranks.
            model_inputs["input_ids"] = None
            model_inputs["inputs_embeds"] = None
            model_inputs["intermediate_tensors"] = intermediate_tensors
        # Run model.
        if cudagraph_runtime_mode == CUDAGraphMode.FULL:
            # Use explicit cudagraph replay for FULL mode.
            # NOTE(woosuk): Here, we don't need to pass the input tensors,
            # because they are already copied to the CUDA graph input buffers.
            self.kv_connector.pre_forward(scheduler_output)
            model_output = self.cudagraph_manager.run_fullgraph(
                input_batch.num_tokens_after_padding
            )
            if self.use_aux_hidden_state_outputs:
                hidden_states, aux_hidden_states = model_output
            else:
                hidden_states = model_output
                aux_hidden_states = None
        else:
            # For piecewise and eager mode, just call model().
            batch_descriptor = BatchDescriptor(
                num_tokens=input_batch.num_tokens_after_padding,
                has_lora=self.lora_config is not None,
            )
            with set_forward_context(
                attn_metadata,
                self.vllm_config,
                num_tokens=input_batch.num_tokens_after_padding,
                cudagraph_runtime_mode=cudagraph_runtime_mode,
                num_tokens_across_dp=num_tokens_across_dp,
                batch_descriptor=batch_descriptor,
                slot_mapping=slot_mappings_by_layer,
            ):
                self.kv_connector.pre_forward(scheduler_output)
                model_output = self.model(**model_inputs)
            if self.use_aux_hidden_state_outputs:
                hidden_states, aux_hidden_states = model_output
            else:
                hidden_states = model_output
                aux_hidden_states = None
        kv_connector_output = self.kv_connector.post_forward(scheduler_output)
        # Stash everything sample_tokens needs for this step.
        self.execute_model_state = (
            input_batch,
            model_inputs,
            attn_metadata,
            slot_mappings_by_layer,
            hidden_states,
            aux_hidden_states,
            kv_connector_output,
        )
        if not self.is_last_pp_rank:
            # Non-last PP rank: return IntermediateTensors for sending.
            assert isinstance(hidden_states, IntermediateTensors)
            hidden_states.kv_connector_output = kv_connector_output
            return hidden_states
        # Last rank (or no PP): hidden_states is a tensor for sampling.
        assert isinstance(hidden_states, torch.Tensor)
        return None
@torch.inference_mode()
def sample_tokens(
    self, grammar_output: GrammarOutput | None
) -> AsyncOutput | ModelRunnerOutput | None:
    """Sample next tokens from the hidden states left by execute_model.

    Consumes ``self.execute_model_state`` (set by the preceding
    execute_model call), samples on the last PP rank, broadcasts/receives
    sampled tokens across PP ranks, updates request states, and optionally
    proposes speculative draft tokens.

    Returns:
        None on non-last PP ranks or when execute_model failed; otherwise
        an ``AsyncOutput`` (async scheduling) or its materialized
        ``ModelRunnerOutput``.
    """
    if self.execute_model_state is None:
        # The prior execute_model call must have failed.
        return None
    (
        input_batch,
        model_inputs,
        attn_metadata,
        slot_mappings_by_layer,
        hidden_states,
        aux_hidden_states,
        kv_connector_output,
    ) = self.execute_model_state
    # Consume the state so it cannot be reused by a later call.
    self.execute_model_state = None

    if not self.is_last_pp_rank:
        # Non-last PP rank: hidden_states is None because this rank produced
        # IntermediateTensors instead of final hidden states. Receive the
        # sampled tokens broadcast from the last rank and update local state.
        sampled, num_sampled, num_rejected = pp_receive(
            input_batch.num_reqs, max_sample_len=self.num_speculative_steps + 1
        )
        self.postprocess(input_batch, sampled, num_sampled, num_rejected)
        return None

    # Last rank: sample tokens
    sampler_output, num_sampled, num_rejected = self.sample(
        hidden_states, input_batch, grammar_output
    )
    if self.use_pp:
        # Broadcast to non-last PP ranks (handles spec decode multi-token).
        pp_broadcast(sampler_output.sampled_token_ids, num_sampled, num_rejected)

    # Compute prompt logprobs for requests that asked for them.
    prompt_logprobs_dict = self.prompt_logprobs_worker.compute_prompt_logprobs(
        self.model.compute_logits,
        hidden_states,
        input_batch,
        self.req_states.all_token_ids.gpu,
        self.req_states.num_computed_tokens.gpu,
        self.req_states.prompt_len.np,
        self.req_states.prefill_len.np,
        self.req_states.num_computed_prefill_tokens,
    )
    # Prepare the model runner output.
    model_runner_output = ModelRunnerOutput(
        req_ids=input_batch.req_ids,
        # NOTE(woosuk): req_id_to_index is unused in this model runner.
        # Only for compatibility with the existing model runner and scheduler.
        req_id_to_index={req_id: i for i, req_id in enumerate(input_batch.req_ids)},
        sampled_token_ids=None,  # type: ignore
        prompt_logprobs_dict=prompt_logprobs_dict,  # type: ignore[arg-type]
        kv_connector_output=kv_connector_output,
    )
    async_output = AsyncOutput(
        model_runner_output=model_runner_output,
        sampler_output=sampler_output,
        num_sampled_tokens=num_sampled,
        main_stream=self.main_stream,
        copy_stream=self.output_copy_stream,
        copy_event=self.output_copy_event,
    )
    # Postprocess results and update request states.
    # NOTE: This is intentionally done after creating the AsyncOutput,
    # ensuring that `copy_event` is recorded before calling postprocess.
    # This sequencing may slightly reduce latency as async D2H copy does not
    # need to wait for the postprocess to finish.
    self.postprocess(
        input_batch, sampler_output.sampled_token_ids, num_sampled, num_rejected
    )

    if self.speculator is not None:
        # Speculative decoding: propose draft tokens for the next step and
        # make them visible to the scheduler via the draft-tokens handler.
        draft_tokens = self.speculator.propose(
            input_batch,
            attn_metadata,
            slot_mappings_by_layer,
            hidden_states,
            aux_hidden_states,
            num_sampled,
            num_rejected,
            self.req_states.last_sampled_tokens,
            self.req_states.next_prefill_tokens,
            self.sampler.sampling_states.temperature.gpu,
            self.sampler.sampling_states.seeds.gpu,
        )
        self.req_states.draft_tokens[input_batch.idx_mapping] = draft_tokens
        self.draft_tokens_handler.set_draft_tokens(input_batch, draft_tokens)

    if self.use_async_scheduling:
        return async_output
    return async_output.get_output()
def take_draft_token_ids(self) -> DraftTokenIds | None:
    """Hand over any draft token ids accumulated by the handler."""
    handler = self.draft_tokens_handler
    return handler.get_draft_tokens()
@torch.inference_mode()
def pool(self) -> AsyncPoolingOutput | ModelRunnerOutput | None:
    """Run pooling over the hidden states left by execute_model.

    Pooling counterpart of sample_tokens: consumes
    ``self.execute_model_state``, pools on the last PP rank, and updates
    request bookkeeping on every rank.

    Returns:
        None on non-last PP ranks or when execute_model failed; otherwise
        an ``AsyncPoolingOutput`` (async scheduling) or its materialized
        ``ModelRunnerOutput``.
    """
    if self.execute_model_state is None:
        # The prior execute_model call must have failed.
        return None
    input_batch, _, _, _, hidden_states, _, kv_connector_output = (
        self.execute_model_state
    )
    # Consume the state so it cannot be reused by a later call.
    self.execute_model_state = None

    if not self.is_last_pp_rank:
        # Non-last PP rank: only update the token counters.
        self.postprocess_pool(input_batch)
        return None

    assert self.pooling_runner is not None
    pooler_output, is_valid = self.pooling_runner.pool(
        hidden_states, input_batch, self.req_states
    )
    self.postprocess_pool(input_batch)

    # Build the model runner output.
    model_runner_output = ModelRunnerOutput(
        req_ids=input_batch.req_ids,
        req_id_to_index={req_id: i for i, req_id in enumerate(input_batch.req_ids)},
        kv_connector_output=kv_connector_output,
    )
    async_output = AsyncPoolingOutput(
        model_runner_output=model_runner_output,
        pooler_output=pooler_output,
        is_valid=is_valid,
        main_stream=self.main_stream,
        copy_stream=self.output_copy_stream,
        copy_event=self.output_copy_event,
    )
    if self.use_async_scheduling:
        return async_output
    return async_output.get_output()
def postprocess_pool(self, input_batch: InputBatch) -> None:
    """Advance per-request token counters after a pooling step.

    Updates the GPU-side computed-token counters via ``post_update_pool``
    and the host-side computed-prefill counters, clamping the latter to
    each request's prefill length.
    """
    # Update the number of computed tokens.
    post_update_pool(
        input_batch.idx_mapping,
        self.req_states.num_computed_tokens.gpu,
        input_batch.query_start_loc,
    )
    # Update the number of computed prefill tokens.
    idx_mapping_np = input_batch.idx_mapping_np
    computed_prefill = self.req_states.num_computed_prefill_tokens
    computed_prefill[idx_mapping_np] += input_batch.num_scheduled_tokens
    # Clamp in place: computed prefill tokens can never exceed prefill_len.
    np.minimum(
        computed_prefill, self.req_states.prefill_len.np, out=computed_prefill
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/model_runner.py",
"license": "Apache License 2.0",
"lines": 1018,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/states.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.v1.worker.gpu.buffer_utils import StagedWriteTensor, UvaBackedTensor
class RequestState:
    """Per-request state kept by the GPU model runner.

    Maps request ids to fixed slot indices and stores, per slot, the token
    ids and bookkeeping counters (prompt/prefill/total/computed lengths)
    plus buffers used for speculative decoding. Host-side writes are staged
    and flushed to UVA/GPU memory by ``apply_staged_writes``.
    """

    def __init__(
        self,
        max_num_reqs: int,
        max_model_len: int,
        max_num_batched_tokens: int,
        num_speculative_steps: int,
        vocab_size: int,
        device: torch.device,
    ):
        self.max_num_reqs = max_num_reqs
        self.max_model_len = max_model_len
        self.max_num_batched_tokens = max_num_batched_tokens
        self.num_speculative_steps = num_speculative_steps
        self.vocab_size = vocab_size
        self.device = device

        # Bidirectional mapping between request ids and slot indices.
        self.req_id_to_index: dict[str, int] = {}
        self.index_to_req_id: dict[int, str] = {}
        # Pool of unused slot indices: add_request pops, remove_request appends.
        self.free_indices = list(range(max_num_reqs))

        # NOTE(woosuk): This tensor can be extremely large (e.g., several GBs)
        # depending on the configured max_num_reqs and max_model_len.
        # To save GPU memory, we use UVA instead of GPU for this tensor.
        self.all_token_ids = StagedWriteTensor(
            (self.max_num_reqs, self.max_model_len),
            dtype=torch.int32,
            device=device,
            uva_instead_of_gpu=True,
        )
        # NOTE(woosuk): Distinguish clearly between prompt_len and prefill_len:
        # - prompt_len: Number of tokens in the user-provided prompt.
        # - prefill_len: Number of tokens passed into the model runner.
        # This can include the prompt and additional partial output tokens,
        # so prefill_len >= prompt_len.
        # Usually, prefill_len equals prompt_len, but in cases such as resumption after
        # preemption, prefill_len may be greater. Differentiating between these values
        # is crucial, as certain features such as prompt logprobs or frequency penalties
        # must treat prompt and output tokens separately.
        self.prompt_len = UvaBackedTensor(self.max_num_reqs, dtype=torch.int32)
        self.prefill_len = UvaBackedTensor(self.max_num_reqs, dtype=torch.int32)
        # total_len = prompt_len + output_len. It grows as the request progresses.
        self.total_len = StagedWriteTensor(
            self.max_num_reqs, dtype=torch.int32, device=device
        )

        # Number of computed tokens (host-side prefill counter + staged tensor).
        self.num_computed_prefill_tokens = np.zeros(self.max_num_reqs, dtype=np.int32)
        self.num_computed_tokens = StagedWriteTensor(
            self.max_num_reqs, dtype=torch.int32, device=device
        )

        # Last sampled tokens, one per request slot.
        self.last_sampled_tokens = torch.zeros(
            self.max_num_reqs, 1, dtype=torch.int64, device=device
        )
        # Draft tokens for speculative decoding.
        self.draft_tokens = torch.zeros(
            self.max_num_reqs,
            self.num_speculative_steps,
            dtype=torch.int64,
            device=device,
        )
        self.next_prefill_tokens = torch.zeros(
            self.max_num_reqs, dtype=torch.int32, device=device
        )

    @property
    def num_reqs(self) -> int:
        """Number of live requests currently tracked."""
        return len(self.req_id_to_index)

    def add_request(
        self,
        req_id: str,
        prompt_len: int,
        all_token_ids: list[int],
        num_computed_tokens: int,
    ) -> None:
        """Assign a free slot to ``req_id`` and stage its initial state.

        Writes are staged on the host; ``apply_staged_writes`` must run
        before the device sees them.

        Raises:
            AssertionError: if no free slot is available or
                ``len(all_token_ids) < prompt_len``.
        """
        assert len(self.free_indices) > 0, "No free indices"
        req_idx = self.free_indices.pop()
        self.req_id_to_index[req_id] = req_idx
        self.index_to_req_id[req_idx] = req_id

        self.prompt_len.np[req_idx] = prompt_len
        # prefill_len may exceed prompt_len (e.g. resumption after preemption).
        prefill_len = len(all_token_ids)
        assert prefill_len >= prompt_len, (
            f"prefill_len {prefill_len} < prompt_len {prompt_len}"
        )
        self.prefill_len.np[req_idx] = prefill_len
        self.total_len.stage_write_elem(req_idx, prefill_len)
        self.all_token_ids.stage_write(req_idx, 0, all_token_ids)
        self.num_computed_prefill_tokens[req_idx] = num_computed_tokens
        self.num_computed_tokens.stage_write_elem(req_idx, num_computed_tokens)

    def apply_staged_writes(self) -> None:
        """Flush all staged host-side writes to their UVA/GPU backing tensors."""
        self.prompt_len.copy_to_uva()
        self.prefill_len.copy_to_uva()
        self.total_len.apply_write()
        self.all_token_ids.apply_write()
        self.num_computed_tokens.apply_write()

    def remove_request(self, req_id: str) -> None:
        """Release the slot held by ``req_id``; no-op if the id is unknown."""
        req_idx = self.req_id_to_index.pop(req_id, None)
        if req_idx is None:
            # Request not found.
            return
        self.index_to_req_id.pop(req_idx, None)
        self.free_indices.append(req_idx)

    def any_prefills(self, idx_mapping_np: np.ndarray) -> bool:
        """Return True if any selected request still has prefill tokens left.

        Wrapped in bool() so the return type matches the annotation
        (np.any returns np.bool_, not a Python bool).
        """
        return bool(
            np.any(
                self.num_computed_prefill_tokens[idx_mapping_np]
                < self.prefill_len.np[idx_mapping_np]
            )
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/states.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/structured_outputs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.triton_utils import tl, triton
from vllm.utils.math_utils import cdiv
from vllm.v1.worker.gpu.buffer_utils import async_copy_to_gpu
from vllm.v1.worker.gpu.input_batch import InputBatch
class StructuredOutputsWorker:
    """Applies grammar (structured-output) bitmasks to logits on the GPU.

    Holds preallocated device buffers for the packed bitmask and for the
    bitmask-row -> logits-row mapping, plus a dedicated CUDA stream so the
    host-to-device copies overlap with compute on the main stream.
    """

    def __init__(self, max_num_logits: int, vocab_size: int, device: torch.device):
        # Mapping from bitmask rows to rows of the logits tensor.
        self.logits_indices = torch.zeros(
            max_num_logits, dtype=torch.int32, device=device
        )
        # One bit per vocab token, packed 32 tokens per int32.
        self.grammar_bitmask = torch.zeros(
            (max_num_logits, cdiv(vocab_size, 32)), dtype=torch.int32, device=device
        )
        self.device = device
        # Side stream used for the async H2D copies below.
        self.copy_stream = torch.cuda.Stream()

    def apply_grammar_bitmask(
        self,
        logits: torch.Tensor,
        input_batch: InputBatch,
        grammar_req_ids: list[str],
        grammar_bitmask: np.ndarray,
    ) -> None:
        """Mask out grammar-disallowed tokens in `logits` in place.

        `grammar_bitmask` has one packed row per grammar-constrained logits
        row; `grammar_req_ids` names the constrained requests in row order.
        No-op when `grammar_req_ids` is empty.
        """
        if not grammar_req_ids:
            return
        # Asynchronously copy the bitmask to GPU.
        with torch.cuda.stream(self.copy_stream):
            bitmask = async_copy_to_gpu(
                grammar_bitmask, out=self.grammar_bitmask[: grammar_bitmask.shape[0]]
            )

        # Construct bitmask -> logits mapping
        mapping: list[int] = []
        req_ids = input_batch.req_ids
        cu_num_logits = input_batch.cu_num_logits_np.tolist()
        req_id_to_idx = {req_id: i for i, req_id in enumerate(req_ids)}
        for grammar_req_id in grammar_req_ids:
            req_idx = req_id_to_idx[grammar_req_id]
            # A request may own several logits rows (e.g. multiple tokens);
            # map every row in its [start, end) range.
            logits_start_idx = cu_num_logits[req_idx]
            logits_end_idx = cu_num_logits[req_idx + 1]
            mapping.extend(range(logits_start_idx, logits_end_idx))

        # Asynchronously copy the mapping to GPU.
        with torch.cuda.stream(self.copy_stream):
            logits_indices = torch.tensor(
                mapping, dtype=torch.int32, device="cpu", pin_memory=True
            )
            logits_indices = self.logits_indices[: len(mapping)].copy_(
                logits_indices, non_blocking=True
            )

        # Ensure all async copies are complete before launching the kernel.
        current_stream = torch.cuda.current_stream()
        current_stream.wait_stream(self.copy_stream)

        num_masks = bitmask.shape[0]
        assert num_masks == len(mapping)
        vocab_size = logits.shape[-1]
        BLOCK_SIZE = 8192
        # One program per (mask row, vocab block).
        grid = (num_masks, triton.cdiv(vocab_size, BLOCK_SIZE))
        _apply_grammar_bitmask_kernel[grid](
            logits,
            logits.stride(0),
            logits_indices,
            bitmask,
            bitmask.stride(0),
            vocab_size,
            BLOCK_SIZE=BLOCK_SIZE,
        )
        # Ensure the copy stream waits for the device tensors to finish being used
        # before it re-uses or deallocates them
        self.copy_stream.wait_stream(current_stream)
# Adapted from
# https://github.com/mlc-ai/xgrammar/blob/main/python/xgrammar/kernels/apply_token_bitmask_inplace_triton.py
@triton.jit
def _apply_grammar_bitmask_kernel(
    logits_ptr,
    logits_stride,
    logits_indices_ptr,
    bitmask_ptr,
    bitmask_stride,
    vocab_size,
    BLOCK_SIZE: tl.constexpr,
):
    # Writes -inf into logits positions whose grammar bit is 0 (disallowed).
    # Grid: (num bitmask rows, num vocab blocks of BLOCK_SIZE tokens).
    bitmask_idx = tl.program_id(0)
    # Indirection: bitmask row -> target row in the logits tensor.
    logits_idx = tl.load(logits_indices_ptr + bitmask_idx)

    # Load the bitmask.
    block_id = tl.program_id(1)
    # Each int32 packs 32 token bits, so the packed offset is /32.
    bitmask_offset = (block_id * BLOCK_SIZE) // 32 + tl.arange(0, BLOCK_SIZE // 32)
    packed_bitmask = tl.load(
        bitmask_ptr + bitmask_idx * bitmask_stride + bitmask_offset,
        mask=bitmask_offset < bitmask_stride,
    )
    # Unpack the bitmask. True means "disallowed" (bit == 0).
    bitmask = ((packed_bitmask[:, None] >> (tl.arange(0, 32)[None, :])) & 1) == 0
    bitmask = bitmask.reshape(BLOCK_SIZE)

    # Apply the bitmask to the logits.
    block_offset = block_id * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    tl.store(
        logits_ptr + logits_idx * logits_stride + block_offset,
        -float("inf"),
        mask=bitmask & (block_offset < vocab_size),
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/structured_outputs.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/plamo3.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only PLaMo3 model."""
from collections.abc import Iterable
from itertools import islice
from typing import Any
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.distributed.parallel_state import get_pp_group
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
DEFAULT_VOCAB_PADDING_SIZE,
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
LoaderFunction,
composed_weight_loader,
default_weight_loader,
)
from vllm.model_executor.models.interfaces import SupportsLoRA, SupportsPP
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
extract_layer_index,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
from vllm.model_executor.utils import set_weight_attrs
from vllm.sequence import IntermediateTensors
# Only used for type hinting.
class Plamo3Config(PretrainedConfig):  # type: ignore
    """Static type stub describing the PLaMo3 HF config fields used below.

    Never instantiated here; the real config object comes from
    ``vllm_config.model_config.hf_config``.
    """

    model_type: str = "plamo3"

    hidden_size: int
    num_hidden_layers: int
    rms_norm_eps: float
    # Attention
    num_attention_heads: int
    head_dim: int
    num_key_value_heads: int
    # vllm rename `sliding_window` attr to `interleaved_sliding_window`
    # if `sliding_window` is list
    interleaved_sliding_window: list[int | None]
    sliding_window_pattern: int
    rope_parameters: dict[str, Any]
    rope_local_theta: int
    # MLP
    intermediate_size: int
    # Tokenizer
    vocab_size: int
def rms_norm_weight_loader(offset: float) -> LoaderFunction:
    """Return a weight loader that adds ``offset`` to RMSNorm weights on load."""

    def _shift(x):
        return x + offset

    return composed_weight_loader(default_weight_loader, _shift)
class DenseMLP(nn.Module):
    """PLaMo3 gated feed-forward block: gate_up projection, SiLU-and-mul,
    then down projection. Tensor-parallel via the merged column / row
    parallel linear layers."""

    def __init__(
        self,
        config: Plamo3Config,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        # Gate and up projections fused into one column-parallel matmul.
        self.gate_up_proj = MergedColumnParallelLinear(
            self.hidden_size,
            [self.intermediate_size] * 2,
            bias=False,
            prefix=f"{prefix}.gate_up_proj",
            quant_config=quant_config,
            return_bias=False,
        )
        self.act = SiluAndMul()
        self.down_proj = RowParallelLinear(
            self.intermediate_size,
            self.hidden_size,
            bias=False,
            prefix=f"{prefix}.down_proj",
            quant_config=quant_config,
            return_bias=False,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Apply gate_up -> SiLU-and-mul -> down; shape is preserved."""
        h = self.gate_up_proj(hidden_states)
        h = self.act(h)
        return self.down_proj(h)
class Plamo3AttentionMixer(nn.Module):
    """PLaMo3 self-attention with per-head QK RMSNorm and rotary embeddings.

    Supports interleaved sliding-window ("local") and global attention
    layers; the per-layer window and rope theta are selected from the
    config by layer index.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "", **kwargs) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = config.num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = config.head_dim
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scaling = self.head_dim**-0.5

        self.qkv_proj = QKVParallelLinear(
            config.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )

        layer_idx = extract_layer_index(prefix)
        layer_type = config.layer_types[layer_idx]
        is_sliding = layer_type == "sliding_attention"
        # Initialize the rotary embedding.
        if layer_type in config.rope_parameters:
            # Transformers v5 rope config.
            rope_parameters = config.rope_parameters[layer_type]
        else:
            # Transformers v4 rope config.
            # Global attention. Use the values in config.json.
            rope_parameters = config.rope_parameters
            # Local attention. Override the values in config.json.
            if is_sliding:
                rope_parameters = dict(
                    rope_type="default", rope_theta=config.rope_local_theta
                )
        max_position = config.max_position_embeddings
        # Cap rope table size at the configured max model length, if set.
        if hasattr(vllm_config.model_config, "max_model_len") and isinstance(
            vllm_config.model_config.max_model_len, int
        ):
            max_position = min(max_position, vllm_config.model_config.max_model_len)
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position,
            rope_parameters=rope_parameters,
        )

        # QK norms load with a +1.0 offset applied to checkpoint weights.
        self.q_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.q_norm.weight, {"weight_loader": rms_norm_weight_loader(offset=1.0)}
        )
        self.k_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.k_norm.weight, {"weight_loader": rms_norm_weight_loader(offset=1.0)}
        )

        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=vllm_config.cache_config,
            per_layer_sliding_window=config.interleaved_sliding_window[layer_idx],
            prefix=f"{prefix}.attn",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs: Any,
    ) -> torch.Tensor:
        """QKV projection, per-head QK RMSNorm, rope, attention, output proj.

        `residual` is accepted for interface parity with the decoder layer
        but not used here.
        """
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # Reshape to (..., num_heads, head_dim) so RMSNorm is per head,
        # then restore the flat layout expected by rope/attention.
        q_shape = q.shape
        q = q.reshape(q_shape[:-1] + (q_shape[-1] // self.head_dim, self.head_dim))
        q = self.q_norm.forward_native(q).reshape(q_shape)
        k_shape = k.shape
        k = k.reshape(k_shape[:-1] + (k_shape[-1] // self.head_dim, self.head_dim))
        k = self.k_norm.forward_native(k).reshape(k_shape)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class Plamo3DecoderLayer(nn.Module):
    """One PLaMo3 transformer layer: attention mixer + MLP, each wrapped in
    pre/post RMSNorm with fused residual handling."""

    def __init__(
        self, vllm_config: VllmConfig, prefix: str = "", **kwargs: Any
    ) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.mixer = Plamo3AttentionMixer(
            vllm_config=vllm_config,
            prefix=f"{prefix}.mixer",
        )
        self.mlp = DenseMLP(
            config=config, quant_config=quant_config, prefix=f"{prefix}.mlp"
        )
        # Norm weights load with fixed offsets added to checkpoint values.
        # NOTE(review): the 1.0/5 and 1.0/(5**1.5) post-norm offsets
        # presumably match PLaMo3's training-time parameterization — not
        # derivable from this file.
        self.pre_mixer_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.pre_mixer_norm.weight,
            {"weight_loader": rms_norm_weight_loader(offset=1.0)},
        )
        self.post_mixer_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.post_mixer_norm.weight,
            {"weight_loader": rms_norm_weight_loader(offset=1.0 / 5)},
        )
        self.pre_mlp_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.pre_mlp_norm.weight,
            {"weight_loader": rms_norm_weight_loader(offset=1.0)},
        )
        self.post_mlp_norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.post_mlp_norm.weight,
            {"weight_loader": rms_norm_weight_loader(offset=1.0 / (5**1.5))},
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs: Any,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Run attention and MLP sub-blocks; returns (hidden, residual)."""
        if residual is None:
            # First layer: seed the residual stream from the input.
            residual = hidden_states
            hidden_states = self.pre_mixer_norm(hidden_states)
        else:
            # Fused add-and-norm with the running residual.
            hidden_states, residual = self.pre_mixer_norm(hidden_states, residual)

        hidden_states = self.mixer(
            positions=positions, hidden_states=hidden_states, residual=residual
        )
        hidden_states = self.post_mixer_norm(hidden_states)
        # Fully Connected
        hidden_states, residual = self.pre_mlp_norm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_norm(hidden_states)
        return hidden_states, residual
class Plamo3Decoder(torch.nn.Module):
    """Stack of Plamo3DecoderLayer instances, sliced for pipeline parallelism
    via make_layers (only [start_layer, end_layer) live on this rank)."""

    def __init__(self, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
        self.start_layer, self.end_layer, self.layers = make_layers(
            num_hidden_layers,
            lambda prefix: Plamo3DecoderLayer(vllm_config, prefix=prefix),
            prefix=f"{prefix}.layers",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Run this rank's layer range, threading the residual through."""
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(
                positions=positions,
                hidden_states=hidden_states,
                residual=residual,
            )
        return hidden_states, residual
@support_torch_compile
class Plamo3Model(nn.Module):
    """PLaMo3 backbone: token embedding, decoder stack, final RMSNorm.

    Pipeline-parallel aware: non-first ranks consume IntermediateTensors,
    non-last ranks return them instead of final hidden states.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        self.vocab_size = config.vocab_size
        self.org_vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            prefix=f"{prefix}.embed_tokens",
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        self.layers = Plamo3Decoder(vllm_config, prefix=f"{prefix}.layers")
        # Final norm also loads with the +1.0 weight offset.
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        set_weight_attrs(
            self.norm.weight,
            {"weight_loader": rms_norm_weight_loader(offset=1.0)},
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for `input_ids`."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the backbone; returns hidden states on the last PP rank,
        IntermediateTensors otherwise."""
        if get_pp_group().is_first_rank:
            # Precomputed embeddings (if given) take precedence over ids.
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        hidden_states, residual = self.layers(
            positions=positions, hidden_states=hidden_states, residual=residual
        )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
class Plamo3ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    """PLaMo3 causal-LM head on top of Plamo3Model, with LoRA and
    pipeline-parallel support."""

    packed_modules_mapping = {
        "qkv_proj": ["qkv_proj"],
        "gate_up_proj": ["gate_up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.scheduler_config = vllm_config.scheduler_config

        self.model = Plamo3Model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.vocab_size = self.config.vocab_size
        self.unpadded_vocab_size = self.config.vocab_size
        # Round the embedding count up to a multiple of 16.
        num_embeddings = ((self.vocab_size + 15) // 16) * 16
        self.lm_head = ParallelLMHead(
            num_embeddings,
            self.config.hidden_size,
            org_num_embeddings=self.config.vocab_size,
            padding_size=DEFAULT_VOCAB_PADDING_SIZE,
            prefix=f"{prefix}.lm_head",
        )
        if self.config.tie_word_embeddings:
            # Share weights with the input embedding table.
            self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)

        self.logits_processor = LogitsProcessor(
            self.unpadded_vocab_size, self.config.vocab_size
        )
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate embedding lookup to the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the backbone; logits are computed separately via compute_logits."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits through the LM head."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Load checkpoint weights; skips lm_head when embeddings are tied."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/plamo3.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from typing import TYPE_CHECKING, ClassVar
import numpy as np
import torch
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.attention.mla_attention import (
get_mla_dims,
)
from vllm.triton_utils import tl, triton
from vllm.v1.attention.backend import (
AttentionBackend,
AttentionCGSupport,
AttentionLayer,
AttentionMetadata,
AttentionMetadataBuilder,
CommonAttentionMetadata,
SparseMLAAttentionImpl,
)
from vllm.v1.attention.backends.mla.flashmla_sparse import (
triton_convert_req_index_to_global_index,
)
from vllm.v1.kv_cache_interface import AttentionSpec
if TYPE_CHECKING:
from vllm.model_executor.models.deepseek_v2 import Indexer
logger = init_logger(__name__)
@triton.jit
def fetch_id_to_ragged_kernel(
    in_tensor_ptr,  # [num_seq, topk]
    cumsum_ptr,  # [num_seq + 1]
    out_tensor_ptr,  # [max_num_seq * topk]
    in_tensor_ptr_stride,
    TOPK: tl.constexpr,
    TOKEN_NUM: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
):
    # Scatter each sequence's top-k indices into a ragged (flattened)
    # output, where cumsum gives each sequence's [start, end) range.
    # Grid: (num sequences, blocks of BLOCK_SIZE columns).
    seq_id = tl.program_id(0)
    block_id = tl.program_id(1)
    offset = tl.arange(0, BLOCK_SIZE)
    token_start = tl.load(cumsum_ptr + seq_id)
    token_end = tl.load(cumsum_ptr + seq_id + 1)
    token_num = token_end - token_start
    row_offset = block_id * BLOCK_SIZE
    # Entire block lies past this sequence's ragged length: nothing to do.
    if row_offset >= token_num:
        return
    in_tensor_offset = seq_id * in_tensor_ptr_stride + row_offset + offset
    in_tensor_mask = (row_offset + offset) < TOPK
    in_tensor_val = tl.load(in_tensor_ptr + in_tensor_offset, mask=in_tensor_mask)
    out_tensor_offset = token_start + row_offset + offset
    # Clip writes to both the sequence's ragged range and the top-k width.
    out_tensor_mask = (out_tensor_offset < token_end) & in_tensor_mask
    tl.store(out_tensor_ptr + out_tensor_offset, in_tensor_val, mask=out_tensor_mask)
def fetch_id_to_ragged_triton(
    in_tensor: torch.Tensor, cumsum: torch.Tensor, out_tensor: torch.Tensor, topk
):
    """Launch fetch_id_to_ragged_kernel with one program per
    (sequence, 64-column block) to scatter top-k ids into a ragged layout."""
    rows = in_tensor.size(0)
    cols_per_block = 64
    blocks_per_row = triton.cdiv(topk, cols_per_block)
    launch_grid = (rows, blocks_per_row)
    fetch_id_to_ragged_kernel[launch_grid](
        in_tensor,
        cumsum,
        out_tensor,
        in_tensor.stride(0),
        topk,
        rows,
        cols_per_block,
    )
class ROCMAiterMLASparseBackend(AttentionBackend):
    """Registry entry for the ROCm AITER sparse-MLA attention backend:
    names the backend and wires up its metadata/builder/impl classes."""

    # Kernel can write directly into a caller-provided output buffer.
    accept_output_buffer: bool = True

    @staticmethod
    def get_name() -> str:
        return "ROCM_AITER_MLA_SPARSE"

    @staticmethod
    def get_metadata_cls() -> type["ROCMAiterMLASparseMetadata"]:
        return ROCMAiterMLASparseMetadata

    @staticmethod
    def get_builder_cls() -> type["ROCMAiterMLASparseMetadataBuilder"]:
        return ROCMAiterMLASparseMetadataBuilder

    @staticmethod
    def get_impl_cls() -> type["ROCMAiterMLASparseImpl"]:
        return ROCMAiterMLASparseImpl

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,  # assumed to be 1 for MLA
        head_size: int,
        cache_dtype_str: str = "auto",
    ) -> tuple[int, ...]:
        # MLA stores a single latent head, so num_kv_heads is not a dim.
        return (num_blocks, block_size, head_size)

    @classmethod
    def get_supported_dtypes(cls) -> list[torch.dtype]:
        return [torch.bfloat16]

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        return [576]
@dataclass
class ROCMAiterMLASparseMetadata(AttentionMetadata):
    """Per-step attention metadata for the ROCm AITER sparse-MLA backend.

    Holds the batch shape plus the paged-KV index structures consumed by
    the sparse attention kernels.
    """

    num_reqs: int
    max_query_len: int
    max_seq_len: int

    num_actual_tokens: int  # Number of tokens excluding padding.
    query_start_loc: torch.Tensor
    slot_mapping: torch.Tensor

    block_table: torch.Tensor
    # Maps each token to the request (sequence) it belongs to.
    req_id_per_token: torch.Tensor
    qo_indptr: torch.Tensor
    paged_kv_last_page_len: torch.Tensor
    paged_kv_indices: torch.Tensor
    paged_kv_indptr: torch.Tensor
    # Unused tail of the shared indptr buffer (see the builder).
    paged_kv_indptr_rest: torch.Tensor
    block_size: int = 1
    topk_tokens: int = 2048
@dataclass
class ROCMAiterMLASparseMetadataBuilder(
    AttentionMetadataBuilder[ROCMAiterMLASparseMetadata]
):
    """Builds ROCMAiterMLASparseMetadata each step from preallocated buffers.

    NOTE(review): the @dataclass decorator appears redundant — the class
    defines its own __init__ and no dataclass fields; confirm before
    removing.
    """

    # Sparse indexing is incompatible with CUDA graph capture here.
    _cudagraph_support: ClassVar[AttentionCGSupport] = AttentionCGSupport.NEVER

    def __init__(
        self,
        kv_cache_spec: AttentionSpec,
        layer_names: list[str],
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        self.kv_cache_spec = kv_cache_spec
        self.model_config = vllm_config.model_config
        parallel_config = vllm_config.parallel_config
        self.device = device
        max_num_batched_tokens = vllm_config.scheduler_config.max_num_batched_tokens

        self.num_heads = self.model_config.get_num_attention_heads(parallel_config)
        self.mla_dims = get_mla_dims(self.model_config)
        # Number of KV tokens each query attends to (model's sparse top-k).
        self.topk_tokens = vllm_config.model_config.hf_config.index_topk

        self.topk_tokens_tensor = torch.tensor(
            [self.topk_tokens], device=device, dtype=torch.int32
        )
        self.max_model_len_tensor = torch.tensor(
            [self.model_config.max_model_len], device=device, dtype=torch.int32
        )
        # this is ignored by `flash_mla_with_kvcache` if indices not None
        self.dummy_block_table = torch.empty(
            (1, 1), dtype=torch.int32, device=self.device
        )

        self.req_id_per_token_buffer = torch.empty(
            (vllm_config.scheduler_config.max_num_batched_tokens,),
            dtype=torch.int32,
            device=device,
        )
        # Identity indptr: one query token per "sequence" entry.
        self.qo_indptr = torch.arange(
            0, max_num_batched_tokens + 1, dtype=torch.int32, device=device
        )
        self.paged_kv_last_page_len = torch.ones(
            max_num_batched_tokens, dtype=torch.int32, device=device
        )
        # These two needs to be calculated in runtime,
        # but we still needs to prepare the buffer
        self.paged_kv_indices = torch.zeros(
            [max_num_batched_tokens * self.topk_tokens],
            dtype=torch.int32,
            device=device,
        )
        self.paged_kv_indptr = torch.zeros(
            [max_num_batched_tokens + 1], dtype=torch.int32, device=device
        )

    def build(
        self,
        common_prefix_len: int,
        common_attn_metadata: CommonAttentionMetadata,
        fast_build: bool = False,
    ) -> ROCMAiterMLASparseMetadata:
        """Assemble metadata for one step from the common batch metadata.

        Computes the token -> request mapping on the host, zero-fills the
        reusable buffers, and slices them to the actual token count.
        """
        num_tokens = common_attn_metadata.num_actual_tokens
        # Expand per-request query ranges into a per-token request id.
        starts = np.asarray(common_attn_metadata.query_start_loc_cpu, dtype=np.int32)
        seg_lengths = np.diff(starts)
        req_id_per_token = np.repeat(
            np.arange(seg_lengths.shape[0], dtype=np.int32), seg_lengths
        )
        # Zero-fill for cudagraphs
        self.req_id_per_token_buffer.fill_(0)
        self.req_id_per_token_buffer[: req_id_per_token.shape[0]].copy_(
            torch.from_numpy(req_id_per_token), non_blocking=True
        )
        self.paged_kv_indices.fill_(0)
        self.paged_kv_indptr.fill_(0)
        req_id_per_token = self.req_id_per_token_buffer[:num_tokens]

        # Slice the preallocated buffers to the live token count.
        qo_indptr = self.qo_indptr[: num_tokens + 1]
        paged_kv_last_page_len = self.paged_kv_last_page_len[:num_tokens]
        paged_kv_indices = self.paged_kv_indices[: num_tokens * self.topk_tokens]
        paged_kv_indptr = self.paged_kv_indptr[: num_tokens + 1]
        paged_kv_indptr_rest = self.paged_kv_indptr[num_tokens + 1 :]

        metadata = ROCMAiterMLASparseMetadata(
            num_reqs=common_attn_metadata.num_reqs,
            max_query_len=common_attn_metadata.max_query_len,
            max_seq_len=common_attn_metadata.max_seq_len,
            num_actual_tokens=common_attn_metadata.num_actual_tokens,
            query_start_loc=common_attn_metadata.query_start_loc,
            slot_mapping=common_attn_metadata.slot_mapping,
            block_table=common_attn_metadata.block_table_tensor,
            req_id_per_token=req_id_per_token,
            block_size=self.kv_cache_spec.block_size,
            topk_tokens=self.topk_tokens,
            qo_indptr=qo_indptr,
            paged_kv_last_page_len=paged_kv_last_page_len,
            paged_kv_indices=paged_kv_indices,
            paged_kv_indptr=paged_kv_indptr,
            paged_kv_indptr_rest=paged_kv_indptr_rest,
        )
        return metadata
# Take from
# https://github.com/deepseek-ai/FlashMLA/blob/main/tests/test_flash_mla_prefill.py#L72
def reference_mla_sparse_prefill(
    q: torch.Tensor, kv: torch.Tensor, indices: torch.Tensor, sm_scale: float, d_v: int
) -> tuple[torch.Tensor, torch.Tensor]:
    """Eager reference for sparse MLA prefill.

    Args:
        q: [s_q, h_q, d_qk] queries.
        kv: [s_kv, 1, d_qk] shared latent KV; the first d_v dims act as V.
        indices: [s_q, 1, topk] token indices into kv. Entries < 0 or
            >= s_kv are treated as masked out. Not modified.
        sm_scale: softmax scale applied to the raw scores.
        d_v: value dimension (<= d_qk).

    Returns:
        (out, lse): out is [s_q, h_q, d_v]; lse is the base-2 log-sum-exp
        of the scaled scores, [s_q, h_q].
    """
    import math

    def log2sumexp2(a: torch.Tensor, dim: int) -> torch.Tensor:
        # log2(sum(2**a)) computed via the natural-log logsumexp.
        return torch.logsumexp(a * math.log(2), dim=dim) * math.log2(math.e)

    skv = kv.shape[0]
    sq = q.shape[0]
    topk = indices.shape[-1]
    dqk = q.shape[-1]
    # Clone before masking: the original wrote zeros through a view of the
    # caller's `indices` tensor, silently mutating the argument.
    indices = indices[:, 0, :].clone()  # [s_q, topk]
    invalid_indices_mask = (indices < 0) | (indices >= skv)
    indices[invalid_indices_mask] = 0
    qs = q  # [s_q, h_q, d_qk]
    kvs = kv[:, 0, :][indices].view(sq, topk, dqk)  # [s_q, topk, d_qk]
    attn_score = (qs @ kvs.transpose(1, 2)).float()  # [s_q, h_q, topk]
    # Invalid slots contribute nothing to the softmax.
    attn_score.masked_fill_(invalid_indices_mask.unsqueeze(1), float("-inf"))
    attn_score *= sm_scale * math.log2(math.e)
    lse = log2sumexp2(attn_score, dim=-1)  # [s_q, h_q]
    attn_score = torch.exp2(attn_score - lse.unsqueeze(-1))  # [s_q, h_q, topk]
    result = attn_score.to(q.dtype) @ kvs[:, :, :d_v]
    return (result, lse)
class ROCMAiterMLASparseImpl(SparseMLAAttentionImpl[ROCMAiterMLASparseMetadata]):
    """Sparse MLA attention implementation backed by ROCm AITER kernels."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: list[float] | None,
        sliding_window: int | None,
        kv_cache_dtype: str,
        logits_soft_cap: float | None,
        attn_type: str,
        kv_sharing_target_layer_name: str | None,
        # MLA Specific Arguments
        topk_indice_buffer: torch.Tensor | None = None,
        indexer: "Indexer | None" = None,
        **mla_args,
    ) -> None:
        # NOTE: alibi_slopes, sliding_window, logits_soft_cap, attn_type,
        # kv_sharing_target_layer_name and topk_indice_buffer are accepted
        # for interface parity but not used by this implementation.
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        self.kv_cache_dtype = kv_cache_dtype
        self.kv_lora_rank: int = mla_args["kv_lora_rank"]
        self.softmax_scale = scale
        # The indexer supplies the per-token top-k KV indices every step.
        assert indexer is not None
        self.topk_indices_buffer: torch.Tensor | None = indexer.topk_indices_buffer

    def _forward_bf16_kv(
        self,
        q: torch.Tensor,  # [sq, heads, d_qk]
        kv_c_and_k_pe_cache: torch.Tensor,  # [blocks, heads, d_qk]
        topk_indices: torch.Tensor,  # [sq, topk]
        attn_metadata: ROCMAiterMLASparseMetadata,
    ) -> torch.Tensor:
        """Run the AITER MLA decode kernel over the selected KV entries."""
        num_tokens = q.shape[0]
        output = torch.empty(
            [num_tokens, self.num_heads, self.kv_lora_rank],
            dtype=q.dtype,
            device=q.device,
        )
        # -1 marks unused top-k slots; count the valid entries per token and
        # turn them into a cumulative indptr (entry 0 stays 0 from the
        # builder's zero-fill).
        seq_len = (topk_indices != -1).sum(dim=-1)
        torch.cumsum(seq_len, dim=0, out=attn_metadata.paged_kv_indptr[1:])
        # Pad the indptr tail with the total so padded tokens (e.g. for
        # cudagraph shapes) see an empty KV range.
        attn_metadata.paged_kv_indptr_rest.fill_(attn_metadata.paged_kv_indptr[-1])
        # Compact the valid indices into the ragged paged_kv_indices layout.
        fetch_id_to_ragged_triton(
            topk_indices,
            attn_metadata.paged_kv_indptr,
            attn_metadata.paged_kv_indices,
            attn_metadata.topk_tokens,
        )
        rocm_aiter_ops.mla_decode_fwd(
            q,
            kv_c_and_k_pe_cache,
            output,
            self.scale,
            attn_metadata.qo_indptr,
            1,
            attn_metadata.paged_kv_indptr,
            attn_metadata.paged_kv_indices,
            attn_metadata.paged_kv_last_page_len,
        )
        return output[:, : self.num_heads, :]

    def forward_mqa(
        self,
        q: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: ROCMAiterMLASparseMetadata,
        layer: AttentionLayer,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Sparse MQA-style forward; returns (attn_out, None)."""
        # NOTE(lucas): for the sparse FlashMLA kernels the kernels want to use
        # MQA 576/512 approach for both prefill and decode
        # Concatenate q if it's a tuple (ql_nope, q_pe)
        if isinstance(q, tuple):
            q = torch.cat(q, dim=-1)
        num_actual_toks = q.shape[0]
        # Get topk indices
        assert self.topk_indices_buffer is not None
        topk_indices = self.topk_indices_buffer[:num_actual_toks]
        # Translate per-request indices into global KV-cache slot indices.
        topk_indices_global = triton_convert_req_index_to_global_index(
            attn_metadata.req_id_per_token,
            attn_metadata.block_table,
            topk_indices,
            BLOCK_SIZE=attn_metadata.block_size,
            NUM_TOPK_TOKENS=attn_metadata.topk_tokens,
        )
        attn_out = self._forward_bf16_kv(
            q, kv_c_and_k_pe_cache, topk_indices_global, attn_metadata
        )
        return attn_out, None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/mla/rocm_aiter_mla_sparse.py",
"license": "Apache License 2.0",
"lines": 319,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/test_cache_kernels.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for CUDA kernels in cache_kernels.cu."""
import pytest
import torch
try:
from vllm import _custom_ops as ops
except ImportError:
pytest.skip(
"Could not import vllm._custom_ops. (pip install -e .)", allow_module_level=True
)
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Need CUDA device")
def test_gather_cache_oob():
    """
    Regression test for an OOB read in gather_and_maybe_dequant_cache
    (Issue #27909): a seq_starts offset used to push the block_table
    lookup one entry past the end of the table row.
    """
    num_reqs = 1
    blk_size = 64
    entry_dim = 128

    # Two physical blocks for the single request.
    table = torch.tensor([[1, 2]], dtype=torch.int32, device="cuda")

    # offset = 128 / blk_size = 2, so the kernel would index table[0, 2]
    # even though the row only has 2 entries.
    starts = torch.tensor([128], dtype=torch.int32, device="cuda")

    tokens = 65
    cu_lens = torch.tensor([0, tokens], dtype=torch.int32, device="cuda")

    # Source cache laid out as [num_blocks, block_size, entry_size].
    cache = torch.randn((5, blk_size, entry_dim), dtype=torch.float16, device="cuda")
    out = torch.empty((tokens, entry_dim), dtype=torch.float16, device="cuda")
    kv_scale = torch.tensor([1.0], dtype=torch.float32, device="cuda")

    # Exercise the C++ gather_and_maybe_dequant_cache entry point; passing
    # is simply not faulting (run under compute-sanitizer to catch the OOB).
    ops.gather_and_maybe_dequant_cache(
        cache,
        out,
        table,
        cu_lens,
        num_reqs,
        "auto",  # kv_cache_dtype
        kv_scale,
        starts,
    )
    torch.cuda.synchronize()
# Allow running this file directly without the pytest CLI.
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/test_cache_kernels.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/worker/test_gpu_profiler.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.config import ProfilerConfig
from vllm.config.profiler import _is_uri_path
from vllm.profiler.wrapper import WorkerProfiler
class ConcreteWorkerProfiler(WorkerProfiler):
    """
    A basic implementation of a worker profiler for testing purposes.

    Counts _start/_stop invocations so tests can assert exactly when the
    base class triggers them; can also simulate a failing backend start.
    """

    def __init__(self, profiler_config: ProfilerConfig):
        # Counters are set before super().__init__ -- NOTE(review): presumably
        # in case the base constructor already drives start logic; confirm.
        self.start_call_count = 0
        self.stop_call_count = 0
        self.should_fail_start = False
        super().__init__(profiler_config)

    def _start(self) -> None:
        # Simulate a backend failure when requested by the test.
        if self.should_fail_start:
            raise RuntimeError("Simulated start failure")
        self.start_call_count += 1

    def _stop(self) -> None:
        self.stop_call_count += 1
@pytest.fixture
def default_profiler_config():
    """Torch-profiler config with no start delay and no iteration cap."""
    return ProfilerConfig(
        profiler="torch",
        torch_profiler_dir="/tmp/mock",
        delay_iterations=0,
        max_iterations=0,
    )
def test_immediate_start_stop(default_profiler_config):
    """Test standard start without delay."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    # With delay_iterations=0 the backend starts immediately.
    assert profiler._running is True
    assert profiler._active is True
    assert profiler.start_call_count == 1
    profiler.stop()
    assert profiler._running is False
    assert profiler._active is False
    assert profiler.stop_call_count == 1
def test_delayed_start(default_profiler_config):
    """Test that profiler waits for N steps before actually starting."""
    default_profiler_config.delay_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # User requests start
    profiler.start()
    # Should be active (request accepted) but not running (waiting for delay)
    assert profiler._active is True
    assert profiler._running is False
    assert profiler.start_call_count == 0
    # Step 1 (still within the delay window)
    profiler.step()
    assert profiler._running is False
    # Step 2 (Threshold reached)
    profiler.step()
    assert profiler._running is True
    assert profiler.start_call_count == 1
def test_max_iterations(default_profiler_config):
    """Test that profiler stops automatically after max iterations."""
    default_profiler_config.max_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    assert profiler._running is True
    # Iteration 1
    profiler.step()  # profiling_count becomes 1
    assert profiler._running is True
    # Iteration 2
    profiler.step()  # profiling_count becomes 2
    assert profiler._running is True
    # Iteration 3 (Exceeds max)
    profiler.step()  # profiling_count becomes 3
    # Should have stopped now
    assert profiler._running is False
    assert profiler.stop_call_count == 1
def test_delayed_start_and_max_iters(default_profiler_config):
    """Test combined delayed start and max iterations."""
    default_profiler_config.delay_iterations = 2
    default_profiler_config.max_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    # Step 1 (still delayed)
    profiler.step()
    assert profiler._running is False
    assert profiler._active is True
    # Step 2 (delay satisfied -- starts now)
    profiler.step()
    assert profiler._profiling_for_iters == 1
    assert profiler._running is True
    assert profiler._active is True
    # Second profiled iteration (at the max)
    profiler.step()
    assert profiler._profiling_for_iters == 2
    assert profiler._running is True
    # Third profiled iteration (exceeds max)
    profiler.step()
    # Should have stopped now
    assert profiler._running is False
    assert profiler.stop_call_count == 1
def test_idempotency(default_profiler_config):
    """Test that calling start/stop multiple times doesn't break logic."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Double Start
    profiler.start()
    profiler.start()
    assert profiler.start_call_count == 1  # Should only start once
    # Double Stop
    profiler.stop()
    profiler.stop()
    assert profiler.stop_call_count == 1  # Should only stop once
def test_step_inactive(default_profiler_config):
    """Test that stepping while inactive does nothing."""
    default_profiler_config.delay_iterations = 2
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Not started yet -- steps must not count toward the delay.
    profiler.step()
    profiler.step()
    # Even though we stepped 2 times, start shouldn't happen because active=False
    assert profiler.start_call_count == 0
def test_start_failure(default_profiler_config):
    """Test behavior when the underlying _start method raises exception."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.should_fail_start = True
    profiler.start()
    # Exception caught in _call_start
    assert profiler._running is False  # Should not mark as running
    assert profiler._active is True  # Request is still considered active
    assert profiler.start_call_count == 0  # Logic failed inside start
def test_shutdown(default_profiler_config):
    """Test that shutdown calls stop only if running."""
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    # Case 1: Not running -- shutdown must be a no-op.
    profiler.shutdown()
    assert profiler.stop_call_count == 0
    # Case 2: Running
    profiler.start()
    profiler.shutdown()
    assert profiler.stop_call_count == 1
def test_mixed_delay_and_stop(default_profiler_config):
    """Test manual stop during the delay period."""
    default_profiler_config.delay_iterations = 5
    profiler = ConcreteWorkerProfiler(default_profiler_config)
    profiler.start()
    profiler.step()
    profiler.step()
    # User cancels before delay finishes
    profiler.stop()
    assert profiler._active is False
    # Further steps should not trigger start
    profiler.step()
    profiler.step()
    profiler.step()
    assert profiler.start_call_count == 0
class TestIsUriPath:
    """Tests for the _is_uri_path helper function."""

    @pytest.mark.parametrize(
        "path,expected",
        [
            # Valid URI schemes - should return True
            ("gs://bucket/path", True),
            ("s3://bucket/path", True),
            ("hdfs://cluster/path", True),
            ("abfs://container/path", True),
            ("http://example.com/path", True),
            ("https://example.com/path", True),
            # Local paths - should return False
            ("/tmp/local/path", False),
            ("./relative/path", False),
            ("relative/path", False),
            ("/absolute/path", False),
            # Windows drive letters - should return False (single char scheme)
            ("C://windows/path", False),
            ("D://drive/path", False),
            # Edge cases
            ("", False),
            ("no-scheme", False),
            ("scheme-no-slashes:", False),
            ("://no-scheme", False),
        ],
    )
    def test_is_uri_path(self, path, expected):
        """Test that _is_uri_path correctly identifies URI vs local paths."""
        assert _is_uri_path(path) == expected
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/worker/test_gpu_profiler.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/moe/test_cutedsl_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.platforms import current_platform
if not current_platform.has_device_capability(100):
pytest.skip(
reason="Nvfp4 Requires compute capability of 10 or above.",
allow_module_level=True,
)
import torch
from flashinfer import fp4_quantize
from torch.nn import functional as F
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.fused_moe.flashinfer_cutedsl_moe import (
flashinfer_cutedsl_moe_masked,
)
from vllm.utils.flashinfer import (
flashinfer_cutedsl_grouped_gemm_nt_masked as cutedsl_gmm_masked,
)
from vllm.utils.flashinfer import (
scaled_fp4_grouped_quantize,
)
# Lookup table mapping the 8 E2M1 (fp4) magnitude codes to their float values.
kE2M1ToFloat = torch.tensor(
    [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32
)
# Largest representable magnitudes of the fp8-e4m3 and fp4-e2m1 formats.
FLOAT8_E4M3_MAX = 448.0
FLOAT4_E2M1_MAX = 6.0
def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size):
    """Undo the 128x4 tile swizzle of an nvfp4 scale-factor tensor.

    Returns the scale factors as a row-major [m, k // block_size] view
    (sliced down from the padded tile grid).
    """
    row_tiles = (m + 127) // 128
    cols_per_tile = block_size * 4
    col_tiles = (k + cols_per_tile - 1) // cols_per_tile
    # Expose the tile structure, swap the swizzled axes back, then flatten.
    tiled = a_sf_swizzled.reshape(1, row_tiles, col_tiles, 32, 4, 4)
    unswizzled = tiled.permute(0, 1, 4, 3, 2, 5)
    linear = unswizzled.reshape(
        row_tiles * 128, col_tiles * cols_per_tile // block_size
    )
    return linear[:m, :k]
def dequantize_nvfp4_to_dtype(
    tensor_fp4, tensor_sf, global_scale, dtype, device, block_size=16
):
    """Dequantize a packed nvfp4 tensor (plus swizzled block scales) back to
    high precision in `dtype`."""
    # Two fp4 values are packed into one uint8.
    assert tensor_fp4.dtype == torch.uint8
    m, packed_k = tensor_fp4.shape
    k = packed_k * 2

    # Unpack nibbles to floats and group columns into scale blocks.
    unpacked = break_fp4_bytes(tensor_fp4, dtype)
    unpacked = unpacked.reshape(m, k // block_size, block_size)

    # Unswizzle the per-block scales and undo the global scale.
    sf_linear = convert_swizzled_to_linear(
        tensor_sf.view(torch.float8_e4m3fn), m, k, block_size
    )
    sf_float = sf_linear.to(torch.float32) / global_scale

    # Apply one scale per block of `block_size` columns.
    scaled = (unpacked * sf_float.unsqueeze(-1)).reshape(m, k)
    return scaled.to(dtype=dtype)
def break_fp4_bytes(a, dtype):
    """Unpack a uint8 tensor holding two fp4 (E2M1) values per byte.

    Returns a [m, 2 * n] tensor of dtype `dtype`.
    """
    assert a.dtype == torch.uint8
    m, n = a.shape

    # Split each byte into its two nibbles, low nibble first.
    flat = a.flatten()
    upper = (flat & 0xF0) >> 4
    lower = flat & 0x0F
    nibbles = torch.stack((lower, upper), dim=1).flatten()

    # Bit 3 is the sign; bits 0-2 index the E2M1 magnitude table.
    sign_bits = (nibbles & 0x08).to(torch.bool)
    magnitude_idx = (nibbles & 0x07).to(torch.long)

    lut = kE2M1ToFloat.to(device=a.device)
    decoded = lut[magnitude_idx] * torch.where(sign_bits, -1.0, 1.0)
    return decoded.reshape(m, n * 2).to(dtype=dtype)
def generate_balanced_routing(
    hidden_states: torch.Tensor, num_experts: int, top_k: int
):
    """
    Generate routing weights and topk indices such that every expert is
    active (slot 0 round-robins over the experts, so each expert gets at
    least one token whenever num_tokens >= num_experts).

    Returns (routing_weights, topk_idx).
    """
    num_tokens, _ = hidden_states.shape

    # One guaranteed expert per token, assigned round-robin then shuffled.
    guaranteed = torch.arange(num_tokens) % num_experts
    guaranteed = guaranteed[torch.randperm(num_tokens)]

    topk_idx = torch.full((num_tokens, top_k), -1, dtype=torch.long)
    topk_idx[:, 0] = guaranteed
    # Remaining top_k - 1 slots are uniformly random (repeats allowed).
    if top_k > 1:
        topk_idx[:, 1:] = torch.randint(0, num_experts, (num_tokens, top_k - 1))

    # Random positive weights, normalized per token.
    routing_weights = torch.rand(num_tokens, top_k)
    routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)

    return routing_weights.view(num_tokens, top_k), topk_idx.view(num_tokens, top_k)
def prepare_inputs(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    num_experts: int,
    topk: int,
):
    """Route tokens and repack their hidden states per expert.

    Returns (hidden_states_3d [E, max_m, H], masked_m [E], topk_idx,
    routing_weights).
    """
    routing_weights, topk_idx = generate_balanced_routing(
        router_logits, num_experts, topk
    )

    flat_idx = topk_idx.view(-1)
    # Tokens assigned to each expert.
    counts = [(flat_idx == e).sum() for e in range(num_experts)]
    masked_m = torch.tensor(counts, dtype=torch.int32)

    # Initialize with ones (not empty) so padded rows never contain NaNs.
    hidden_states_3d = torch.ones(
        (num_experts, max(counts), hidden_states.shape[1]),
        dtype=hidden_states.dtype,
    )
    for e in range(num_experts):
        hidden_states_3d[e, : counts[e], :] = hidden_states[flat_idx == e]
    return hidden_states_3d, masked_m, topk_idx, routing_weights
# Candidate (M, N, K) problem sizes. NOTE(review): not referenced by the
# tests below -- presumably kept for parity with sibling kernel tests.
MNK_FACTORS = [
    (2, 1024, 1024),
    (2, 1024, 1536),
    (2, 3072, 1024),
    (2, 3072, 1536),
    (64, 1024, 1024),
    (64, 1024, 1536),
    (64, 3072, 1024),
    (64, 2048, 1024),
    (224, 1024, 1024),
    (224, 1024, 1536),
]
# Reference implementation of torch_moe
def torch_moe(a, w1, w2, score, topk, expert_map):
    """Eager reference MoE: softmax top-k routing plus a per-expert
    SiLU-gated MLP, with optional expert-id remapping."""
    B, D = a.shape
    # Duplicate every token once per selected expert.
    a_rep = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D)
    out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device)

    probs = torch.softmax(score, dim=-1, dtype=torch.float32)
    topk_weight, topk_ids = torch.topk(probs, topk)
    topk_weight = topk_weight.view(-1)
    topk_ids = topk_ids.view(-1)
    if expert_map is not None:
        # Remap logical expert ids to local expert ids.
        topk_ids = expert_map[topk_ids]

    for e in range(w1.shape[0]):
        sel = topk_ids == e
        if sel.sum():
            gated = SiluAndMul()(a_rep[sel] @ w1[e].transpose(0, 1))
            out[sel] = gated @ w2[e].transpose(0, 1)

    weighted = out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype)
    return weighted.sum(dim=1)
def torch_moe_nvfp4(a, w1, w2, topk, topk_weight, topk_ids):
    """Reference MoE with an nvfp4 fake-quantization round-trip on the
    intermediate activations, mirroring the fused kernel's dataflow.

    w1 stacks the up/gate projections along dim 0; w2 is the down projection.
    Routing weights/ids are supplied by the caller (no softmax here).
    """
    B, D = a.shape
    a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D)
    out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device)
    topk_weight = topk_weight.view(-1)
    topk_ids = topk_ids.view(-1)
    for i in range(w1.shape[0]):
        mask = topk_ids == i
        if mask.sum():
            m = w1[i].shape[0]
            assert m % 2 == 0
            # Note: w1 and w3 are swapped!
            w3_expert, w1_expert = w1[i][m // 2 :, :], w1[i][: m // 2, :]
            inter = F.silu(a[mask] @ w1_expert.t()) * (a[mask] @ w3_expert.t())
            # Quantize/dequantize the intermediate (global scale 1.0) so the
            # reference sees the same precision loss as the kernel.
            inter_gs = torch.tensor(1.0).cuda()
            inter_q, inter_blockscale = fp4_quantize(inter, inter_gs)
            inter = dequantize_nvfp4_to_dtype(
                inter_q,
                inter_blockscale,
                inter_gs,
                dtype=inter.dtype,
                device=inter.device,
                block_size=16,
            ).cuda()
            out[mask] = inter @ w2[i].transpose(0, 1)
    return (
        out.view(B, -1, w2.shape[1]) * topk_weight.view(B, -1, 1).to(out.dtype)
    ).sum(dim=1)
def grouped_gemm_ref(
    hidden_states_expanded: torch.Tensor,
    hidden_states_3d: torch.Tensor,
    weights: torch.Tensor,
    topk_idx: torch.Tensor,
    masked_m: torch.Tensor,
    B: int,
    topk: int,
    num_experts: int,
    *,
    block_size: int = 16,
) -> torch.Tensor:
    """
    Computes the reference grouped GEMM (per-expert fp4 fake-quantized loop)
    and repacks the flat result into the per-expert masked layout.

    hidden_states_3d is accepted for signature parity with the kernel path
    but is not used here.

    Returns:
        out_ref: Tensor [num_experts, max_m, n_out]
    """
    device_hs = hidden_states_expanded.device
    device_w = weights.device
    out_dtype = weights.dtype
    n_out = weights.shape[1]
    # Flattened reference output (B*topk, n_out)
    out = torch.zeros((B * topk, n_out), dtype=out_dtype, device=device_w)
    # Per-expert reference compute loop
    for i in range(num_experts):
        mask = topk_idx.view(-1) == i
        if mask.any():
            lhs = hidden_states_expanded[mask]
            rhs = weights[i]
            # Global scales from each operand's max magnitude.
            a_amax = lhs.abs().max().to(torch.float32).to(device_hs)
            b_amax = rhs.abs().max().to(torch.float32).to(device_w)
            a_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / a_amax
            b_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / b_amax
            # Round-trip both operands through nvfp4 so the reference sees
            # the same precision loss as the kernel.
            lhsq, lhsq_sf = fp4_quantize(lhs, a_gs)
            rhsq, rhsq_sf = fp4_quantize(rhs, b_gs)
            lhs_in_dtype = dequantize_nvfp4_to_dtype(
                lhsq,
                lhsq_sf,
                a_gs,
                dtype=lhs.dtype,
                device=device_hs,
                block_size=block_size,
            )
            rhs_in_dtype = dequantize_nvfp4_to_dtype(
                rhsq,
                rhsq_sf,
                b_gs,
                dtype=rhs.dtype,
                device=device_w,
                block_size=block_size,
            )
            out[mask] = lhs_in_dtype @ rhs_in_dtype.t()
    # Determine per-expert max_m
    max_m_val = int(masked_m.max().item())
    # Repack into [num_experts, max_m, n_out]
    out_ref = torch.zeros(
        (num_experts, max_m_val, n_out),
        dtype=out.dtype,
        device=out.device,
    )
    # Fill each expert's rows in token order, tracking the next free slot.
    expert_slot = [0] * num_experts
    for i, expert_id in enumerate(topk_idx.view(-1).tolist()):
        slot = expert_slot[expert_id]
        if slot < max_m_val:
            out_ref[expert_id, slot, :] = out[i]
            expert_slot[expert_id] += 1
        else:
            raise IndexError(
                f"Expert {expert_id} exceeded max slots ({max_m_val}). "
                "Increase max_m or check masked_m."
            )
    return out_ref
def flashinfer_cutedsl_grouped_gemm_nt_masked(
    hidden_states: torch.Tensor,  # 3d
    input_global_scale: torch.Tensor,  # (l,)
    weights: torch.Tensor,
    w_global_scale: torch.Tensor,  # (l,)
    masked_m: torch.Tensor,
):
    """Quantize both operands to nvfp4 and run the CuteDSL masked grouped
    GEMM. Returns the output permuted to [max_m, n, l] (kernel layout)."""
    # hidden_states: [l, m, k]
    # weights: [l, n, k]
    aq, aq_sf = scaled_fp4_grouped_quantize(
        hidden_states,
        masked_m.to(hidden_states.device),
        input_global_scale,
    )
    num_experts, n, k = weights.shape
    # Weights are dense: every expert quantizes all n rows.
    bq, bq_sf = scaled_fp4_grouped_quantize(
        weights,
        torch.full((num_experts,), n, device=weights.device, dtype=torch.int32),
        w_global_scale,
    )
    out = torch.zeros(
        (num_experts, max(masked_m), n), dtype=weights.dtype, device=aq.device
    )
    out = out.permute(1, 2, 0)  # requirement of kernel
    sf_vec_size = 16
    ab_dtype = "float4_e2m1fn"
    sf_dtype = "float8_e4m3fn"
    c_dtype = "bfloat16"
    # Undo both per-expert global scales in the epilogue.
    alpha = 1.0 / (input_global_scale * w_global_scale).to(out.dtype).view(
        1, 1, num_experts
    )

    def get_cute_dtype(input: torch.Tensor) -> str:
        # Map a torch dtype to the kernel's dtype string.
        if input.dtype == torch.bfloat16:
            return "bfloat16"
        elif input.dtype == torch.float16:
            return "float16"
        elif input.dtype == torch.float32:
            return "float32"
        else:
            raise ValueError(f"Unsupported cute dtype {input.dtype}")

    cutedsl_gmm_masked(
        (aq, aq_sf),
        (bq, bq_sf),
        out,
        masked_m.to(aq.device),
        ab_dtype=ab_dtype,
        sf_dtype=sf_dtype,
        c_dtype=c_dtype,
        sf_vec_size=sf_vec_size,
        alpha=alpha,
        alpha_dtype=get_cute_dtype(alpha),
    )
    return out
@pytest.mark.parametrize("bs, hidden_dim, inter_dim", [(2, 128, 256), (16, 128, 512)])
@pytest.mark.parametrize("topk", [1, 2, 4])
@torch.inference_mode()
def test_flashinfer_cutedsl_moe_masked(
    bs: int, hidden_dim: int, inter_dim: int, topk: int
):
    """End-to-end check of flashinfer_cutedsl_moe_masked against the eager
    nvfp4 fake-quantized reference (torch_moe_nvfp4)."""
    torch.manual_seed(42)
    device = "cuda"
    num_experts = 8
    # Scaled-down random inputs keep values inside the fp4 dynamic range.
    hidden_states = (
        torch.randn(bs, hidden_dim, dtype=torch.bfloat16, device=device) / 5.0
    )
    w1 = (
        torch.randn(
            num_experts, 2 * inter_dim, hidden_dim, dtype=torch.bfloat16, device=device
        )
        / 10.0
    )
    w2 = (
        torch.randn(
            num_experts, hidden_dim, inter_dim, dtype=torch.bfloat16, device=device
        )
        / 10.0
    )
    router_logits = torch.randn(bs, num_experts, dtype=torch.float32)
    hidden_states_expanded = (
        hidden_states.view(bs, -1, hidden_dim)
        .repeat(1, topk, 1)
        .reshape(-1, hidden_dim)
    )
    hidden_states_3d, masked_m, topk_idx, routing_weights = prepare_inputs(
        hidden_states_expanded, router_logits, num_experts, topk
    )
    # Per-expert global scales for the weights; activations use scale 1.0.
    w1_amax = w1.abs().amax(dim=(1, 2)).to(torch.float32).to(w1.device)
    w2_amax = w2.abs().amax(dim=(1, 2)).to(torch.float32).to(w2.device)
    input_global_scale = torch.ones(
        (num_experts,), dtype=torch.float32, device=hidden_states.device
    )
    w1_global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w1_amax
    w2_global_scale = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w2_amax
    a2_global_scale = torch.ones(
        (num_experts,), dtype=torch.float32, device=hidden_states.device
    )  # assume intermediate scale is 1.0
    w1_fp4, w1_blockscale = scaled_fp4_grouped_quantize(
        w1,
        torch.ones(num_experts, dtype=torch.int32, device=w1.device) * 2 * inter_dim,
        w1_global_scale,
    )
    w2_fp4, w2_blockscale = scaled_fp4_grouped_quantize(
        w2,
        torch.ones(num_experts, dtype=torch.int32, device=w2.device) * hidden_dim,
        w2_global_scale,
    )
    w1_alpha = 1.0 / (input_global_scale * w1_global_scale)
    w2_alpha = 1.0 / (a2_global_scale * w2_global_scale)
    out = torch.empty_like(hidden_states_3d)
    # Note: the 1st dim shouldn't be bs
    wk = torch.empty(
        num_experts,
        hidden_states_3d.shape[1],
        inter_dim * 2,
        dtype=hidden_states_3d.dtype,
        device=hidden_states.device,
    )
    flashinfer_cutedsl_moe_masked(
        hidden_states_3d.to(hidden_states.device),
        input_global_scale,
        w1_fp4.permute(2, 0, 1),
        w1_blockscale,
        w1_alpha,
        w2_fp4.permute(2, 0, 1),
        a2_global_scale,
        w2_blockscale,
        w2_alpha,
        masked_m.to(hidden_states.device),
        wk,
        out,
    )
    # reference: fake-quantize activations and weights, then run eager MoE.
    a_fp4, a_scale_interleaved = fp4_quantize(hidden_states, input_global_scale)
    a_in_dtype = dequantize_nvfp4_to_dtype(
        a_fp4,
        a_scale_interleaved,
        input_global_scale,
        dtype=hidden_states.dtype,
        device=hidden_states.device,
        block_size=16,
    )
    w1_d = torch.empty(
        (num_experts, 2 * inter_dim, hidden_dim), device=w1.device, dtype=w1.dtype
    )
    w2_d = torch.empty(
        (num_experts, hidden_dim, inter_dim), device=w2.device, dtype=w2.dtype
    )
    for idx in range(0, num_experts):
        w1_fp4_sliced, w1_blockscale_sliced = fp4_quantize(
            w1[idx], w1_global_scale[idx]
        )
        w2_fp4_sliced, w2_blockscale_sliced = fp4_quantize(
            w2[idx], w2_global_scale[idx]
        )
        w1_d[idx] = dequantize_nvfp4_to_dtype(
            w1_fp4_sliced,
            w1_blockscale_sliced,
            w1_global_scale[idx],
            dtype=w1.dtype,
            device=w1.device,
            block_size=16,
        )
        w2_d[idx] = dequantize_nvfp4_to_dtype(
            w2_fp4_sliced,
            w2_blockscale_sliced,
            w2_global_scale[idx],
            dtype=w2.dtype,
            device=w2.device,
            block_size=16,
        )
    ref_output = torch_moe_nvfp4(
        a_in_dtype,
        w1_d,
        w2_d,
        topk,
        routing_weights.to(a_in_dtype.device),
        topk_idx.to(a_in_dtype.device),
    )
    # Re-weight and scatter the kernel's per-expert rows back to tokens.
    # NOTE(review): relies on prepare_inputs packing tokens per expert in
    # ascending token order so `out[i, :len(r)]` lines up with sorted rows.
    out_weighted = torch.zeros_like(ref_output, device=out.device, dtype=out.dtype)
    positions = torch.nonzero(masked_m[topk_idx], as_tuple=False)
    rows, cols = positions[:, 0], positions[:, 1]
    experts = topk_idx[rows, cols]
    for i in range(num_experts):
        mask = experts == i
        if mask.any():
            idx = torch.nonzero(mask, as_tuple=False).squeeze(-1)
            r, c = rows[idx], cols[idx]
            out_weighted[r] += out[i, : len(r), :] * routing_weights[r, c].to(
                out.device
            ).unsqueeze(-1)
    torch.testing.assert_close(
        out_weighted.cpu(), ref_output.cpu(), atol=2e-1, rtol=2e-1
    )
@pytest.mark.parametrize(
    "bs, hidden_dim, inter_dim, topk", [(2, 128, 256, 2), (16, 128, 512, 5)]
)
@torch.inference_mode()
def test_grouped_gemm_nt_masked(
    bs: int, hidden_dim: int, inter_dim: int, topk: int
) -> None:
    """Compare the CuteDSL masked grouped GEMM against the per-expert fp4
    fake-quantized reference (grouped_gemm_ref)."""
    torch.manual_seed(42)
    B = bs
    D = hidden_dim
    N = inter_dim
    # CuteDSL group gemm has issue when not all experts are active.
    # i.e. masked = [2, 3, 0, 0, 1] where the 2nd and 3rd experts are inactive
    # see https://github.com/flashinfer-ai/flashinfer/issues/1856
    num_experts = bs
    hidden_states = torch.randn(B, D, dtype=torch.bfloat16, device="cuda")
    weights = torch.randn(num_experts, N, D, dtype=torch.bfloat16, device="cuda")
    router_logits = torch.randn(B, num_experts, dtype=torch.float32)
    hidden_states_expanded = (
        hidden_states.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D)
    )
    hidden_states_3d, masked_m, topk_idx, _ = prepare_inputs(
        hidden_states_expanded, router_logits, num_experts, topk
    )
    # Per-expert global scales from each operand's max magnitude.
    a_amax = (
        hidden_states_3d.abs()
        .amax(dim=(1, 2))
        .to(torch.float32)
        .to(hidden_states.device)
    )
    b_amax = weights.abs().amax(dim=(1, 2)).to(torch.float32).to(weights.device)
    a_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / a_amax
    b_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / b_amax
    out_flashinfer = flashinfer_cutedsl_grouped_gemm_nt_masked(
        hidden_states_3d.to(hidden_states.device), a_gs, weights, b_gs, masked_m
    )
    # reference
    out_ref = grouped_gemm_ref(
        hidden_states_expanded=hidden_states_expanded,
        hidden_states_3d=hidden_states_3d,
        weights=weights,
        topk_idx=topk_idx,
        masked_m=masked_m,
        B=B,
        topk=topk,
        num_experts=num_experts,
    )
    # Note: just to compare the masked position due to cutedsl may write nan
    # into unmasked position.
    for i in range(num_experts):
        torch.testing.assert_close(
            out_flashinfer.permute(2, 0, 1)[i, : masked_m[i]],
            out_ref.to(out_flashinfer.device)[i, : masked_m[i]],
            atol=1e-1,
            rtol=1e-1,
        )
# Allow quick manual runs of both tests without the pytest CLI.
if __name__ == "__main__":
    test_flashinfer_cutedsl_moe_masked(16, 128, 512, 4)
    test_grouped_gemm_nt_masked(16, 128, 512, 4)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_cutedsl_moe.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm import envs
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEParallelConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.topk_weight_and_reduce import (
TopKWeightAndReduceDelegate,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
kNvfp4Dynamic,
kNvfp4Static,
)
from vllm.platforms import current_platform
from vllm.utils.flashinfer import (
flashinfer_cutedsl_grouped_gemm_nt_masked,
scaled_fp4_grouped_quantize,
silu_and_mul_scaled_nvfp4_experts_quantize,
)
logger = init_logger(__name__)
class FlashInferCuteDSLExperts(mk.FusedMoEPermuteExpertsUnpermute):
    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int,
        num_dispatchers: int,
    ):
        """Initialize the CuteDSL experts backend; only nvfp4 quantization
        is accepted."""
        super().__init__(
            moe_config=moe_config,
            quant_config=quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=num_dispatchers,
        )
        assert quant_config.quant_dtype == "nvfp4", (
            "Only nvfp4 quantization are currently supported."
        )
        # Final output is produced in the model's input dtype.
        self.out_dtype = moe_config.in_dtype
    @staticmethod
    def activation_format() -> mk.FusedMoEActivationFormat:
        # Activations arrive in the batched (per-expert masked) layout.
        return mk.FusedMoEActivationFormat.BatchedExperts
    @staticmethod
    def _supports_current_device() -> bool:
        # CUDA only, and only on the SM100 (Blackwell) capability family.
        p = current_platform
        return p.is_cuda() and p.is_device_capability_family(100)
    @staticmethod
    def _supports_no_act_and_mul() -> bool:
        # The no-act-and-mul (ungated) variant is not supported here.
        return False
@staticmethod
def _supports_quant_scheme(
weight_key: QuantKey | None,
activation_key: QuantKey | None,
) -> bool:
SUPPORTED_W_A = [
(kNvfp4Static, kNvfp4Dynamic),
]
return (weight_key, activation_key) in SUPPORTED_W_A
@staticmethod
def _supports_activation(activation: MoEActivation) -> bool:
return activation == MoEActivation.SILU
@staticmethod
def _supports_parallel_config(moe_parallel_config: FusedMoEParallelConfig) -> bool:
return True
def supports_expert_map(self) -> bool:
return False
def supports_chunking(self) -> bool:
# This refers to TP chunking; DP chunking is handled separately.
# TODO(shuw@nvidia.com): Set to False to be consistent with
# batched_deep_gemm_moe
return False
def finalize_weight_and_reduce_impl(self) -> mk.TopKWeightAndReduce:
# Let PrepareAndFinalize::finalize() decide the impl.
return TopKWeightAndReduceDelegate()
def workspace_shapes(
self,
M: int,
N: int,
K: int,
topk: int,
global_num_experts: int,
local_num_experts: int,
expert_tokens_meta: mk.ExpertTokensMetadata | None,
activation: MoEActivation,
) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
# We use global_num_experts due to how moe_align_block_size handles
# expert_maps.
"""
Compute the shapes for the temporary and final outputs of the two gemms
and activation in the fused expert function. Since the gemms are
independent, the workspace for the first gemm can be shared with the
workspace for the last gemm.
Returns a tuple of:
- workspace13 shape tuple: must be large enough to hold the
result of either expert gemm.
- workspace2 shape tuple: must be large enough to hold the
result of the activation function.
- output shape tuple: must be exact size of the final gemm output.
- Workspace type: The dtype to use for the workspace tensors.
- Note: in order for activation chunking to work, the first dimension
of each tuple must be the number of tokens.
"""
K_dim = K * 2 if envs.VLLM_DEEPEPLL_NVFP4_DISPATCH else K
output_shape = (local_num_experts, M, K_dim)
workspace2 = (local_num_experts, M, N)
workspace1 = output_shape
return (workspace1, workspace2, output_shape)
def apply(
self,
output: torch.Tensor,
hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
activation: MoEActivation,
global_num_experts: int,
expert_map: torch.Tensor | None,
a1q_scale: torch.Tensor | None,
a2_scale: torch.Tensor | None, # Not used
workspace13: torch.Tensor | None,
workspace2: torch.Tensor | None,
expert_tokens_meta: mk.ExpertTokensMetadata | None,
apply_router_weight_on_input: bool | None,
):
assert self.quant_dtype == "nvfp4", (
"Only nvfp4 quantization are currently supported."
)
# Ensure w1_scale and w2_scale are not None before calling view
assert self.w1_scale is not None and self.w2_scale is not None, (
"w1_scale and w2_scale must not be None for FlashInferExperts"
)
assert expert_tokens_meta is not None
expert_num_tokens = expert_tokens_meta.expert_num_tokens
assert hidden_states.ndim == 3
assert self.w1_scale.ndim == 3
assert self.w2_scale.ndim == 3
input_global_scale = (
None if envs.VLLM_DEEPEPLL_NVFP4_DISPATCH else self.a1_gscale
)
flashinfer_hidden_states = (
(hidden_states, a1q_scale)
if envs.VLLM_DEEPEPLL_NVFP4_DISPATCH
else hidden_states
)
flashinfer_cutedsl_moe_masked(
hidden_states=flashinfer_hidden_states,
input_global_scale=input_global_scale,
w1=w1,
w1_blockscale=self.w1_scale,
w1_alpha=self.g1_alphas,
w2=w2,
a2_global_scale=self.a2_gscale,
w2_blockscale=self.w2_scale,
w2_alpha=self.g2_alphas,
masked_m=expert_num_tokens,
workspace=workspace2,
out=output,
)
def get_cute_dtype(input: torch.Tensor) -> str:
    """Map a tensor's torch dtype to its CuteDSL dtype name."""
    name = {
        torch.bfloat16: "bfloat16",
        torch.float16: "float16",
        torch.float32: "float32",
    }.get(input.dtype)
    if name is None:
        raise ValueError(f"Unsupported cute dtype {input.dtype}")
    return name
def flashinfer_cutedsl_moe_masked(
    hidden_states: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
    input_global_scale: torch.Tensor,
    w1: torch.Tensor,
    w1_blockscale: torch.Tensor,
    w1_alpha,
    w2: torch.Tensor,
    a2_global_scale: torch.Tensor,
    w2_blockscale: torch.Tensor,
    w2_alpha,
    masked_m: torch.Tensor,
    workspace: torch.Tensor,
    out: torch.Tensor,
):
    """
    Perform masked Mixture-of-Experts computation with FlashInfer's CuteDSL
    kernels. The result is written in place into ``out``; nothing is
    returned.
    Args:
        hidden_states: Either of the following case
            * torch.Tensor: [num_experts, m, k], bf16 — unquantized; will be
              quantized here using ``input_global_scale``
            * tuple[torch.Tensor, torch.Tensor]: pre-quantized input,
              [num_experts, m, k // 2] uint8 plus
              [num_experts, m, k // 16] float8_e4m3fn scales
        input_global_scale (torch.Tensor): (l,); must be None when
            ``hidden_states`` is the pre-quantized tuple form
        w1 (torch.Tensor): fp4 weights, [l, 2 * n, k // 2], uint8
        w1_blockscale (torch.Tensor): blockscale factors, e4m3,
        w1_alpha (torch.Tensor): (l,)
        w2 (torch.Tensor): fp4 weights, [l, k, n // 2], uint8
        a2_global_scale (torch.Tensor): (l,)
        w2_blockscale (torch.Tensor): blockscale factors, e4m3,
        w2_alpha (torch.Tensor): (l,)
        masked_m (torch.Tensor): per-expert valid token counts
        workspace (torch.Tensor): For gateup_output
        out (torch.Tensor): output buffer, written in place
    Notes:
        - Assumes max(masked_m) <= m.
    """
    # === Assertions on dtypes ===
    assert w1.dtype == torch.uint8, f"w1 must be uint8, got {w1.dtype}"
    assert w1_blockscale.dtype == torch.float8_e4m3fn, (
        f"w1_blockscale must be float8_e4m3fn, got {w1_blockscale.dtype}"
    )
    assert w1_alpha.dtype == torch.float32, (
        f"w1_alpha must be float32, got {w1_alpha.dtype}"
    )
    assert w2.dtype == torch.uint8, f"w2 must be uint8, got {w2.dtype}"
    assert a2_global_scale.dtype == torch.float32, (
        f"a2_global_scale must be float32, got {a2_global_scale.dtype}"
    )
    assert w2_blockscale.dtype == torch.float8_e4m3fn, (
        f"w2_blockscale must be float8_e4m3fn, got {w2_blockscale.dtype}"
    )
    assert w2_alpha.dtype == torch.float32, (
        f"w2_alpha must be float32, got {w2_alpha.dtype}"
    )
    # === Assertions on shapes ===
    n = w2.shape[-1] * 2  # intermediate dimension
    if isinstance(hidden_states, tuple):
        # BUGFIX(message): the previous text ("input_global_scale is needed
        # when input needs quant") stated the opposite of the asserted
        # condition — in the tuple case the input is already quantized.
        assert input_global_scale is None, (
            "input_global_scale must be None when hidden_states is "
            "already quantized"
        )
        aq = hidden_states[0].view(torch.uint8)
        aq_sf = hidden_states[1].view(torch.float8_e4m3fn)
        # m, k_by_2, num_experts = aq.shape
        num_experts, m, k_by_2 = aq.shape
        k = k_by_2 * 2
        aq = aq.permute(1, 2, 0)
    else:
        num_experts, m, k = hidden_states.shape
        assert input_global_scale.dtype == torch.float32, (
            f"input_global_scale must be float32, got {input_global_scale.dtype}"
        )
        assert input_global_scale.shape == (num_experts,), (
            f"input_global_scale must be (l,), got {input_global_scale.shape}"
        )
        aq, aq_sf = scaled_fp4_grouped_quantize(
            hidden_states,
            masked_m,
            input_global_scale,
        )
    assert w1.shape[-2] == 2 * n, f"w1 last-2 dim must be 2*n, got {w1.shape}"
    assert w1.shape[-1] * 2 == k, (
        f"w1 last dim * 2 must equal k, got {w1.shape[-1]} vs k={k}"
    )
    assert w2.shape[-2:] == (
        k,
        n // 2,
    ), f"w2 shape mismatch, got {w2.shape[-2:]}, expected {(k, n // 2)}"
    assert w1_alpha.shape == (num_experts,), (
        f"w1_alpha must be (l,), got {w1_alpha.shape}"
    )
    assert a2_global_scale.shape == (num_experts,), (
        f"a2_global_scale must be (l,), got {a2_global_scale.shape}"
    )
    assert w2_alpha.shape == (num_experts,), (
        f"w2_alpha must be (l,), got {w2_alpha.shape}"
    )
    workspace = workspace.permute(1, 2, 0)  # requirement of kernel
    sf_vec_size = 16
    assert aq_sf.dtype == torch.float8_e4m3fn
    assert aq.dtype == torch.uint8
    ab_dtype = "float4_e2m1fn"
    sf_dtype = "float8_e4m3fn"
    if isinstance(hidden_states, tuple):
        c_dtype = "bfloat16"
    else:
        c_dtype = get_cute_dtype(hidden_states)
    # Gemm1
    flashinfer_cutedsl_grouped_gemm_nt_masked(
        (aq, aq_sf),
        (w1.permute(1, 2, 0), w1_blockscale),
        workspace,
        masked_m,
        ab_dtype=ab_dtype,
        sf_dtype=sf_dtype,
        c_dtype=c_dtype,
        sf_vec_size=sf_vec_size,
        alpha=w1_alpha.view(1, 1, num_experts),
        alpha_dtype=get_cute_dtype(w1_alpha),
    )  # in logical [m, n, l]
    # SILU and quantization
    diq, diq_sf = silu_and_mul_scaled_nvfp4_experts_quantize(
        workspace.permute(2, 0, 1),
        masked_m,
        a2_global_scale,
    )
    # Gemm2
    out = out.permute(1, 2, 0)  # requirement of kernel
    flashinfer_cutedsl_grouped_gemm_nt_masked(
        (diq, diq_sf),
        (w2.permute(1, 2, 0), w2_blockscale),
        out,
        masked_m,
        ab_dtype=ab_dtype,
        sf_dtype=sf_dtype,
        c_dtype=c_dtype,
        sf_vec_size=sf_vec_size,
        alpha=w2_alpha.view(1, 1, num_experts),
        alpha_dtype=get_cute_dtype(w2_alpha),
    )  # in logical [m, k, l]
    # NOTE: the kernel above wrote into the caller's ``out`` storage through
    # the permuted view. The previous trailing ``out = out.permute(2, 0, 1)``
    # only rebound the local name and had no effect, so it was removed.
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/flashinfer_cutedsl_moe.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/mla/aiter_triton_mla.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.v1.attention.backends.mla.rocm_aiter_mla import AiterMLABackend, AiterMLAImpl
class AiterTritonMLABackend(AiterMLABackend):
    """MLA backend variant whose impl uses the AITER Triton MHA kernel."""

    @staticmethod
    def get_impl_cls() -> type["AiterTritonMLAImpl"]:
        return AiterTritonMLAImpl

    @staticmethod
    def get_name() -> str:
        return "AITER_TRITON_MLA"
class AiterTritonMLAImpl(AiterMLAImpl):
    """AiterMLAImpl variant that swaps in the AITER Triton varlen MHA kernel."""
    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: list[float] | None,
        sliding_window: int | None,
        kv_cache_dtype: str,
        logits_soft_cap: float | None,
        attn_type: str,
        kv_sharing_target_layer_name: str | None,
        # MLA Specific Arguments
        **mla_args,
    ) -> None:
        super().__init__(
            num_heads,
            head_size,
            scale,
            num_kv_heads,
            alibi_slopes,
            sliding_window,
            kv_cache_dtype,
            logits_soft_cap,
            attn_type,
            kv_sharing_target_layer_name,
            **mla_args,
        )
        # Imported lazily so this module can load on hosts without aiter.
        from aiter.ops.triton.mha import flash_attn_varlen_func
        self.flash_attn_varlen_func = flash_attn_varlen_func
    def _flash_attn_varlen_diff_headdims(
        self, q, k, v, return_softmax_lse=False, softmax_scale=None, **kwargs
    ):
        """Call the Triton varlen kernel and normalize its LSE layout.

        The Triton kernel takes ``return_lse`` (not ``return_softmax_lse``)
        and returns the LSE as (num_tokens, num_heads); callers expect
        (num_heads, num_tokens), hence the transpose below.
        """
        result = self.flash_attn_varlen_func(  # type: ignore[call-arg]
            q,
            k,
            v,
            softmax_scale=softmax_scale,
            return_lse=return_softmax_lse,
            **kwargs,
        )
        # Transpose the LSE if Triton MHA is used:
        # (q.shape[0], num_q_heads) to (num_q_heads, q.shape[0])
        if type(result) is tuple and return_softmax_lse:
            output, lse = result
            lse = lse.T.contiguous()
            return (output, lse)
        return result
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/mla/aiter_triton_mla.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/config/test_config_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from enum import Enum
import pytest
from vllm.config.utils import get_hash_factors, hash_factors, normalize_value
# Helpers
def endswith_fqname(obj, suffix: str) -> bool:
    # normalize_value(type) returns a fully-qualified name; compare only
    # the suffix to avoid depending on brittle import paths.
    normalized = normalize_value(obj)
    if not isinstance(normalized, str):
        return False
    return normalized.endswith(suffix)
def expected_path(p_str: str = ".") -> str:
    """Return *p_str* expanded, resolved, and rendered as a POSIX path."""
    from pathlib import Path

    return Path(p_str).expanduser().resolve().as_posix()
# Minimal dataclass to test get_hash_factors.
# Avoid importing heavy vLLM configs.
@dataclass
class SimpleConfig:
    # Arbitrary factor value exercised by get_hash_factors tests.
    a: object
    # Optional second factor; defaults to None.
    b: object | None = None
class DummyLogprobsMode(Enum):
    # String-valued member used to exercise enum normalization.
    RAW_LOGITS = "raw_logits"
def test_hash_factors_deterministic():
    """Test that hash_factors produces consistent SHA-256 hashes"""
    factors = {"a": 1, "b": "test"}
    digest = hash_factors(factors)
    # Hashing the same mapping twice yields the same digest.
    assert hash_factors(factors) == digest
    # Dict key insertion order should not affect the hash.
    assert hash_factors({"b": "test", "a": 1}) == digest
    # SHA-256 hex digest: 64 lowercase hex characters.
    assert len(digest) == 64
    assert set(digest) <= set("0123456789abcdef")
@pytest.mark.parametrize(
    "inp, expected",
    [
        (None, None),
        (True, True),
        (1, 1),
        (1.0, 1.0),
        ("x", "x"),
        # bytes-like values normalize to their hex encoding
        (b"ab", "6162"),
        (bytearray(b"ab"), "6162"),
        # sequences normalize to tuples
        ([1, 2], (1, 2)),
        # mappings normalize to key-sorted (key, value) tuples
        ({"b": 2, "a": 1}, (("a", 1), ("b", 2))),
    ],
)
def test_normalize_value_matrix(inp, expected):
    """Parametric input→expected normalization table."""
    assert normalize_value(inp) == expected
def test_normalize_value_enum():
    # Enums normalize to (module.QualName, value); DummyLogprobsMode
    # carries a string payload ('raw_logits').
    normalized = normalize_value(DummyLogprobsMode.RAW_LOGITS)
    assert isinstance(normalized, tuple)
    assert normalized[0].endswith("DummyLogprobsMode")
    assert normalized[1] == "raw_logits"
def test_normalize_value_set_order_insensitive():
    # Sets are unordered; normalize_value sorts elements for determinism,
    # so two sets with equal members must normalize identically.
    assert normalize_value({3, 1, 2}) == normalize_value({1, 2, 3})
def test_normalize_value_path_normalization():
    """Path values normalize to absolute POSIX strings."""
    from pathlib import Path  # local import to avoid global dependency

    # Paths expand/resolve to absolute strings.
    # Stabilizes hashing across working dirs.
    assert normalize_value(Path(".")) == expected_path(".")
def test_normalize_value_uuid_and_to_json():
    # Objects may normalize via a uuid() or to_json_string() hook.
    class WithUuid:
        def uuid(self):
            return "test-uuid"

    class WithJson:
        def to_json_string(self):
            return '{"x":1}'

    assert normalize_value(WithUuid()) == "test-uuid"
    assert normalize_value(WithJson()) == '{"x":1}'
@pytest.mark.parametrize(
    "bad",
    [
        (lambda x: x),
        # instance of an ad-hoc class with __call__: callable, not a class
        (type("CallableInstance", (), {"__call__": lambda self: 0}))(),
        (lambda: (lambda: 0))(),  # nested function instance
    ],
)
def test_error_cases(bad):
    """Inputs expected to raise TypeError."""
    # Reject functions/lambdas/callable instances
    # to avoid under-hashing.
    with pytest.raises(TypeError):
        normalize_value(bad)
def test_enum_vs_int_disambiguation():
    """An enum member and its raw payload must hash differently."""
    # int stays primitive
    nf_int = normalize_value(1)
    assert nf_int == 1
    # enum becomes ("module.QualName", value)
    nf_enum = normalize_value(DummyLogprobsMode.RAW_LOGITS)
    assert isinstance(nf_enum, tuple) and len(nf_enum) == 2
    enum_type, enum_val = nf_enum
    assert enum_type.endswith(".DummyLogprobsMode")
    assert enum_val == "raw_logits"
    # Build factor dicts from configs with int vs enum
    f_int = get_hash_factors(SimpleConfig(1), set())
    f_enum = get_hash_factors(SimpleConfig(DummyLogprobsMode.RAW_LOGITS), set())
    # The int case remains a primitive value
    assert f_int["a"] == 1
    # The enum case becomes a tagged tuple ("module.QualName", "raw_logits")
    assert isinstance(f_enum["a"], tuple) and f_enum["a"][1] == "raw_logits"
    # Factor dicts must differ so we don't collide primitives with Enums.
    assert f_int != f_enum
    # Hash digests must differ correspondingly
    assert hash_factors(f_int) != hash_factors(f_enum)
    # Hash functions produce stable hex strings
    h_int = hash_factors(f_int)
    h_enum = hash_factors(f_enum)
    assert isinstance(h_int, str) and len(h_int) == 64
    assert isinstance(h_enum, str) and len(h_enum) == 64
def test_classes_are_types():
    """Types normalize to FQNs; include real vLLM types."""
    # Only classes allowed; functions/lambdas are rejected.
    # Canonical form is the fully-qualified name.
    assert isinstance(normalize_value(str), str)
    class LocalDummy:
        pass
    # Locally defined classes also normalize to their qualified name.
    assert endswith_fqname(LocalDummy, ".LocalDummy")
def test_envs_compile_factors_stable():
    """Test that envs.compile_factors() hash is stable across fresh initializations.

    Uses subprocesses to ensure env vars with dynamic defaults (like UUIDs)
    are freshly generated each time, verifying they're properly ignored.
    """
    # FIX: import os normally instead of the inline __import__("os") hack
    # previously used inside the env dict literal.
    import os
    import subprocess
    import sys

    # The child process imports vllm from scratch, so any env factors with
    # dynamic defaults are regenerated before the digest is printed.
    code = """
import sys
import logging
logging.disable(logging.CRITICAL)
from vllm import envs
from vllm.config.utils import hash_factors
print(hash_factors(envs.compile_factors()))
"""

    def get_hash_in_subprocess():
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            check=True,
            env={**os.environ, "VLLM_LOGGING_LEVEL": "ERROR"},
        )
        return result.stdout.strip()

    hash1 = get_hash_in_subprocess()
    hash2 = get_hash_in_subprocess()
    assert hash1 == hash2, (
        "compile_factors hash differs between fresh initializations - "
        "dynamic env vars may not be properly ignored"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/config/test_config_utils.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/logging_utils/lazy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any
class lazy:
    """Wrap a zero-argument callable evaluated only during log formatting."""

    __slots__ = ("_factory",)

    def __init__(self, factory: Callable[[], Any]) -> None:
        self._factory = factory

    def __str__(self) -> str:
        # The factory runs only when the value is actually rendered.
        result = self._factory()
        return str(result)

    def __repr__(self) -> str:
        # repr mirrors str so %r-style formatting also defers evaluation.
        return self.__str__()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/logging_utils/lazy.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/quantization/test_cpu_wna16.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.platforms import current_platform
if not current_platform.is_cpu():
pytest.skip("skipping CPU-only tests", allow_module_level=True)
# Quantized checkpoints exercised by the CPU WNA16 path.
MODELS = [
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-AWQ",
    "TheBloke/TinyLlama-1.1B-Chat-v1.0-GPTQ",  # with g_idx
    "Qwen/Qwen1.5-0.5B-Chat-GPTQ-Int4",  # without g_idx
]
# Activation dtypes each model is run under.
DTYPE = ["bfloat16"]
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", DTYPE)
def test_cpu_quant(vllm_runner, model, dtype):
    """Smoke-test greedy generation for each quantized model on CPU."""
    prompts = ["The capital of France is"]
    with vllm_runner(model, dtype=dtype) as llm:
        output = llm.generate_greedy(prompts, max_tokens=32)
        assert output
        print(output)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/quantization/test_cpu_wna16.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/quantization/cpu_wna16.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import torch
from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE
from vllm._custom_ops import (
cpu_gemm_wna16,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.linear import (
LinearBase,
LinearMethodBase,
UnquantizedLinearMethod,
)
from vllm.model_executor.layers.quantization import QuantizationMethods
from vllm.model_executor.layers.quantization.base_config import (
QuantizationConfig,
QuantizeMethodBase,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
is_layer_skipped,
pack_cols,
unpack_cols,
)
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
from vllm.model_executor.models.utils import WeightsMapper
from vllm.model_executor.parameter import (
GroupQuantScaleParameter,
PackedvLLMParameter,
)
from vllm.platforms import current_platform
from vllm.transformers_utils.config import get_safetensors_params_metadata
logger = init_logger(__name__)
class CPUAWQConfig(QuantizationConfig):
    """Config class for CPU AWQ (4-bit weight-only quantization)."""

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        zero_point: bool,
        lm_head_quantized: bool,
        modules_to_not_convert: list[str] | None,
        full_config: dict[str, Any],
    ) -> None:
        super().__init__()
        # The CPU kernel only supports 4-bit weights.
        assert weight_bits == 4
        self.pack_factor = 32 // weight_bits  # packed into int32
        self.group_size = group_size
        self.zero_point = zero_point
        self.lm_head_quantized = lm_head_quantized
        self.weight_bits = weight_bits
        self.modules_to_not_convert = modules_to_not_convert or []
        self.full_config = full_config

    def __repr__(self) -> str:
        # BUGFIX: previously printed "AWQMarlinConfig(...)", a copy-paste
        # leftover from the GPU Marlin config; use the actual class name.
        return (
            f"CPUAWQConfig("
            f"group_size={self.group_size}, "
            f"zero_point={self.zero_point}, "
            f"lm_head_quantized={self.lm_head_quantized}, "
            f"modules_to_not_convert={self.modules_to_not_convert})"
        )

    @classmethod
    def get_name(cls) -> "QuantizationMethods":
        return "cpu_awq"

    @classmethod
    def get_supported_act_dtypes(cls) -> list[torch.dtype]:
        return [torch.half, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        # GPU capability gating does not apply on CPU.
        return -1

    @classmethod
    def get_config_filenames(cls) -> list[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "CPUAWQConfig":
        """Build a config from an AWQ quantization config dict."""
        weight_bits = cls.get_from_keys(config, ["bits"])
        group_size = cls.get_from_keys(config, ["group_size"])
        zero_point = cls.get_from_keys(config, ["zero_point"])
        lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"], default=False)
        modules_to_not_convert = cls.get_from_keys_or(
            config, ["modules_to_not_convert"], None
        )
        return cls(
            weight_bits,
            group_size,
            zero_point,
            lm_head_quantized,
            modules_to_not_convert,
            config,
        )

    @classmethod
    def override_quantization_method(
        cls, hf_quant_cfg, user_quant
    ) -> "QuantizationMethods | None":
        # Redirect generic "awq" checkpoints to this backend when on CPU.
        quant_method = hf_quant_cfg.get("quant_method", "").lower()
        if current_platform.is_cpu() and (quant_method == "awq"):
            return cls.get_name()
        return None

    def get_quant_method(
        self, layer: torch.nn.Module, prefix: str
    ) -> "QuantizeMethodBase | None":
        """Return the quantized linear method for ``layer``, or None."""
        if isinstance(layer, LinearBase) or (
            isinstance(layer, ParallelLMHead) and self.lm_head_quantized
        ):
            if is_layer_skipped(
                prefix,
                self.modules_to_not_convert,
                self.packed_modules_mapping,
                skip_with_substr=True,
            ):
                return UnquantizedLinearMethod()
            return CPUAWQLinearMethod(self)
        return None

    def apply_vllm_mapper(self, hf_to_vllm_mapper: "WeightsMapper"):
        # Keep the skip-list expressed in vLLM naming after HF->vLLM mapping.
        if self.modules_to_not_convert:
            self.modules_to_not_convert = hf_to_vllm_mapper.apply_list(
                self.modules_to_not_convert
            )

    def maybe_update_config(self, model_name: str, revision: str | None = None):
        """Infer ``modules_to_not_convert`` from checkpoint dtypes when unset.

        Parameters stored in fp16/bf16/fp32 per the safetensors metadata are
        treated as unquantized and added to the skip list.
        """
        if self.modules_to_not_convert:
            return
        unquant_dtypes = [torch.float16, torch.bfloat16, torch.float32]
        metadata = get_safetensors_params_metadata(model_name, revision=revision)
        layers = {param_name.rsplit(".", 1)[0] for param_name in metadata}
        quant_layers: set[str] = {
            param_name.rsplit(".", 1)[0]
            for param_name, info in metadata.items()
            if (dtype := info.get("dtype", None))
            and _SAFETENSORS_TO_TORCH_DTYPE[dtype] not in unquant_dtypes
        }
        self.modules_to_not_convert = list(layers - quant_layers)
class CPUAWQLinearMethod(LinearMethodBase):
    """Linear method for CPU AWQ.

    Args:
        quant_config: The CPU AWQ quantization config.
    """

    def __init__(self, quant_config: CPUAWQConfig) -> None:
        self.quant_config = quant_config
        # The CPU kernel requires asymmetric (zero-point) AWQ checkpoints.
        assert self.quant_config.zero_point

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: list[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ) -> None:
        """Allocate packed qweight/qzeros/scales parameters on ``layer``."""
        del output_size
        output_size_per_partition = sum(output_partition_sizes)
        weight_loader = extra_weight_attrs.get("weight_loader")
        # Normalize group_size: -1 means one group spanning the whole input.
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size
        # AWQ packs pack_factor (8) 4-bit values per int32 along the output
        # dimension.
        qweight = PackedvLLMParameter(
            data=torch.empty(
                input_size_per_partition,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader,
        )
        num_groups = input_size_per_partition // group_size
        qzeros = PackedvLLMParameter(
            data=torch.empty(
                num_groups,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader,
        )
        scales = GroupQuantScaleParameter(
            data=torch.empty(
                num_groups,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            input_dim=0,
            output_dim=1,
            weight_loader=weight_loader,
        )
        layer.register_parameter("qweight", qweight)
        layer.register_parameter("qzeros", qzeros)
        layer.register_parameter("scales", scales)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        """Repack AWQ weights/zeros into the layout cpu_gemm_wna16 expects.

        BUGFIX: removed a leftover debug ``torch.set_printoptions(...)``
        call that mutated process-global printing state as a side effect.
        """
        packed_weight = layer.qweight.data
        packed_zeros = layer.qzeros.data
        group_num = packed_zeros.size(0)
        bits = self.quant_config.weight_bits
        pack_factor = int(self.quant_config.pack_factor)
        input_size, packed_output_size = packed_weight.size()
        output_size = packed_output_size * pack_factor
        isa_hint = _get_isa_hint(layer.scales.dtype)
        layer.isa_hint = isa_hint
        # AWQ interleaves nibbles within each packed int32; undo that order.
        interleave_map = (0, 4, 1, 5, 2, 6, 3, 7)
        weight = unpack_cols(
            packed_weight,
            bits,
            input_size,
            output_size,
        )
        zeros = unpack_cols(
            packed_zeros,
            bits,
            group_num,
            output_size,
        )
        weight = (
            weight.view(input_size, -1, pack_factor)[:, :, interleave_map]
            .reshape(input_size, output_size)
            .contiguous()
        )
        zeros = (
            zeros.view(group_num, -1, pack_factor)[:, :, interleave_map]
            .reshape(group_num, output_size)
            .contiguous()
        )
        zeros = pack_cols(zeros, bits, group_num, output_size).contiguous()
        # make 16 output channel as a block and transpose to
        # the make the block contigous
        weight = pack_cols(weight, bits, input_size, output_size)
        weight = (
            weight.view(input_size, -1, 16 // pack_factor)
            .permute(1, 0, 2)
            .reshape(-1, input_size * 16 // pack_factor)
            .contiguous()
        )
        layer.qweight.data = weight
        layer.qzeros.data = zeros

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the quantized matmul (plus optional bias) on CPU."""
        x = cpu_gemm_wna16(
            input=x,
            q_weight=layer.qweight,
            scales=layer.scales,
            zeros=layer.qzeros,
            g_idx=None,
            bias=bias,
            # Use the config's pack factor (8 for 4-bit) rather than a
            # hard-coded literal, keeping apply() consistent with
            # create_weights()/process_weights_after_loading().
            pack_factor=int(self.quant_config.pack_factor),
            isa_hint=layer.isa_hint,
        )
        return x
def _get_isa_hint(dtype: torch.dtype) -> str:
supports_amx = torch._C._cpu._is_amx_tile_supported()
if supports_amx and dtype in (torch.bfloat16,):
return "amx"
else:
return "vec"
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/cpu_wna16.py",
"license": "Apache License 2.0",
"lines": 264,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/offline_inference/rlhf_online_quant.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Demonstrates reinforcement learning from human feedback (RLHF) using vLLM and Ray.
The script separates training and inference workloads onto distinct GPUs
so that Ray can manage process placement and inter-process communication.
A Hugging Face Transformer model occupies GPU 0 for training, whereas a
tensor-parallel vLLM inference engine occupies GPU 1–2.
The example performs the following steps:
* Load the training model on GPU 0.
* Split the inference model across GPUs 1–2 using vLLM's tensor parallelism
and Ray placement groups.
* Generate text from a list of prompts using the inference engine.
* Update the weights of the training model and broadcast the updated weights
to the inference engine by using a Ray collective RPC group. Note that
for demonstration purposes we simply zero out the weights.
For a production-ready implementation that supports multiple training and
inference replicas, see the OpenRLHF framework:
https://github.com/OpenRLHF/OpenRLHF
This example assumes a single-node cluster with three GPUs, but Ray
supports multi-node clusters. vLLM expects the GPUs are only used for vLLM
workloads. Residual GPU activity interferes with vLLM memory profiling and
causes unexpected behavior.
"""
import json
import os
import ray
import torch
from ray.util.placement_group import placement_group
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
from rlhf_utils import stateless_init_process_group
from torchao.core.config import config_to_dict
from torchao.quantization import (
Float8DynamicActivationFloat8WeightConfig,
PerRow,
)
from transformers import AutoModelForCausalLM
from vllm import LLM, SamplingParams
from vllm.utils.network_utils import get_ip, get_open_port
class MyLLM(LLM):
    """Configure the vLLM worker for Ray placement group execution."""

    def __init__(self, *args, **kwargs):
        # Drop the top-level CUDA_VISIBLE_DEVICES assigned by Ray so that
        # vLLM performs its own device placement within the worker.
        if "CUDA_VISIBLE_DEVICES" in os.environ:
            del os.environ["CUDA_VISIBLE_DEVICES"]
        super().__init__(*args, **kwargs)
# Load the OPT-125M model onto GPU 0 for the training workload.
train_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
train_model.to("cuda:0")
# Initialize Ray and set the visible devices. The vLLM engine will
# be placed on GPUs 1 and 2.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
ray.init()
# Create a placement group that reserves GPU 1–2 for the vLLM inference engine.
# Learn more about Ray placement groups:
# https://docs.ray.io/en/latest/ray-core/scheduling/placement-group.html
pg_inference = placement_group([{"GPU": 1, "CPU": 0}] * 2)
ray.get(pg_inference.ready())
scheduling_inference = PlacementGroupSchedulingStrategy(
    placement_group=pg_inference,
    placement_group_capture_child_tasks=True,
    placement_group_bundle_index=0,
)
# Launch the vLLM inference engine. The `enforce_eager` flag reduces
# start-up latency.
# generate torchao quantization config for RL rollout
# see https://github.com/vllm-project/vllm/pull/23014 for instructions to
# use serialized config files instead of passing around json string
config = Float8DynamicActivationFloat8WeightConfig(granularity=PerRow())
json_str = json.dumps(config_to_dict(config))
llm = ray.remote(
    num_cpus=0,
    num_gpus=0,
    scheduling_strategy=scheduling_inference,
)(MyLLM).remote(
    model="facebook/opt-125m",
    hf_overrides={"quantization_config_dict_json": json_str},
    enforce_eager=True,
    worker_extension_cls="rlhf_utils.WorkerExtension",
    tensor_parallel_size=2,
    distributed_executor_backend="ray",
)
# Generate text from the prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# temperature=0 selects greedy decoding, making runs reproducible.
sampling_params = SamplingParams(temperature=0)
outputs = ray.get(llm.generate.remote(prompts, sampling_params))
print("-" * 50)
for output in outputs:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}")
    print("-" * 50)
# Set up the communication channel between the training process and the
# inference engine.
master_address = get_ip()
master_port = get_open_port()
# World size 3 = 1 trainer (rank 0) + 2 tensor-parallel vLLM workers.
handle = llm.collective_rpc.remote(
    "init_weight_update_group", args=(master_address, master_port, 1, 3)
)
model_update_group = stateless_init_process_group(
    master_address, master_port, 0, 3, torch.device("cuda:0")
)
ray.get(handle)
# Simulate a training step by zeroing out all model weights.
# In a real RLHF training loop the weights would be updated using the gradient
# from an RL objective such as PPO on a reward model.
for name, p in train_model.named_parameters():
    p.data.zero_()
# Synchronize the updated weights to the inference engine.
for name, p in train_model.named_parameters():
    dtype_name = str(p.dtype).split(".")[-1]
    # Issue the RPC before broadcasting so workers are ready to receive
    # the tensor from rank 0; wait on the handle after the broadcast.
    handle = llm.collective_rpc.remote(
        "update_weight", args=(name, dtype_name, p.shape)
    )
    model_update_group.broadcast(p, src=0, stream=torch.cuda.current_stream())
    ray.get(handle)
# Verify that the inference weights have been updated.
assert all(ray.get(llm.collective_rpc.remote("check_weights_changed")))
# Generate text with the updated model. The output is expected to be nonsense
# because the weights are zero.
outputs_updated = ray.get(llm.generate.remote(prompts, sampling_params))
print("-" * 50)
for output in outputs_updated:
    prompt = output.prompt
    generated_text = output.outputs[0].text
    print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}")
    print("-" * 50)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/rlhf_online_quant.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/multimodal/generation/test_multimodal_gguf.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
os.environ["TOKENIZERS_PARALLELISM"] = "true"
from typing import Any, NamedTuple
import pytest
from huggingface_hub import hf_hub_download
from pytest import MarkDecorator
from transformers import AutoModelForImageTextToText
from tests.quantization.utils import is_quant_method_supported
from vllm.assets.image import ImageAsset
from vllm.multimodal.image import rescale_image_size
from vllm.utils.torch_utils import set_default_torch_num_threads
from ....conftest import IMAGE_ASSETS, HfRunner, VllmRunner
from ...utils import check_logprobs_close
class GGUFMMTestConfig(NamedTuple):
    """Declarative description of one GGUF multimodal model test case.

    Pairs an original HF model (used as the correctness baseline) with the
    GGUF repo/files to load into vLLM, plus the prompts and image assets
    to run through both.
    """

    # HF repo id of the unquantized baseline model (run via HfRunner).
    original_model: str
    # HF repo id hosting the GGUF artifacts.
    gguf_repo: str
    # Filename of the language-model (backbone) GGUF file inside gguf_repo.
    gguf_backbone: str
    # Filename of the multimodal projector GGUF file inside gguf_repo.
    gguf_mmproj: str
    # One prompt per image asset, in the same order as image_names.
    prompt: list[str]
    image_names: list[str]  # Store names, load PIL images at runtime
    max_model_len: int = 4096
    # NOTE: NamedTuple defaults are class-level objects shared by every
    # instance; these defaults are never mutated, so the shared []/{} are safe.
    marks: list[MarkDecorator] = []
    mm_processor_kwargs: dict[str, Any] = {}

    @property
    def gguf_model(self):
        # Download the mmproj file first — presumably so it is cached
        # alongside the backbone and can be discovered from the same
        # directory (TODO confirm against vLLM's mmproj detection).
        hf_hub_download(self.gguf_repo, filename=self.gguf_mmproj)
        return hf_hub_download(self.gguf_repo, filename=self.gguf_backbone)
# Common prompts aligned with test_common.py "gemma3" entry format
_GEMMA3_PROMPTS = IMAGE_ASSETS.prompts(
    {
        "stop_sign": (
            "<bos><start_of_turn>user\n"
            "<start_of_image>What's the content in the center of the image?"
            "<end_of_turn>\n<start_of_turn>model\n"
        ),
        "cherry_blossom": (
            "<bos><start_of_turn>user\n"
            "<start_of_image>What is the season?"
            "<end_of_turn>\n<start_of_turn>model\n"
        ),
    }
)
# Image asset names - load at runtime to avoid pickle issues with subprocess
_GEMMA3_IMAGE_NAMES = ["stop_sign", "cherry_blossom"]
# Regular multimodal (no pan-and-scan) - uses QAT Q4_0 GGUF
GEMMA3_CONFIG = GGUFMMTestConfig(
    original_model="google/gemma-3-4b-it",
    gguf_repo="google/gemma-3-4b-it-qat-q4_0-gguf",
    gguf_backbone="gemma-3-4b-it-q4_0.gguf",
    gguf_mmproj="mmproj-model-f16-4B.gguf",
    prompt=_GEMMA3_PROMPTS,
    image_names=_GEMMA3_IMAGE_NAMES,
    max_model_len=4096,
    marks=[pytest.mark.core_model],
    mm_processor_kwargs={},
)
# Pan-and-scan multimodal - uses unquantized BF16 GGUF
# (do_pan_and_scan exercises the image-tiling preprocessing path).
GEMMA3_CONFIG_PAN_AND_SCAN = GGUFMMTestConfig(
    original_model="google/gemma-3-4b-it",
    gguf_repo="unsloth/gemma-3-4b-it-GGUF",
    gguf_backbone="gemma-3-4b-it-BF16.gguf",
    gguf_mmproj="mmproj-BF16.gguf",
    prompt=_GEMMA3_PROMPTS,
    image_names=_GEMMA3_IMAGE_NAMES,
    max_model_len=4096,
    marks=[pytest.mark.core_model],
    mm_processor_kwargs={"do_pan_and_scan": True},
)
# All configs picked up by the parametrized test below.
MODELS_TO_TEST = [GEMMA3_CONFIG, GEMMA3_CONFIG_PAN_AND_SCAN]
def run_multimodal_gguf_test(
    hf_runner: type[HfRunner],
    vllm_runner: type[VllmRunner],
    model: GGUFMMTestConfig,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
):
    """Run one GGUF config through vLLM and HF, then compare logprobs."""
    # PIL images are loaded here, inside the worker process, so the test
    # parameters themselves stay picklable.
    base_images = [ImageAsset(name).pil_image for name in model.image_names]
    scale_factors = [0.25, 0.5, 1.0]

    # One test case per image asset: the same prompt repeated at several
    # image scales.
    per_image_inputs = []
    for image, prompt in zip(base_images, model.prompt):
        repeated_prompts = [prompt] * len(scale_factors)
        rescaled = [rescale_image_size(image, factor) for factor in scale_factors]
        per_image_inputs.append((repeated_prompts, rescaled))

    # NOTE: Run vLLM first to avoid CUDA init issues with multiprocessing fork.
    with (
        set_default_torch_num_threads(1),
        vllm_runner(
            model_name=model.gguf_model,
            enforce_eager=True,
            tokenizer_name=model.original_model,
            dtype=dtype,
            max_model_len=model.max_model_len,
            mm_processor_kwargs=model.mm_processor_kwargs,
        ) as gguf_model,
    ):
        gguf_outputs_per_case = [
            gguf_model.generate_greedy_logprobs(
                prompts,
                max_tokens,
                num_logprobs=num_logprobs,
                images=case_images,
            )
            for prompts, case_images in per_image_inputs
        ]

    # HuggingFace baseline on the original (non-GGUF) checkpoint.
    with hf_runner(
        model.original_model,
        dtype=dtype,
        auto_cls=AutoModelForImageTextToText,
    ) as hf_model:
        hf_outputs_per_case = [
            hf_model.generate_greedy_logprobs_limit(
                prompts,
                max_tokens,
                num_logprobs=num_logprobs,
                images=case_images,
            )
            for prompts, case_images in per_image_inputs
        ]

    for hf_outputs, gguf_outputs in zip(hf_outputs_per_case, gguf_outputs_per_case):
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=gguf_outputs,
            name_0="hf",
            name_1="gguf",
        )
@pytest.mark.skipif(
    not is_quant_method_supported("gguf"),
    reason="gguf is not supported on this GPU type.",
)
@pytest.mark.parametrize(
    "model",
    [pytest.param(cfg, marks=cfg.marks) for cfg in MODELS_TO_TEST],
)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [10])
def test_gemma3_mm_gguf(
    hf_runner: type[HfRunner],
    vllm_runner: type[VllmRunner],
    model: GGUFMMTestConfig,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
) -> None:
    """End-to-end logprob comparison of GGUF Gemma3 against the HF baseline."""
    run_multimodal_gguf_test(
        hf_runner=hf_runner,
        vllm_runner=vllm_runner,
        model=model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/generation/test_multimodal_gguf.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/transformers_utils/gguf_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""GGUF utility functions."""
from functools import cache
from os import PathLike
from pathlib import Path
import gguf
import regex as re
from gguf.constants import Keys, VisionProjectorType
from gguf.quants import GGMLQuantizationType
from transformers import Gemma3Config, PretrainedConfig, SiglipVisionConfig
from vllm.logger import init_logger
from .repo_utils import list_filtered_repo_files
logger = init_logger(__name__)
@cache
def check_gguf_file(model: str | PathLike) -> bool:
"""Check if the file is a GGUF model."""
model = Path(model)
if not model.is_file():
return False
elif model.suffix == ".gguf":
return True
try:
with model.open("rb") as f:
header = f.read(4)
return header == b"GGUF"
except Exception as e:
logger.debug("Error reading file %s: %s", model, e)
return False
@cache
def is_remote_gguf(model: str | Path) -> bool:
"""Check if the model is a remote GGUF model."""
pattern = r"^[a-zA-Z0-9][a-zA-Z0-9._-]*/[a-zA-Z0-9][a-zA-Z0-9._-]*:[A-Za-z0-9_+-]+$"
model = str(model)
if re.fullmatch(pattern, model):
_, quant_type = model.rsplit(":", 1)
return is_valid_gguf_quant_type(quant_type)
return False
# Common suffixes used in GGUF file naming conventions
# e.g., Q4_K_M, Q3_K_S, Q5_K_L, Q2_K_XL
# These size-variant markers are appended to a base GGML quant type name.
_GGUF_QUANT_SUFFIXES = ("_M", "_S", "_L", "_XL", "_XS", "_XXS")
def is_valid_gguf_quant_type(gguf_quant_type: str) -> bool:
    """Check if the quant type is a valid GGUF quant type.

    Supports both exact GGML quant types (e.g., Q4_K, IQ1_S) and
    extended naming conventions (e.g., Q4_K_M, Q3_K_S, Q5_K_L).

    Args:
        gguf_quant_type: Candidate quantization type name.

    Returns:
        True if the name is a GGML quant type, optionally with a known
        size suffix appended; False otherwise.
    """
    # Use the enum's member mapping rather than getattr(): getattr() also
    # resolves non-member class attributes (e.g. "mro"), which previously
    # made arbitrary attribute names pass as "valid" quant types.
    members = GGMLQuantizationType.__members__
    if gguf_quant_type in members:
        return True
    # Extended naming appends a size suffix to a base type, e.g. Q4_K_M -> Q4_K.
    return any(
        gguf_quant_type[: -len(suffix)] in members
        for suffix in _GGUF_QUANT_SUFFIXES
        if gguf_quant_type.endswith(suffix)
    )
def split_remote_gguf(model: str | Path) -> tuple[str, str]:
    """Split a remote GGUF reference into ``(repo_id, quant_type)``."""
    spec = str(model)
    if not is_remote_gguf(spec):
        raise ValueError(
            f"Wrong GGUF model or invalid GGUF quant type: {spec}.\n"
            "- It should be in repo_id:quant_type format.\n"
            f"- Valid base quant types: {GGMLQuantizationType._member_names_}\n"
            f"- Extended suffixes also supported: {_GGUF_QUANT_SUFFIXES}",
        )
    # is_remote_gguf guarantees a ":" is present, so rpartition always splits.
    repo_id, _, quant_type = spec.rpartition(":")
    return (repo_id, quant_type)
def is_gguf(model: str | Path) -> bool:
    """Return True when ``model`` refers to a GGUF model.

    Accepts either a local GGUF file path or a remote reference in
    ``repo_id:quant_type`` format.

    Args:
        model: Model name, path, or Path object to check.

    Returns:
        True if the model is a GGUF model, False otherwise.
    """
    spec = str(model)
    return check_gguf_file(spec) or is_remote_gguf(spec)
def detect_gguf_multimodal(model: str) -> Path | None:
"""Check if GGUF model has multimodal projector file.
Args:
model: Model path string
Returns:
Path to mmproj file if found, None otherwise
"""
if not model.endswith(".gguf"):
return None
try:
model_path = Path(model)
if not model_path.is_file():
return None
model_dir = model_path.parent
mmproj_patterns = ["mmproj.gguf", "mmproj-*.gguf", "*mmproj*.gguf"]
for pattern in mmproj_patterns:
mmproj_files = list(model_dir.glob(pattern))
if mmproj_files:
return mmproj_files[0]
return None
except Exception:
return None
def extract_vision_config_from_gguf(mmproj_path: str) -> "SiglipVisionConfig | None":
    """Build a ``SiglipVisionConfig`` from mmproj.gguf metadata.

    Reads the standard CLIP-vision fields from the GGUF metadata (via the
    official gguf-py key constants), detects the projector type (e.g.
    gemma3), and applies projector-specific tweaks. Unknown projector
    types fall back to plain SiglipVisionConfig defaults.

    Args:
        mmproj_path: Path to mmproj.gguf file (str or Path)

    Returns:
        SiglipVisionConfig on success, or None when any required metadata
        field is absent.

    Raises:
        Exception: Errors from ``gguf.GGUFReader`` (missing/corrupt file)
            propagate unchanged.
    """
    reader = gguf.GGUFReader(str(mmproj_path))

    # Identify the projector type so model-specific tweaks can be applied.
    projector_type = None
    type_field = reader.get_field(Keys.Clip.PROJECTOR_TYPE)
    if type_field:
        try:
            projector_type = bytes(type_field.parts[-1]).decode("utf-8")
        except (AttributeError, UnicodeDecodeError) as e:
            logger.warning("Failed to decode projector type from GGUF: %s", e)

    # (gguf_metadata_key, SiglipVisionConfig parameter, target dtype)
    required_fields = (
        (Keys.ClipVision.EMBEDDING_LENGTH, "hidden_size", int),
        (Keys.ClipVision.FEED_FORWARD_LENGTH, "intermediate_size", int),
        (Keys.ClipVision.BLOCK_COUNT, "num_hidden_layers", int),
        (Keys.ClipVision.Attention.HEAD_COUNT, "num_attention_heads", int),
        (Keys.ClipVision.IMAGE_SIZE, "image_size", int),
        (Keys.ClipVision.PATCH_SIZE, "patch_size", int),
        (Keys.ClipVision.Attention.LAYERNORM_EPS, "layer_norm_eps", float),
    )

    # All fields are mandatory: bail out with None if any one is missing.
    config_params = {}
    for gguf_key, param_name, dtype in required_fields:
        field = reader.get_field(gguf_key)
        if field is None:
            logger.warning(
                "Missing required vision config field '%s' in mmproj.gguf",
                gguf_key,
            )
            return None
        # Scalar fields store their value in the last part of the field.
        config_params[param_name] = dtype(field.parts[-1])

    if projector_type == VisionProjectorType.GEMMA3:
        # Gemma3 does not use the vision pooling head (multihead attention);
        # vision_use_head is a vLLM-specific SiglipVisionTransformer knob.
        config_params["vision_use_head"] = False
        logger.info("Detected Gemma3 projector, disabling vision pooling head")
    # Add other projector-type-specific customizations here as needed.

    # num_channels / attention_dropout keep SiglipVisionConfig defaults.
    config = SiglipVisionConfig(**config_params)
    if projector_type:
        logger.info(
            "Extracted vision config from mmproj.gguf (projector_type: %s)",
            projector_type,
        )
    else:
        logger.info("Extracted vision config from mmproj.gguf metadata")
    return config
def maybe_patch_hf_config_from_gguf(
    model: str,
    hf_config: PretrainedConfig,
) -> PretrainedConfig:
    """Patch HF config for multimodal GGUF models.

    If a multimodal projector file (mmproj*.gguf) is found next to the
    model file and the config is a Gemma3 text config, rebuild the config
    as a ``Gemma3Config`` with the ``Gemma3ForConditionalGeneration``
    architecture so the model is served as a multimodal model. In every
    other case the input config is returned unchanged.

    NOTE(review): an earlier docstring also claimed this function overrides
    vocab_size from the GGUF embedding tensor; no such logic exists in this
    body — if that happens at all, it happens elsewhere.

    Args:
        model: Model path string
        hf_config: HuggingFace config to patch

    Returns:
        Updated HuggingFace config (a new Gemma3Config for the multimodal
        case, otherwise the original object).
    """
    # Patch multimodal config if mmproj.gguf exists
    mmproj_path = detect_gguf_multimodal(model)
    if mmproj_path is not None:
        # May return None if the mmproj metadata is incomplete.
        vision_config = extract_vision_config_from_gguf(str(mmproj_path))
        # Create HF config for Gemma3 multimodal
        text_config = hf_config.get_text_config()
        is_gemma3 = hf_config.model_type in ("gemma3", "gemma3_text")
        if vision_config is not None and is_gemma3:
            new_hf_config = Gemma3Config(
                text_config=text_config,
                vision_config=vision_config,
                architectures=["Gemma3ForConditionalGeneration"],
            )
            hf_config = new_hf_config
    return hf_config
def get_gguf_file_path_from_hf(
    repo_id: str | Path,
    quant_type: str,
    revision: str | None = None,
) -> str:
    """Get the GGUF file path from HuggingFace Hub based on repo_id and quant_type.

    Args:
        repo_id: The HuggingFace repository ID (e.g., "Qwen/Qwen3-0.6B")
        quant_type: The quantization type (e.g., "Q4_K_M", "F16")
        revision: Optional revision/branch name

    Returns:
        The path to the GGUF file on HuggingFace Hub (e.g., "filename.gguf").

    Raises:
        ValueError: If no file in the repo matches the quantization type.
    """
    repo_id = str(repo_id)
    # Match "<name>-<quant>.gguf" (possibly sharded, possibly one directory
    # deep) anywhere in the repo.
    gguf_patterns = [
        f"*-{quant_type}.gguf",
        f"*-{quant_type}-*.gguf",
        f"*/*-{quant_type}.gguf",
        f"*/*-{quant_type}-*.gguf",
    ]
    matching_files = list_filtered_repo_files(
        repo_id,
        allow_patterns=gguf_patterns,
        revision=revision,
    )
    if len(matching_files) == 0:
        # BUG FIX: the message was previously passed printf-style
        # ("...%s...", repo_id, quant_type) to ValueError, which does not
        # interpolate arguments; format it explicitly instead.
        raise ValueError(
            f"Could not find GGUF file for repo {repo_id} "
            f"with quantization {quant_type}."
        )
    # Sort to ensure consistent ordering (prefer non-sharded files)
    matching_files.sort(key=lambda x: (x.count("-"), x))
    gguf_filename = matching_files[0]
    return gguf_filename
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/gguf_utils.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/afmoe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only AfMoE model compatible with HuggingFace weights."""
import typing
from collections.abc import Callable, Iterable
from itertools import islice
import torch
from torch import nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config
from vllm.distributed import (
get_ep_group,
get_pp_group,
get_tensor_model_parallel_world_size,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.interfaces import (
SupportsEagle3,
SupportsLoRA,
SupportsPP,
)
from vllm.model_executor.models.llama import LlamaMLP as AfmoeMLP
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
PPMissingLayer,
WeightsMapper,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
logger = init_logger(__name__)
class AfmoeMoE(nn.Module):
    """AfMoE sparse FFN block.

    Combines a float32 router gate, optional shared experts, and routed
    experts implemented with ``SharedFusedMoE``. Supports expert
    parallelism (EP) and optional EPLB redundant-expert load balancing.
    """

    def __init__(
        self,
        config,  # AfmoeConfig
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        enable_eplb: bool = False,
    ):
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.route_scale = config.route_scale
        self.score_func = config.score_func
        self.route_norm = config.route_norm
        self.ep_group = get_ep_group().device_group
        self.ep_rank = self.ep_group.rank()
        self.ep_size = self.ep_group.size()
        self.n_routed_experts: int = config.num_experts
        self.n_shared_experts: int = config.num_shared_experts
        if config.hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {config.hidden_act}. "
                "Only silu is supported for now."
            )
        # Router gate: kept in float32 for routing numerical stability
        # (forward() also casts hidden states to float32 before the gate).
        self.gate = nn.Linear(
            config.hidden_size,
            config.num_experts,
            bias=False,
            dtype=torch.float32,
        )
        # Per-expert score correction bias, passed to SharedFusedMoE below.
        self.expert_bias = nn.Parameter(
            torch.empty(config.num_experts, dtype=torch.float32)
        )
        # Load balancing settings
        # physical = logical + redundant; each EP rank owns a contiguous
        # slice of the physical experts.
        vllm_config = get_current_vllm_config()
        eplb_config = vllm_config.parallel_config.eplb_config
        self.enable_eplb = enable_eplb
        self.n_redundant_experts = eplb_config.num_redundant_experts
        self.n_logical_experts = self.n_routed_experts
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        self.shared_experts = None
        # Shared experts
        if config.num_shared_experts > 0:
            intermediate_size = config.moe_intermediate_size * config.num_shared_experts
            self.shared_experts = AfmoeMLP(
                hidden_size=config.hidden_size,
                intermediate_size=intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                reduce_results=False,  # reduced together with routed output
                prefix=f"{prefix}.shared_experts",
            )
        # Routed experts using SharedFusedMoE
        self.experts = SharedFusedMoE(
            shared_experts=self.shared_experts,
            num_experts=config.num_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=False,
            # Renormalization of top-k weights only applies to sigmoid scoring.
            renormalize=self.route_norm if self.score_func == "sigmoid" else False,
            quant_config=quant_config,
            use_grouped_topk=True,
            num_expert_group=config.n_group,
            topk_group=config.topk_group,
            prefix=f"{prefix}.experts",
            scoring_func=self.score_func,
            routed_scaling_factor=self.route_scale,
            e_score_correction_bias=self.expert_bias,
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
            router_logits_dtype=torch.float32,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route tokens through shared + routed experts.

        Args:
            hidden_states: (num_tokens, hidden_dim) activations.

        Returns:
            Tensor of the same shape as the input.
        """
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        # Router runs in float32 regardless of the model dtype.
        router_logits = self.gate(hidden_states.to(dtype=torch.float32))
        fused_moe_out = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        # With shared experts, SharedFusedMoE returns a (shared, routed) pair.
        if self.shared_experts is not None:
            shared_output, final_hidden_states = fused_moe_out
            final_hidden_states = final_hidden_states + shared_output
        else:
            final_hidden_states = fused_moe_out
        # Reduction was deferred (reduce_results=False); do it once here.
        if self.tp_size > 1:
            final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel(
                final_hidden_states
            )
        return final_hidden_states.view(num_tokens, hidden_dim)
class AfmoeAttention(nn.Module):
    """AfMoE self-attention with per-head Q/K RMSNorm and output gating.

    Layers whose ``config.layer_types`` entry is "sliding_attention" use a
    sliding window and rotary embeddings; all other layers attend globally
    and apply no rotary embedding.
    """

    def __init__(
        self,
        config,  # AfmoeConfig
        layer_idx: int,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position_embeddings: int = 131072,
        head_dim: int | None = None,
        # NOTE(review): this parameter is currently unused — the q/k norms
        # below read config.rms_norm_eps directly. Confirm intended.
        rms_norm_eps: float = 1e-05,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
    ) -> None:
        super().__init__()
        self.layer_idx = layer_idx
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim or (hidden_size // self.total_num_heads)
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        # Check if this is a local attention layer
        self.is_local_attention = config.layer_types[layer_idx] == "sliding_attention"
        self.sliding_window = config.sliding_window if self.is_local_attention else None
        self.qkv_proj = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            self.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # Gating projection: produces the per-element sigmoid gate that is
        # applied to the attention output in forward().
        self.gate_proj = ColumnParallelLinear(
            hidden_size,
            self.total_num_heads * self.head_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_proj",
        )
        # Q/K normalization
        self.q_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self.k_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        # Only create rotary embeddings for local attention
        if self.is_local_attention:
            self.rotary_emb = get_rope(
                self.head_dim,
                max_position=max_position_embeddings,
                rope_parameters=config.rope_parameters,
                is_neox_style=True,
            )
        else:
            self.rotary_emb = None
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            per_layer_sliding_window=self.sliding_window,
            prefix=f"{prefix}.attn",
            attn_type=attn_type,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        gate, _ = self.gate_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # Apply Q/K normalization (per-head RMSNorm over head_dim).
        q = self.q_norm(q.reshape(-1, self.num_heads, self.head_dim)).reshape(q.shape)
        k = self.k_norm(k.reshape(-1, self.num_kv_heads, self.head_dim)).reshape(
            k.shape
        )
        # Apply rotary embeddings only for local attention
        if self.is_local_attention and self.rotary_emb is not None:
            q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        # Apply gating (elementwise sigmoid gate from gate_proj).
        attn_output = attn_output * torch.sigmoid(gate)
        output, _ = self.o_proj(attn_output)
        return output
class AfmoeDecoderLayer(nn.Module):
    """One AfMoE transformer layer: gated attention plus a dense or MoE FFN.

    The first ``config.num_dense_layers`` layers use a dense MLP; the rest
    use ``AfmoeMoE``. Each sub-block is wrapped by a pre- and post-norm
    ("sandwich" normalization: four RMSNorms per layer).
    """

    def __init__(
        self,
        config,  # AfmoeConfig
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        enable_eplb: bool = False,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 131072)
        # DecoderLayers are created with `make_layers` which passes the prefix
        # with the layer's index.
        self.layer_idx = extract_layer_index(prefix)
        self.self_attn = AfmoeAttention(
            config=config,
            layer_idx=self.layer_idx,
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            max_position_embeddings=max_position_embeddings,
            head_dim=config.head_dim,
            rms_norm_eps=config.rms_norm_eps,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        # MoE or dense FFN
        self.moe_enabled = self.layer_idx >= config.num_dense_layers
        if self.moe_enabled:
            self.mlp = AfmoeMoE(
                config=config,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
                enable_eplb=enable_eplb,
            )
        else:
            self.mlp = AfmoeMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.pre_mlp_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_mlp_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # First layer of the rank: seed the residual stream; later layers
        # get a fused add+norm from RMSNorm's two-argument form.
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)  # attn norm b
        # Fully Connected
        hidden_states, residual = self.pre_mlp_layernorm(  # ffn norm a
            hidden_states, residual
        )
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_layernorm(hidden_states)  # ffn norm b
        return hidden_states, residual
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
    }
)
class AfmoeModel(nn.Module):
    """AfMoE decoder stack: embeddings, decoder layers, and final norm.

    Supports pipeline parallelism: only the first PP rank owns the
    embedding and only the last owns the final norm; other components are
    ``PPMissingLayer`` placeholders.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        enable_eplb = vllm_config.parallel_config.enable_eplb
        self.config = config
        self.vocab_size = config.vocab_size
        self.mup_enabled = config.mup_enabled
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size, config.hidden_size, prefix=f"{prefix}.embed_tokens"
            )
        else:
            self.embed_tokens = PPMissingLayer()
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: AfmoeDecoderLayer(
                config=config,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=prefix,
                enable_eplb=enable_eplb,
            ),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        # Layer indices (relative to this rank's first layer) whose hidden
        # states are captured for EAGLE3 speculative decoding.
        self.aux_hidden_state_layers = tuple[int, ...]()
        # NOTE(review): this instance attribute shadows the
        # make_empty_intermediate_tensors method defined further below.
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings (first PP rank only)."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            # Apply muP input scaling if enabled
            if self.mup_enabled:
                hidden_states = hidden_states * (self.config.hidden_size**0.5)
            residual = None
        else:
            # Non-first PP ranks resume from the previous rank's tensors.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        aux_hidden_states = []
        for idx, layer in enumerate(
            islice(self.layers, self.start_layer, self.end_layer)
        ):
            # idx is relative to this rank's first layer; capture the
            # pre-layer state (with residual folded in) for EAGLE3.
            if idx in self.aux_hidden_state_layers:
                aux_hidden_states.append(
                    hidden_states + residual if residual is not None else hidden_states
                )
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        if len(aux_hidden_states) > 0:
            return hidden_states, aux_hidden_states
        return hidden_states

    # NOTE(review): shadowed by the same-named instance attribute assigned
    # in __init__ (from make_empty_intermediate_tensors_factory), so this
    # method is effectively dead on instances — confirm before removing.
    def make_empty_intermediate_tensors(
        self, batch_size: int, dtype: torch.dtype, device: torch.device
    ) -> IntermediateTensors:
        return IntermediateTensors(
            {
                "hidden_states": torch.zeros(
                    (batch_size, self.config.hidden_size), dtype=dtype, device=device
                ),
                "residual": torch.zeros(
                    (batch_size, self.config.hidden_size), dtype=dtype, device=device
                ),
            }
        )

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        return SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.num_experts,
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, handling stacked and expert params.

        Returns the set of parameter names that were loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                # Attention's gate_proj is a real (unstacked) parameter, not
                # part of gate_up_proj.
                if (weight_name not in name) or ("self_attn.gate_proj" in name):
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                is_expert_weight = False
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    # Anyway, this is an expert weight and should not be
                    # attempted to load as other weights later
                    is_expert_weight = True
                    # Do not modify `name` since the loop may continue here
                    # Instead, create a new variable
                    name_mapped = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name_mapped, self):
                        continue
                    param = params_dict[name_mapped]
                    # We should ask the weight loader to return success or not
                    # here since otherwise we may skip experts with other
                    # available replicas.
                    weight_loader = typing.cast(
                        Callable[..., bool], param.weight_loader
                    )
                    success = weight_loader(
                        param,
                        loaded_weight,
                        name_mapped,
                        shard_id=shard_id,
                        expert_id=expert_id,
                        return_success=True,
                    )
                    if success:
                        name = name_mapped
                        break
                else:
                    if is_expert_weight:
                        # We've checked that this is an expert weight
                        # However it's not mapped locally to this rank
                        # So we simply skip it
                        continue
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class AfmoeForCausalLM(nn.Module, SupportsPP, SupportsEagle3, SupportsLoRA):
    """AfMoE causal LM head on top of :class:`AfmoeModel`.

    Supports pipeline parallelism, EAGLE3 speculative decoding, LoRA,
    and exposes the MoE/EPLB bookkeeping used by the expert load balancer.
    """

    # Checkpoint sub-projections fused into single vLLM parameters.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # HF checkpoint name suffixes remapped to vLLM parameter names.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_suffix={
            ".router.gate.weight": ".gate.weight",
        },
    )

    fall_back_to_pt_during_load = False

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.model = AfmoeModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # LM head lives on the last PP rank only.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size, config.hidden_size, quant_config=quant_config
            )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )
        # Filled in by set_eplb_state(); one entry per local MoE layer.
        self.expert_weights = []
        # Set MoE hyperparameters
        self.num_moe_layers = config.num_hidden_layers - config.num_dense_layers
        self.num_expert_groups = config.n_group
        self.moe_layers: list[SharedFusedMoE] = []
        example_moe = None
        for layer in self.model.layers:
            if isinstance(layer, PPMissingLayer):
                continue
            assert isinstance(layer, AfmoeDecoderLayer)
            if layer.moe_enabled:
                example_moe = layer.mlp
                self.moe_layers.append(layer.mlp.experts)
        if example_moe is None and self.num_moe_layers > 0:
            raise RuntimeError("No AfmoeMoE layer found in model.layers.")
        # All MoE layers share the same expert-count configuration, so any
        # one of them can provide the EPLB bookkeeping numbers.
        if example_moe is not None:
            self.num_logical_experts = example_moe.n_logical_experts
            self.num_physical_experts = example_moe.n_physical_experts
            self.num_local_physical_experts = example_moe.n_local_physical_experts
            self.num_routed_experts = example_moe.n_routed_experts
            self.num_shared_experts = example_moe.n_shared_experts
            self.num_redundant_experts = example_moe.n_redundant_experts

    def set_eplb_state(
        self,
        expert_load_view: torch.Tensor,
        logical_to_physical_map: torch.Tensor,
        logical_replica_count: torch.Tensor,
    ) -> None:
        """Wire shared EPLB state tensors into each local MoE layer."""
        for layer_idx, layer in enumerate(self.moe_layers):
            # Register the expert weights.
            self.expert_weights.append(layer.get_expert_weights())
            layer.set_eplb_state(
                moe_layer_idx=layer_idx,
                expert_load_view=expert_load_view,
                logical_to_physical_map=logical_to_physical_map,
                logical_replica_count=logical_replica_count,
            )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.embed_input_ids(input_ids)

    def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
        """Select which layers' hidden states are captured for EAGLE3."""
        self.model.aux_hidden_state_layers = layers

    def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
        # Default EAGLE3 capture points: early, middle, and late layers.
        num_layers = len(self.model.layers)
        return (2, num_layers // 2, num_layers - 3)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        return self.model.get_expert_mapping()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/afmoe.py",
"license": "Apache License 2.0",
"lines": 627,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/afmoe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from transformers.configuration_utils import PretrainedConfig
class AfmoeConfig(PretrainedConfig):
    """Configuration for the AFMoE architecture.

    Stores dense-transformer hyperparameters (sizes, attention layout, RoPE)
    and mixture-of-experts routing knobs verbatim as attributes. Any
    remaining keyword arguments are forwarded to ``PretrainedConfig``.
    """
    model_type = "afmoe"
    def __init__(
        self,
        vocab_size: int = 200_192,
        hidden_size: int = 2048,
        intermediate_size: int = 6144,
        moe_intermediate_size: int = 1408,
        num_hidden_layers: int = 32,
        num_dense_layers: int = 1,
        num_attention_heads: int = 16,
        num_key_value_heads: int | None = None,
        head_dim: int = 128,
        hidden_act: str = "silu",
        max_position_embeddings: int = 131072,
        initializer_range: float = 0.02,
        rms_norm_eps: float = 1e-5,
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        rope_parameters: dict | None = None,
        rope_scaling: dict | None = None,
        num_experts: int = 64,
        num_experts_per_tok: int = 6,
        num_shared_experts: int = 2,
        num_expert_groups: int = 1,
        num_limited_groups: int = 1,
        score_func: str = "sigmoid",
        route_norm: bool = True,
        route_scale: float = 1.0,
        global_attn_every_n_layers: int = 4,
        sliding_window: int = 2048,
        layer_types: list[str] | None = None,
        attention_dropout: float = 0.0,
        mup_enabled: bool = False,
        n_group: int = 1,
        topk_group: int = 1,
        **kwargs,
    ):
        # --- dense transformer dimensions ---
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_dense_layers = num_dense_layers
        self.num_attention_heads = num_attention_heads
        # MHA by default: KV heads fall back to the attention head count.
        self.num_key_value_heads = num_key_value_heads or num_attention_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept a legacy `rope_theta` kwarg; fold it into rope_parameters
        # when the caller did not supply rope_parameters explicitly.
        rope_theta = kwargs.pop("rope_theta", 10000.0)
        if rope_parameters is None:
            rope_parameters = {"rope_type": "default", "rope_theta": rope_theta}
        self.rope_parameters = rope_parameters
        self.rope_scaling = rope_scaling
        # --- mixture-of-experts / routing knobs ---
        self.moe_intermediate_size = moe_intermediate_size
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.num_shared_experts = num_shared_experts
        self.num_expert_groups = num_expert_groups
        self.num_limited_groups = num_limited_groups
        self.score_func = score_func
        self.route_norm = route_norm
        self.route_scale = route_scale
        # --- attention layout / misc ---
        self.global_attn_every_n_layers = global_attn_every_n_layers
        self.sliding_window = sliding_window
        self.layer_types = layer_types
        self.attention_dropout = attention_dropout
        self.mup_enabled = mup_enabled
        self.n_group = n_group
        self.topk_group = topk_group
        # Called last so PretrainedConfig consumes the remaining kwargs
        # (rope_theta has already been popped above).
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
# Explicit public API for `from ... import *` consumers.
__all__ = ["AfmoeConfig"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/afmoe.py",
"license": "Apache License 2.0",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/determinism/test_online_batch_invariance.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
HTTP-based batch invariance test: send requests to a running
vLLM server and compare BS=1 vs BS=N results (tokens and per-step logprobs).
Environment variables:
- VLLM_TEST_MODEL: served model name (e.g., Qwen/Qwen3-1.7B / DeepSeek-R1)
- VLLM_TP_SIZE: tensor parallelism size (e.g., 4)
"""
import os
import random
import sys
from typing import Any
import openai
import pytest
from utils import BACKENDS, _random_prompt, resolve_model_name, skip_unsupported
from tests.utils import RemoteOpenAIServer
def _request_completion(
client: openai.OpenAI,
model: str,
prompt: Any,
sp: dict[str, Any],
max_retries: int = 3,
retry_backoff: float = 0.5,
) -> dict[str, Any] | None:
payload: dict[str, Any] = {"model": model, "prompt": prompt}
payload.update(sp)
for attempt in range(max_retries + 1):
try:
completion = client.completions.create(**payload)
# Convert to plain dict so downstream logic can keep using
# dict-style access just like with raw HTTP JSON.
return completion.model_dump()
except Exception as e: # pragma: no cover
if attempt < max_retries:
import time as _t
_t.sleep(retry_backoff * (2**attempt))
continue
sys.stderr.write(f"Error: {e}\n")
return None
return None
def _extract_tokens_and_logprobs(
choice: dict[str, Any],
) -> tuple[list[Any], list[float] | None]:
tokens: list[Any] = []
token_logprobs: list[float] | None = None
lp = choice.get("logprobs")
if lp and isinstance(lp, dict):
tokens = lp.get("token_ids") or lp.get("tokens") or []
token_logprobs = lp.get("token_logprobs", None)
return tokens, token_logprobs
def _compare_bs1_vs_bsn_single_process(
    prompts: list[str],
    sp_kwargs: dict[str, Any],
    client: openai.OpenAI,
    model_name: str,
) -> None:
    """Assert bitwise batch invariance over the HTTP API.

    Sends each prompt individually (batch size 1), then all prompts in a
    single batched request (batch size N), and raises AssertionError on any
    mismatch in sampled tokens or per-step logprobs (exact equality).
    """
    # BS=1
    bs1_tokens_per_prompt: list[list[Any]] = []
    bs1_logprobs_per_prompt: list[list[float] | None] = []
    for p in prompts:
        resp = _request_completion(client, model_name, p, sp_kwargs)
        if resp is None or not resp.get("choices"):
            raise AssertionError("BS=1 empty/failed response")
        choice = resp["choices"][0]
        toks, lps = _extract_tokens_and_logprobs(choice)
        if lps is None:
            raise AssertionError(
                "logprobs not returned; ensure server supports 'logprobs'"
            )
        bs1_tokens_per_prompt.append(list(toks))
        bs1_logprobs_per_prompt.append(list(lps))
    # BS=N
    # Pre-sized with None placeholders; filled by index so results line up
    # with the original prompt order.
    bsN_tokens_per_prompt: list[list[Any]] = [None] * len(prompts)  # type: ignore[list-item]
    bsN_logprobs_per_prompt: list[list[float] | None] = [None] * len(prompts)
    resp = _request_completion(client, model_name, prompts, sp_kwargs)
    if resp is None or not resp.get("choices"):
        raise AssertionError("BS=N empty/failed batched response")
    choices = resp.get("choices", [])
    if len(choices) != len(prompts):
        raise AssertionError(
            f"BS=N choices length {len(choices)} != num prompts {len(prompts)}"
        )
    for idx, choice in enumerate(choices):
        toks, lps = _extract_tokens_and_logprobs(choice)
        if lps is None:
            raise AssertionError(f"BS=N missing logprobs for prompt {idx}")
        bsN_tokens_per_prompt[idx] = list(toks)
        bsN_logprobs_per_prompt[idx] = list(lps)
    # compare: tokens first, then per-step logprobs, with exact (bitwise)
    # float equality — any tolerance would defeat the purpose of the test.
    for i, (tokens_bs1, tokens_bsN, logprobs_bs1, logprobs_bsN) in enumerate(
        zip(
            bs1_tokens_per_prompt,
            bsN_tokens_per_prompt,
            bs1_logprobs_per_prompt,
            bsN_logprobs_per_prompt,
        )
    ):
        if tokens_bs1 != tokens_bsN:
            raise AssertionError(
                f"Prompt {i} (sampling): Different tokens sampled. "
                f"BS=1 tokens: {tokens_bs1} BS=N tokens: {tokens_bsN}"
            )
        if logprobs_bs1 is None or logprobs_bsN is None:
            raise AssertionError(f"Prompt {i}: Missing logprobs in one of the runs")
        if len(logprobs_bs1) != len(logprobs_bsN):
            raise AssertionError(
                f"Prompt {i}: Different number of steps: "
                f"{len(logprobs_bs1)} (BS=1) vs {len(logprobs_bsN)} (BS=N)."
            )
        for t, (a, b) in enumerate(zip(logprobs_bs1, logprobs_bsN)):
            if a != b:
                diff = abs(a - b)
                raise AssertionError(
                    f"Prompt {i} Step {t}: Bitwise mismatch "
                    f"(abs diff={diff:.6e}). "
                    f"BS=1 tokens: {tokens_bs1} BS=N tokens: {tokens_bsN}"
                )
@skip_unsupported
@pytest.mark.parametrize("backend", BACKENDS)
def test_logprobs_bitwise_batch_invariance_bs1_vs_bsN(
    backend: str,
) -> None:
    """Spin up a vLLM server for `backend` and assert that BS=1 and BS=N
    runs of the same prompts produce bitwise-identical tokens/logprobs."""
    # Seed only affects the random prompt generation below, not the server.
    random.seed(int(os.getenv("VLLM_TEST_SEED", "12345")))
    model_name = resolve_model_name(backend)
    prompts_all = [_random_prompt(10, 50) for _ in range(32)]
    sp_kwargs: dict[str, Any] = {
        "temperature": 0.6,
        "top_p": 1.0,
        "max_tokens": 8,
        "seed": 42,
        "logprobs": 5,
    }
    tp_size = os.getenv("VLLM_TP_SIZE", "1")
    server_args: list[str] = [
        "--max-model-len=8192",
        "--max-num-seqs=32",
        f"--attention-backend={backend}",
    ]
    if tp_size:
        server_args += ["-tp", tp_size]
    with RemoteOpenAIServer(model_name, server_args) as server:
        client = server.get_client()
        _compare_bs1_vs_bsn_single_process(
            prompts=prompts_all,
            sp_kwargs=sp_kwargs,
            client=client,
            model_name=model_name,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/determinism/test_online_batch_invariance.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/determinism/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import random
import pytest
import torch
from vllm.platforms import current_platform
from vllm.v1.attention.backends.fa_utils import flash_attn_supports_mla
# Skip marker: these determinism tests require CUDA with capability >= 8.0.
skip_unsupported = pytest.mark.skipif(
    not (current_platform.is_cuda() and current_platform.has_device_capability(80)),
    # Supports testing on Ampere and Ada Lovelace devices.
    # Note: For devices with SM < 90, batch invariance does not support CUDA Graphs.
    reason="Requires CUDA and >= Ampere (SM80)",
)
# Attention backends exercised by the batch-invariance tests.
BACKENDS: list[str] = [
    "FLASH_ATTN",
    "TRITON_ATTN",
    "TRITON_MLA",
]
# FlashInfer temporarily disabled due to invariant CTA sizes.
# See FlashInfer issue #2424
# if has_flashinfer():
#     BACKENDS.append("FLASHINFER")
if flash_attn_supports_mla():
    BACKENDS.append("FLASH_ATTN_MLA")
# Default model, plus the model substituted when an MLA backend is selected.
DEFAULT_MODEL = "Qwen/Qwen3-1.7B"
MLA_MODEL = "deepseek-ai/DeepSeek-V2-Lite-Chat"
def resolve_model_name(backend: str) -> str:
    """Pick the model for `backend`: honor VLLM_TEST_MODEL when set, but
    substitute the MLA model for MLA backends when no override was given."""
    requested = os.getenv("VLLM_TEST_MODEL", DEFAULT_MODEL)
    needs_mla_model = backend.endswith("MLA") and requested == DEFAULT_MODEL
    return MLA_MODEL if needs_mla_model else requested
def _random_prompt(min_words: int = 1024, max_words: int = 1024 * 2) -> str:
# Generate more realistic prompts that will actually produce varied tokens
# Use a mix of common English text patterns
prompt_templates = [
# Question-answer style
"Question: What is the capital of France?\nAnswer: The capital of France is",
"Q: How does photosynthesis work?\nA: Photosynthesis is the process by which",
"User: Can you explain quantum mechanics?\nAssistant: Quantum mechanics is",
# Story/narrative style
"Once upon a time in a distant galaxy, there lived",
"The old man walked slowly down the street, remembering",
"In the year 2157, humanity finally discovered",
# Technical/code style
"To implement a binary search tree in Python, first we need to",
"The algorithm works by iterating through the array and",
"Here's how to optimize database queries using indexing:",
# Factual/informative style
"The Renaissance was a period in European history that",
"Climate change is caused by several factors including",
"The human brain contains approximately 86 billion neurons which",
# Conversational style
"I've been thinking about getting a new laptop because",
"Yesterday I went to the store and bought",
"My favorite thing about summer is definitely",
]
# Pick a random template
base_prompt = random.choice(prompt_templates)
if max_words < min_words:
max_words = min_words
target_words = random.randint(min_words, max_words)
if target_words > 50:
# For longer prompts, repeat context
padding_text = (
" This is an interesting topic that deserves more explanation. "
# TODO: Update to * (target_words // 10) to better align with word ratio
* (target_words // 50)
)
base_prompt = padding_text + base_prompt
return base_prompt
def _extract_step_logprobs(request_output):
if getattr(request_output, "outputs", None):
inner = request_output.outputs[0]
if hasattr(inner, "logprobs") and inner.logprobs is not None:
t = torch.tensor(
[
inner.logprobs[i][tid].logprob
for i, tid in enumerate(inner.token_ids)
],
dtype=torch.float32,
)
return t, inner.token_ids
return None, None
def is_device_capability_below_90() -> bool:
    """True when the current device's compute capability is below 9.0."""
    is_cc90_or_newer = current_platform.has_device_capability(90)
    return not is_cc90_or_newer
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/determinism/utils.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/model_executor/test_eagle_quantization.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import Mock, patch
import pytest
import torch
from vllm.config import LoadConfig, ModelConfig, SpeculativeConfig, VllmConfig
from vllm.model_executor.models.utils import get_draft_quant_config
from vllm.platforms import current_platform
# Devices to parametrize GPU-dependent tests over: up to two CUDA devices on
# a CUDA-like platform, otherwise plain CPU.
DEVICES = (
    [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
    if current_platform.is_cuda_alike()
    else ["cpu"]
)
def test_get_draft_quant_config_with_draft_model():
    """With a draft model configured, get_draft_quant_config must resolve the
    quantization config via VllmConfig.get_quantization_config, passing the
    draft model config and the load config."""
    mock_draft_model_config = Mock(spec=ModelConfig)
    mock_load_config = Mock(spec=LoadConfig)
    mock_speculative_config = Mock(spec=SpeculativeConfig)
    mock_speculative_config.draft_model_config = mock_draft_model_config
    mock_vllm_config = Mock(spec=VllmConfig)
    mock_vllm_config.speculative_config = mock_speculative_config
    mock_vllm_config.load_config = mock_load_config
    mock_quant_config = Mock()
    with patch.object(
        VllmConfig, "get_quantization_config", return_value=mock_quant_config
    ):
        result = get_draft_quant_config(mock_vllm_config)
        # Verify the function calls get_quantization_config with draft model config.
        # Asserted inside the patch context, while the mock is still in place.
        VllmConfig.get_quantization_config.assert_called_once_with(
            mock_draft_model_config, mock_load_config
        )
        assert result == mock_quant_config
def test_get_draft_quant_config_without_draft_model():
    """No draft model configured -> the helper must return None."""
    spec_config = Mock(spec=SpeculativeConfig)
    spec_config.draft_model_config = None
    vllm_config = Mock(spec=VllmConfig)
    vllm_config.speculative_config = spec_config
    vllm_config.load_config = Mock(spec=LoadConfig)
    assert get_draft_quant_config(vllm_config) is None
@torch.inference_mode()
@pytest.mark.parametrize("device", DEVICES)
def test_fc_layer_quant_config_usage(default_vllm_config, dist_init, device) -> None:
    """ReplicatedLinear must retain the quant_config it is constructed with
    (including None) and still produce correctly shaped outputs when
    unquantized."""
    # Deferred project import; torch itself is already imported at module
    # scope (the decorator above uses it), so the previous function-local
    # `import torch` was redundant and has been removed.
    from vllm.model_executor.layers.linear import ReplicatedLinear

    if current_platform.is_cuda_alike():
        torch.cuda.set_device(device)
    torch.set_default_device(device)
    input_size = 256
    output_size = 128
    fc_no_quant = ReplicatedLinear(
        input_size=input_size,
        output_size=output_size,
        bias=False,
        params_dtype=torch.float16,
        quant_config=None,
        prefix="fc",
    )
    assert fc_no_quant.quant_config is None
    assert fc_no_quant.input_size == input_size
    assert fc_no_quant.output_size == output_size
    mock_quant_config = Mock()
    fc_with_quant = ReplicatedLinear(
        input_size=input_size,
        output_size=output_size,
        bias=False,
        params_dtype=torch.float16,
        quant_config=mock_quant_config,
        prefix="fc",
    )
    assert fc_with_quant.quant_config == mock_quant_config
    # Forward-pass sanity check on the unquantized layer.
    x = torch.randn(2, input_size, dtype=torch.float16)
    output, _ = fc_no_quant(x)
    assert output.shape == (2, output_size)
def test_kv_cache_scale_name_handling():
    """A quant config that maps weight names to KV-cache scale names must be
    queried exactly once and return the mapped name."""
    quant_config = Mock()
    quant_config.get_cache_scale = Mock(return_value="layers.0.self_attn.kv_scale")
    # Same lookup shape as the condition check in load_weights.
    weight_name = "layers.0.self_attn.k_proj.weight"
    resolved = quant_config.get_cache_scale(weight_name)
    assert resolved == "layers.0.self_attn.kv_scale"
    quant_config.get_cache_scale.assert_called_once_with(weight_name)
def test_kv_cache_scale_name_no_scale():
    """Weights without an associated KV-cache scale must resolve to None."""
    quant_config = Mock()
    quant_config.get_cache_scale = Mock(return_value=None)
    resolved = quant_config.get_cache_scale("layers.0.mlp.gate_proj.weight")
    assert resolved is None
def test_maybe_remap_kv_scale_name():
    """Remapping yields a known param name, the input unchanged, or None."""
    from vllm.model_executor.model_loader.weight_utils import maybe_remap_kv_scale_name

    known_params = {
        "layers.0.self_attn.kv_scale": Mock(),
        "layers.1.self_attn.kv_scale": Mock(),
    }
    candidate = "layers.0.self_attn.some_scale"
    result = maybe_remap_kv_scale_name(candidate, known_params)
    assert result is None or result == candidate or result in known_params
def test_load_weights_kv_scale_handling():
    """Exercise the load_weights branch that routes k_proj weights into the
    matching KV-cache scale parameter."""
    scale_param = Mock()
    scale_param.weight_loader = Mock()
    params_dict = {
        "layers.0.self_attn.kv_scale": scale_param,
    }
    quant_config = Mock()
    quant_config.get_cache_scale = Mock(return_value="layers.0.self_attn.kv_scale")
    weight_name = "layers.0.self_attn.k_proj.weight"
    loaded_weight = torch.tensor([1.0, 2.0])
    # Mirrors the structure of the production load_weights loop.
    if quant_config is not None:
        mapped = quant_config.get_cache_scale(weight_name)
        if mapped:
            target = params_dict[mapped]
            assert target is scale_param
            # Scalar tensors are used as-is; vectors contribute element 0.
            chosen = loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
            assert mapped == "layers.0.self_attn.kv_scale"
            assert chosen == loaded_weight[0]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/model_executor/test_eagle_quantization.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/distributed/test_multiproc_executor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Integration tests for MultiprocExecutor at the executor level.
This test directly tests the executor without going through the LLM interface,
focusing on executor initialization, RPC calls, and distributed execution.
"""
import multiprocessing
import os
from tests.utils import multi_gpu_test
from vllm.config import VllmConfig
from vllm.engine.arg_utils import EngineArgs
from vllm.utils import get_open_port
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.executor.multiproc_executor import MultiprocExecutor
# Tiny model keeps executor spin-up and weight loading fast in CI.
MODEL = "facebook/opt-125m"
def create_vllm_config(
    tensor_parallel_size: int = 1,
    pipeline_parallel_size: int = 1,
    max_model_len: int = 256,
    gpu_memory_utilization: float = 0.3,
    distributed_executor_backend: str = "mp",
    nnodes: int = 1,
    node_rank: int = 0,
    master_port: int = 0,
) -> VllmConfig:
    """Create a VllmConfig for testing using EngineArgs."""
    config = EngineArgs(
        model=MODEL,
        tensor_parallel_size=tensor_parallel_size,
        pipeline_parallel_size=pipeline_parallel_size,
        max_model_len=max_model_len,
        gpu_memory_utilization=gpu_memory_utilization,
        distributed_executor_backend=distributed_executor_backend,
        enforce_eager=True,
    ).create_engine_config()
    # Multi-node simulation: patch distributed settings after construction.
    multi_node = nnodes > 1
    if multi_node or node_rank > 0:
        config.parallel_config.nnodes = nnodes
        config.parallel_config.node_rank = node_rank
        config.parallel_config.master_port = master_port
    if multi_node:
        config.parallel_config.disable_custom_all_reduce = True
    return config
def create_test_scheduler_output(num_requests: int = 1) -> SchedulerOutput:
    """Build a minimal, empty SchedulerOutput for testing.

    Simplified: a real one needs proper construction per the vLLM v1 API.
    """
    empty_fields = dict(
        scheduled_new_reqs=[],
        scheduled_resumed_reqs=[],
        scheduled_running_reqs=[],
        num_scheduled_tokens={},
        total_num_scheduled_tokens=0,
    )
    return SchedulerOutput(**empty_fields)
def test_multiproc_executor_initialization():
    """Test that MultiprocExecutor can be initialized with proper config."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
    )
    # Create executor - this should initialize workers
    executor = MultiprocExecutor(vllm_config=vllm_config)
    # Verify executor properties
    # NOTE(review): no try/finally here — a failed assert leaks the worker
    # process until test-session teardown.
    assert executor.world_size == 1, "World size should be 1 for single GPU"
    assert executor.local_world_size == 1, "Local world size should be 1"
    assert hasattr(executor, "workers"), "Executor should have workers"
    assert len(executor.workers) == 1, "Should have 1 worker for single GPU"
    # Clean up
    executor.shutdown()
@multi_gpu_test(num_gpus=2)
def test_multiproc_executor_initialization_tensor_parallel():
    """Test MultiprocExecutor initialization with tensor parallelism."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=2,
        pipeline_parallel_size=1,
    )
    # Create executor
    executor = MultiprocExecutor(vllm_config=vllm_config)
    # Verify executor properties
    # NOTE(review): no try/finally — failed asserts leak worker processes.
    assert executor.world_size == 2, "World size should be 2 for TP=2"
    assert executor.local_world_size == 2, "Local world size should be 2"
    assert len(executor.workers) == 2, "Should have 2 workers for TP=2"
    # Verify output rank calculation: with a single PP stage the output
    # comes from rank 0.
    output_rank = executor._get_output_rank()
    assert output_rank == 0, "Output rank should be 0 for TP=2, PP=1"
    # Clean up
    executor.shutdown()
@multi_gpu_test(num_gpus=2)
def test_multiproc_executor_collective_rpc():
    """Test collective RPC calls to all workers."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=2,
        pipeline_parallel_size=1,
    )
    # Create executor
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Test check_health RPC - should work without errors
        executor.check_health()
        # Test that RPC works correctly
        # Note: We're just testing that the RPC mechanism works,
        # not testing actual model execution here
        assert not executor.is_failed, "Executor should not be in failed state"
    finally:
        # Clean up
        executor.shutdown()
def test_multiproc_executor_failure_callback():
    """Test failure callback registration and invocation."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Test callback registration
        callback_invoked = []
        def test_callback():
            callback_invoked.append(True)
        # Register callback
        executor.register_failure_callback(test_callback)
        # Callback should not be invoked yet
        assert len(callback_invoked) == 0, "Callback should not be invoked immediately"
        # Simulate failure by flipping the flag directly (no real worker
        # crash is triggered).
        executor.is_failed = True
        # Register another callback - should be invoked immediately
        executor.register_failure_callback(test_callback)
        assert len(callback_invoked) == 1, (
            "Callback should be invoked when executor is failed"
        )
    finally:
        # Clean up
        executor.shutdown()
@multi_gpu_test(num_gpus=2)
def test_multiproc_executor_worker_monitor():
    """Test that worker monitor is set up correctly."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=2,
        pipeline_parallel_size=1,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Verify all worker processes are alive
        for worker in executor.workers:
            assert worker.proc.is_alive(), f"Worker rank {worker.rank} should be alive"
        # Verify executor is not in failed state
        assert not executor.is_failed, "Executor should not be in failed state"
    finally:
        # Clean up
        executor.shutdown()
    # After shutdown, workers should be terminated
    import time
    # Fixed grace period; process termination is asynchronous.
    time.sleep(0.5)  # Give processes time to terminate
    for worker in executor.workers:
        assert not worker.proc.is_alive(), (
            f"Worker rank {worker.rank} should terminate after shutdown"
        )
@multi_gpu_test(num_gpus=2)
def test_multiproc_executor_get_response_message_queues():
    """Test message queue retrieval for different ranks."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=2,
        pipeline_parallel_size=1,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Get all message queues (one per worker)
        all_queues = executor.get_response_mqs()
        assert len(all_queues) == 2, "Should have 2 message queues for 2 workers"
        # Get message queue for specific rank
        rank0_queue = executor.get_response_mqs(unique_reply_rank=0)
        assert len(rank0_queue) == 1, "Should have 1 message queue for rank 0"
        rank1_queue = executor.get_response_mqs(unique_reply_rank=1)
        assert len(rank1_queue) == 1, "Should have 1 message queue for rank 1"
    finally:
        # Clean up
        executor.shutdown()
def test_multiproc_executor_shutdown_cleanup():
    """Test that shutdown properly cleans up resources."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    # Verify executor is set up
    assert hasattr(executor, "workers"), "Executor should have workers"
    assert len(executor.workers) > 0, "Should have at least one worker"
    # Shutdown
    executor.shutdown()
    # Verify cleanup
    import time
    # Fixed grace period; process termination is asynchronous.
    time.sleep(0.5)  # Give processes time to terminate
    for worker in executor.workers:
        assert not worker.proc.is_alive(), "Worker processes should be terminated"
    # Verify shutdown event is set
    assert executor.shutdown_event.is_set(), "Shutdown event should be set"
    # Multiple shutdowns should be safe (idempotent)
    executor.shutdown()
    executor.shutdown()
@multi_gpu_test(num_gpus=4)
def test_multiproc_executor_pipeline_parallel():
    """Test MultiprocExecutor with pipeline parallelism."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=2,
        pipeline_parallel_size=2,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Verify executor properties
        assert executor.world_size == 4, "World size should be 4 for TP=2, PP=2"
        assert len(executor.workers) == 4, "Should have 4 workers"
        # Verify output rank calculation
        # For TP=2, PP=2: output should be from the last PP stage (ranks 2-3)
        # Specifically rank 2 (first rank of last PP stage)
        output_rank = executor._get_output_rank()
        assert output_rank == 2, "Output rank should be 2 (first rank of last PP stage)"
        # Verify max_concurrent_batches for pipeline parallel
        assert executor.max_concurrent_batches == 2, (
            "Max concurrent batches should equal PP size"
        )
    finally:
        # Clean up
        executor.shutdown()
def test_multiproc_executor_properties():
    """Test various executor properties and configurations."""
    vllm_config = create_vllm_config(
        tensor_parallel_size=1,
        pipeline_parallel_size=1,
    )
    executor = MultiprocExecutor(vllm_config=vllm_config)
    try:
        # Test supports_pp class attribute
        assert MultiprocExecutor.supports_pp is True, (
            "MultiprocExecutor should support pipeline parallelism"
        )
        # Test world_size calculation
        assert executor.world_size == (
            executor.parallel_config.tensor_parallel_size
            * executor.parallel_config.pipeline_parallel_size
        ), "World size should equal TP * PP"
        # Test local_world_size calculation
        assert executor.local_world_size == (
            executor.parallel_config.world_size // executor.parallel_config.nnodes
        ), "Local world size should be world_size / nnodes"
    finally:
        # Clean up
        executor.shutdown()
@multi_gpu_test(num_gpus=4)
def test_multiproc_executor_multi_node():
    """
    Test MultiprocExecutor with multi-node configuration.
    This simulates 2 nodes with TP=4:
    - Node 0 (rank 0): Uses GPUs 0,1 (CUDA_VISIBLE_DEVICES=0,1) with TP=2
    - Node 1 (rank 1): Uses GPUs 2,3 (CUDA_VISIBLE_DEVICES=2,3) with TP=2
    Total world_size = 4, nnodes = 2
    """
    port = get_open_port()
    # symm_mem does not work for simulating multi instance in single node
    os.environ["VLLM_ALLREDUCE_USE_SYMM_MEM"] = "0"

    def run_node(node_rank: int, result_queue: multiprocessing.Queue, port: int):
        """Run one simulated node's executor; report success/failure via queue."""
        import time

        executor = None
        try:
            # Give each simulated node a disjoint pair of GPUs.
            if node_rank == 0:
                os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
            else:
                os.environ["CUDA_VISIBLE_DEVICES"] = "2,3"
            # Create config for this node
            vllm_config = create_vllm_config(
                tensor_parallel_size=4,  # Total TP across all nodes
                pipeline_parallel_size=1,
                nnodes=2,  # 2 nodes
                node_rank=node_rank,
                master_port=port,  # both nodes rendezvous on the same port
            )
            # Create executor for this node
            executor = MultiprocExecutor(vllm_config=vllm_config)
            # Verify node-specific properties
            assert executor.world_size == 4, (
                f"World size should be 4 on node {node_rank}"
            )
            assert executor.local_world_size == 2, (
                f"Local world size should be 2 on node {node_rank}"
            )
            assert len(executor.workers) == 2, (
                f"Should have 2 local workers on node {node_rank}"
            )
            # Verify worker ranks are correct for this node
            expected_ranks = [node_rank * 2, node_rank * 2 + 1]
            actual_ranks = sorted([w.rank for w in executor.workers])
            assert actual_ranks == expected_ranks, (
                f"Node {node_rank} should have workers "
                f"with ranks {expected_ranks}, got {actual_ranks}"
            )
            # Verify all workers are alive
            for worker in executor.workers:
                assert worker.proc.is_alive(), (
                    f"Worker rank {worker.rank} should be alive on node {node_rank}"
                )
            # Put success result in queue BEFORE shutdown to avoid hanging
            result_queue.put({"node": node_rank, "success": True})
            # Keep the executor up briefly so the peer node can finish its
            # checks before the shared process group is torn down.
            time.sleep(2)
            executor.shutdown()
        except Exception as e:
            # Put failure result in queue
            result_queue.put({"node": node_rank, "success": False, "error": str(e)})
            raise
        finally:
            # shutdown() is idempotent, so the duplicate call is harmless.
            if executor is not None:
                executor.shutdown()

    # Create a queue to collect results from both processes
    result_queue: multiprocessing.Queue[dict[str, int | bool]] = multiprocessing.Queue()
    # Start both node processes
    processes = []
    for node_rank in range(2):
        p = multiprocessing.Process(
            target=run_node,
            args=(node_rank, result_queue, port),
            name=f"Node{node_rank}",
        )
        p.start()
        processes.append(p)
    # Wait for both processes to complete
    all_completed = True
    for p in processes:
        p.join(timeout=60)
        if p.is_alive():
            p.terminate()
            p.join(timeout=20)
            if p.is_alive():
                p.kill()
                p.join()
            all_completed = False
    # Check results from both nodes with a BOUNDED wait. The previous
    # `while len(results) < 2` loop spun forever when a node crashed before
    # posting its result, hanging the whole test session.
    results: list[dict[str, int | bool]] = []
    for _ in range(30):  # at most ~30s of additional waiting
        if len(results) >= 2:
            break
        try:
            results.append(result_queue.get(timeout=1))
        except Exception:
            pass
    assert all_completed, "Not all processes completed successfully"
    assert len(results) == 2, f"Expected 2 results, got {len(results)}"
    assert results[0]["success"], f"Node 0 failed: {results[0]}"
    assert results[1]["success"], f"Node 1 failed: {results[1]}"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_multiproc_executor.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:benchmarks/benchmark_batch_invariance.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Benchmark to measure the performance overhead of VLLM_BATCH_INVARIANT mode.
This benchmark runs the same workload twice:
1. With VLLM_BATCH_INVARIANT=0 (baseline)
2. With VLLM_BATCH_INVARIANT=1 (batch invariant mode)
And reports the timing and throughput metrics for comparison.
Environment variables:
VLLM_BENCH_MODEL: Model to benchmark (default: "Qwen/Qwen3-1.7B")
VLLM_BENCH_TP_SIZE: Tensor parallel size (default: 1, use 8 for deepseek)
VLLM_BENCH_BATCH_SIZE: Max batch size (default: 128)
VLLM_BENCH_NUM_TRIALS: Number of trials to run (default: 5)
VLLM_BENCH_MIN_PROMPT: Min prompt length in words (default: 1024)
VLLM_BENCH_MAX_PROMPT: Max prompt length in words (default: 2048)
VLLM_BENCH_MAX_TOKENS: Max tokens to generate (default: 128)
VLLM_BENCH_TEMPERATURE: Temperature for sampling (default: 0.0)
VLLM_BENCH_GPU_MEMORY_UTILIZATION: GPU memory utilization (default: 0.4)
VLLM_BENCH_MAX_MODEL_LEN: Max model length (default: 5120)
VLLM_BENCH_BACKEND: Attention backend (default: FLASH_ATTN)
Example usage:
# Benchmark qwen3 (default)
python benchmarks/benchmark_batch_invariance.py
# Benchmark deepseek with 8 GPUs
VLLM_BENCH_MODEL="deepseek-ai/DeepSeek-V3" VLLM_BENCH_TP_SIZE=8 \\
python benchmarks/benchmark_batch_invariance.py
# Quick test with fewer trials
VLLM_BENCH_NUM_TRIALS=2 VLLM_BENCH_BATCH_SIZE=32 \\
python benchmarks/benchmark_batch_invariance.py
"""
import contextlib
import os
import random
import time
from vllm import LLM, SamplingParams
from vllm.platforms import current_platform
def _random_prompt(min_words: int = 1024, max_words: int = 1024 * 2) -> str:
"""Generate a random prompt for benchmarking."""
prompt_templates = [
"Question: What is the capital of France?\nAnswer: The capital of France is",
"Q: How does photosynthesis work?\nA: Photosynthesis is the process by which",
"User: Can you explain quantum mechanics?\nAssistant: Quantum mechanics is",
"Once upon a time in a distant galaxy, there lived",
"The old man walked slowly down the street, remembering",
"In the year 2157, humanity finally discovered",
"To implement a binary search tree in Python, first we need to",
"The algorithm works by iterating through the array and",
"Here's how to optimize database queries using indexing:",
"The Renaissance was a period in European history that",
"Climate change is caused by several factors including",
"The human brain contains approximately 86 billion neurons which",
"I've been thinking about getting a new laptop because",
"Yesterday I went to the store and bought",
"My favorite thing about summer is definitely",
]
base_prompt = random.choice(prompt_templates)
if max_words < min_words:
max_words = min_words
target_words = random.randint(min_words, max_words)
if target_words > 50:
padding_text = (
" This is an interesting topic that deserves more explanation. "
* (target_words // 50)
)
base_prompt = base_prompt + padding_text
return base_prompt
def run_benchmark_with_batch_invariant(
    model: str,
    tp_size: int,
    max_batch_size: int,
    num_trials: int,
    min_prompt: int,
    max_prompt: int,
    max_tokens: int,
    temperature: float,
    gpu_mem_util: float,
    max_model_len: int,
    backend: str,
    batch_invariant: bool,
    seed: int = 12345,
) -> dict:
    """
    Run the benchmark with the specified configuration.
    Returns a dict with timing and throughput metrics.

    Args:
        model: Model name or path to load.
        tp_size: Tensor parallel size for the engine.
        max_batch_size: Upper bound on prompts per trial; actual batch size is
            sampled uniformly from [max_batch_size // 2, max_batch_size].
        num_trials: Number of timed llm.generate() calls.
        min_prompt: Minimum prompt length in words.
        max_prompt: Maximum prompt length in words.
        max_tokens: Maximum tokens to generate per request.
        temperature: Sampling temperature.
        gpu_mem_util: GPU memory utilization fraction for the engine.
        max_model_len: Maximum model (context) length.
        backend: Attention backend name.
        batch_invariant: If True, run with VLLM_BATCH_INVARIANT=1.
        seed: Seed for Python's `random` (controls batch composition, so both
            phases build identical batches).

    Returns:
        Dict with keys: init_time, avg_time, min_time, max_time, total_tokens,
        total_prompts, throughput (tokens/s), prompts_per_sec, trial_times.
    """
    random.seed(seed)
    # Set environment variables
    # NOTE(review): assumes the flag is read at engine construction time, so
    # it must be set before LLM(...) below — confirm.
    if batch_invariant:
        os.environ["VLLM_BATCH_INVARIANT"] = "1"
    else:
        os.environ["VLLM_BATCH_INVARIANT"] = "0"
    print(f"\n{'=' * 80}")
    print(f"BENCHMARK: VLLM_BATCH_INVARIANT={int(batch_invariant)}")
    print(f"  Model: {model}")
    print(f"  TP Size: {tp_size}")
    print(f"  Backend: {backend}")
    print(f"  Max Batch Size: {max_batch_size}")
    print(f"  Trials: {num_trials}")
    print(f"  Max Tokens: {max_tokens}")
    print(f"{'=' * 80}\n")
    sampling = SamplingParams(
        temperature=temperature,
        top_p=0.95,
        max_tokens=max_tokens,
        seed=20240919,
    )
    # Short fixed prompt inserted at a random position in every batch.
    needle_prompt = "There once was a "
    llm = None
    try:
        # Create LLM engine
        start_init = time.perf_counter()
        llm = LLM(
            model=model,
            max_num_seqs=max_batch_size,
            gpu_memory_utilization=gpu_mem_util,
            max_model_len=max_model_len,
            dtype="bfloat16",
            tensor_parallel_size=tp_size,
            attention_config={"backend": backend},
            enable_prefix_caching=False,
        )
        init_time = time.perf_counter() - start_init
        print(f"Engine initialization time: {init_time:.2f}s\n")
        # Generate baseline
        print("Generating baseline (warmup)...")
        baseline_out = llm.generate([needle_prompt], sampling)
        assert len(baseline_out) == 1
        baseline_text = baseline_out[0].outputs[0].text
        print(f"Baseline output: '{baseline_text[:50]}...'\n")
        # Run trials and measure timing
        trial_times: list[float] = []
        total_tokens = 0
        total_prompts = 0
        for trial in range(num_trials):
            # Create a batch
            prompts: list[str] = []
            batch_size = random.randint(max_batch_size // 2, max_batch_size)
            needle_pos = random.randint(0, batch_size - 1)
            for i in range(batch_size):
                if i == needle_pos:
                    prompts.append(needle_prompt)
                else:
                    prompts.append(_random_prompt(min_prompt, max_prompt))
            # Measure time for this trial
            start_time = time.perf_counter()
            outputs = llm.generate(prompts, sampling)
            trial_time = time.perf_counter() - start_time
            trial_times.append(trial_time)
            total_prompts += len(prompts)
            # Count tokens
            for output in outputs:
                if output.outputs:
                    total_tokens += len(output.outputs[0].token_ids)
            print(
                f"Trial {trial + 1}/{num_trials}: "
                f"batch_size={batch_size}, "
                f"time={trial_time:.2f}s"
            )
            # Verify needle output still matches
            # (indexing by needle_pos relies on outputs matching input order;
            # the assert guards that assumption)
            needle_output = outputs[needle_pos]
            assert needle_output.prompt == needle_prompt
        # Compute statistics
        avg_time = sum(trial_times) / len(trial_times)
        min_time = min(trial_times)
        max_time = max(trial_times)
        throughput = total_tokens / sum(trial_times)
        prompts_per_sec = total_prompts / sum(trial_times)
        print(f"\n{'=' * 80}")
        print("RESULTS:")
        print(f"  Average time per trial: {avg_time:.2f}s")
        print(f"  Min time: {min_time:.2f}s")
        print(f"  Max time: {max_time:.2f}s")
        print(f"  Total tokens generated: {total_tokens}")
        print(f"  Total prompts processed: {total_prompts}")
        print(f"  Throughput: {throughput:.2f} tokens/s")
        print(f"  Prompts/s: {prompts_per_sec:.2f}")
        print(f"{'=' * 80}\n")
        return {
            "init_time": init_time,
            "avg_time": avg_time,
            "min_time": min_time,
            "max_time": max_time,
            "total_tokens": total_tokens,
            "total_prompts": total_prompts,
            "throughput": throughput,
            "prompts_per_sec": prompts_per_sec,
            "trial_times": trial_times,
        }
    finally:
        # Cleanup
        if llm is not None:
            with contextlib.suppress(Exception):
                llm.shutdown()
def main():
    """Run both benchmark phases and print a comparison summary.

    Returns a process exit code: 0 on success, 1 when the platform is
    unsupported (requires CUDA with device capability >= 90, i.e. Hopper).
    """
    # Check platform support
    if not (current_platform.is_cuda() and current_platform.has_device_capability(90)):
        print("ERROR: Requires CUDA and >= Hopper (SM90)")
        print(f"Current platform: {current_platform.device_type}")
        if current_platform.is_cuda():
            print(f"Device capability: {current_platform.get_device_capability()}")
        return 1
    # Read configuration from environment
    model = os.getenv("VLLM_BENCH_MODEL", "Qwen/Qwen3-1.7B")
    tp_size = int(os.getenv("VLLM_BENCH_TP_SIZE", "1"))
    max_batch_size = int(os.getenv("VLLM_BENCH_BATCH_SIZE", "128"))
    num_trials = int(os.getenv("VLLM_BENCH_NUM_TRIALS", "5"))
    min_prompt = int(os.getenv("VLLM_BENCH_MIN_PROMPT", "1024"))
    max_prompt = int(os.getenv("VLLM_BENCH_MAX_PROMPT", "2048"))
    max_tokens = int(os.getenv("VLLM_BENCH_MAX_TOKENS", "128"))
    temperature = float(os.getenv("VLLM_BENCH_TEMPERATURE", "0.0"))
    gpu_mem_util = float(os.getenv("VLLM_BENCH_GPU_MEMORY_UTILIZATION", "0.4"))
    max_model_len = int(os.getenv("VLLM_BENCH_MAX_MODEL_LEN", "5120"))
    backend = os.getenv("VLLM_BENCH_BACKEND", "FLASH_ATTN")
    print("\n" + "=" * 80)
    print("VLLM BATCH INVARIANCE BENCHMARK")
    print("=" * 80)
    print("\nConfiguration:")
    print(f"  Model: {model}")
    print(f"  Tensor Parallel Size: {tp_size}")
    print(f"  Attention Backend: {backend}")
    print(f"  Max Batch Size: {max_batch_size}")
    print(f"  Number of Trials: {num_trials}")
    print(f"  Prompt Length Range: {min_prompt}-{max_prompt} words")
    print(f"  Max Tokens to Generate: {max_tokens}")
    print(f"  Temperature: {temperature}")
    print(f"  GPU Memory Utilization: {gpu_mem_util}")
    print(f"  Max Model Length: {max_model_len}")
    print("=" * 80)
    # Run benchmark WITHOUT batch invariance (baseline)
    print("\n" + "=" * 80)
    print("PHASE 1: Running WITHOUT batch invariance (baseline)")
    print("=" * 80)
    baseline_results = run_benchmark_with_batch_invariant(
        model=model,
        tp_size=tp_size,
        max_batch_size=max_batch_size,
        num_trials=num_trials,
        min_prompt=min_prompt,
        max_prompt=max_prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        gpu_mem_util=gpu_mem_util,
        max_model_len=max_model_len,
        backend=backend,
        batch_invariant=False,
    )
    # Run benchmark WITH batch invariance
    print("\n" + "=" * 80)
    print("PHASE 2: Running WITH batch invariance")
    print("=" * 80)
    batch_inv_results = run_benchmark_with_batch_invariant(
        model=model,
        tp_size=tp_size,
        max_batch_size=max_batch_size,
        num_trials=num_trials,
        min_prompt=min_prompt,
        max_prompt=max_prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        gpu_mem_util=gpu_mem_util,
        max_model_len=max_model_len,
        backend=backend,
        batch_invariant=True,
    )
    # Compare results
    print("\n" + "=" * 80)
    print("COMPARISON: Batch Invariance vs Baseline")
    print("=" * 80)
    # Relative deltas in percent; positive overhead => batch-invariant slower.
    init_overhead_pct = (
        (batch_inv_results["init_time"] - baseline_results["init_time"])
        / baseline_results["init_time"]
        * 100
    )
    time_overhead_pct = (
        (batch_inv_results["avg_time"] - baseline_results["avg_time"])
        / baseline_results["avg_time"]
        * 100
    )
    throughput_change_pct = (
        (batch_inv_results["throughput"] - baseline_results["throughput"])
        / baseline_results["throughput"]
        * 100
    )
    print("\nInitialization Time:")
    print(f"  Baseline: {baseline_results['init_time']:.2f}s")
    print(f"  Batch Invariant: {batch_inv_results['init_time']:.2f}s")
    print(f"  Overhead: {init_overhead_pct:+.2f}%")
    print("\nAverage Trial Time:")
    print(f"  Baseline: {baseline_results['avg_time']:.2f}s")
    print(f"  Batch Invariant: {batch_inv_results['avg_time']:.2f}s")
    print(f"  Overhead: {time_overhead_pct:+.2f}%")
    print("\nThroughput (tokens/s):")
    print(f"  Baseline: {baseline_results['throughput']:.2f}")
    print(f"  Batch Invariant: {batch_inv_results['throughput']:.2f}")
    print(f"  Change: {throughput_change_pct:+.2f}%")
    print("\nPrompts/s:")
    print(f"  Baseline: {baseline_results['prompts_per_sec']:.2f}")
    print(f"  Batch Invariant: {batch_inv_results['prompts_per_sec']:.2f}")
    print("\n" + "=" * 80)
    print("SUMMARY")
    print("=" * 80)
    if time_overhead_pct > 0:
        print(
            f"Batch invariance mode adds approximately {time_overhead_pct:.1f}% "
            "overhead"
        )
    else:
        print(
            f"Batch invariance mode is approximately {-time_overhead_pct:.1f}% "
            "faster (unexpected!)"
        )
    if abs(throughput_change_pct) < 1.0:
        print("Throughput difference is negligible (< 1%)")
    elif throughput_change_pct < 0:
        print(
            f"Throughput decreased by {-throughput_change_pct:.1f}% "
            "with batch invariance"
        )
    else:
        print(
            f"Throughput increased by {throughput_change_pct:.1f}% "
            "with batch invariance (unexpected!)"
        )
    print("=" * 80 + "\n")
    return 0
if __name__ == "__main__":
    # Propagate main()'s status code to the shell. `raise SystemExit(...)` is
    # preferred over the built-in `exit()`, which is an interactive helper
    # injected by the `site` module and not guaranteed in all runtimes.
    raise SystemExit(main())
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/benchmark_batch_invariance.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/rocm/aiter/test_grouped_quant.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# This is a test for the AITER group_fp8_quant op.
# It tests if the AITER op is
# 1. correctly defined the relationship between
# implementation and fake function
# 2. can be used with torch.compile
# 3. can be used with CUDA graphs
# This file will be skipped if AITER is not installed
# and the platform is not ROCm.
import importlib.util
import pytest
import torch
# this import statement is needed to ensure the ops are registered
from vllm._aiter_ops import rocm_aiter_ops
from vllm.platforms import current_platform
# Check if aiter package is installed
# (find_spec probes availability without importing aiter at collection time)
aiter_available = importlib.util.find_spec("aiter") is not None
# Module-level mark: every test in this file is skipped unless running on
# ROCm with the aiter package installed.
pytestmark = pytest.mark.skipif(
    not (current_platform.is_rocm() and aiter_available),
    reason="AITER ops are only available on ROCm with aiter package installed",
)
def test_rocm_aiter_group_fp8_quant_fake_implementation():
    """Check that the fake (meta) function registered for
    torch.ops.vllm.rocm_aiter_group_fp8_quant is consistent with the op."""
    num_tokens, hidden_dim, group_size = 128, 4096, 128
    x = torch.randn((num_tokens, hidden_dim), dtype=torch.bfloat16, device="cuda")
    # torch.library.opcheck with "test_faketensor" validates that the fake
    # implementation reports the same output shapes/dtypes as the real op.
    torch.library.opcheck(
        torch.ops.vllm.rocm_aiter_group_fp8_quant,
        (x, group_size),
        test_utils=("test_faketensor",),
    )
def test_rocm_aiter_group_fp8_quant_torch_compile_with_cudagraph():
    """Test that rocm_aiter_ops.group_fp8_quant
    with group size 128 can be used with
    torch.compile in cudagraph mode."""
    # Create test tensors
    M = 128
    N = 4096
    group_size = 128
    input_tensor = torch.randn((M, N), dtype=torch.bfloat16, device="cuda")
    # Define a function that uses the op
    def group_fp8_quant_fn(x):
        return rocm_aiter_ops.group_fp8_quant(x, group_size)
    # Compile with cudagraph mode
    # ("reduce-overhead" enables CUDA-graph capture in inductor)
    compiled_fn = torch.compile(
        group_fp8_quant_fn,
        fullgraph=True,
        backend="inductor",
        mode="reduce-overhead",
        dynamic=False,
    )
    # Run eager mode
    x_fp8_eager, scales_eager = group_fp8_quant_fn(input_tensor)
    # Run compiled version (first run will trigger compilation)
    x_fp8_compiled, scales_compiled = compiled_fn(input_tensor)
    # Verify shapes match
    assert x_fp8_compiled.shape == x_fp8_eager.shape
    assert scales_compiled.shape == scales_eager.shape
    # Verify expected shapes
    # (one scale per group of `group_size` columns, ceiling division)
    assert x_fp8_compiled.shape == (M, N)
    expected_scale_cols = (N + group_size - 1) // group_size
    assert scales_compiled.shape == (M, expected_scale_cols)
    # Verify results match
    # (fp8 payloads compared in float32 with loose tolerances)
    assert torch.allclose(
        x_fp8_compiled.to(torch.float32),
        x_fp8_eager.to(torch.float32),
        rtol=1e-2,
        atol=1e-2,
    )
    assert torch.allclose(scales_compiled, scales_eager, rtol=1e-3, atol=1e-3)
    # Test with different input (reusing compiled graph)
    input_tensor_2 = torch.randn((M, N), dtype=torch.bfloat16, device="cuda")
    x_fp8_eager_2, scales_eager_2 = group_fp8_quant_fn(input_tensor_2)
    x_fp8_compiled_2, scales_compiled_2 = compiled_fn(input_tensor_2)
    # Verify second run also produces correct results
    assert torch.allclose(
        x_fp8_compiled_2.to(torch.float32),
        x_fp8_eager_2.to(torch.float32),
        rtol=1e-2,
        atol=1e-2,
    )
    assert torch.allclose(scales_compiled_2, scales_eager_2, rtol=1e-3, atol=1e-3)
def test_rocm_aiter_group_fp8_quant_different_shapes():
    """Test rocm_aiter_ops.group_fp8_quant with different input shapes."""
    # Hoisted out of the loop: the import is loop-invariant, and aiter is
    # guaranteed importable here by the module-level skipif mark.
    from aiter import dtypes
    group_size = 128
    test_shapes = [
        (64, 2048),
        (256, 8192),
        (32, 1024),
        (512, 4096),
    ]
    for M, N in test_shapes:
        input_tensor = torch.randn((M, N), dtype=torch.bfloat16, device="cuda")
        x_fp8, scales = rocm_aiter_ops.group_fp8_quant(input_tensor, group_size)
        # Verify shapes: quantized output keeps the input shape; one scale
        # per group of `group_size` columns (ceiling division for the tail).
        assert x_fp8.shape == (M, N)
        expected_scale_cols = (N + group_size - 1) // group_size
        assert scales.shape == (M, expected_scale_cols)
        # Verify dtypes
        assert x_fp8.dtype == dtypes.fp8
        assert scales.dtype == torch.float32
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/rocm/aiter/test_grouped_quant.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/online_serving/token_generation_client.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import httpx
from transformers import AutoTokenizer
GEN_ENDPOINT = "http://localhost:8000/inference/v1/generate"
# The local server ignores credentials; any bearer token is accepted.
DUMMY_API_KEY = "empty"
MODEL_NAME = "Qwen/Qwen3-0.6B"
transport = httpx.HTTPTransport()
headers = {"Authorization": f"Bearer {DUMMY_API_KEY}"}
# Shared synchronous HTTP client; generous timeout to cover generation time.
client = httpx.Client(
    transport=transport,
    base_url=GEN_ENDPOINT,
    timeout=600,
    headers=headers,
)
# Chat conversation tokenized client-side before sending raw token IDs.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "How many countries are in the EU?"},
]
def main(client):
    """Tokenize a chat locally, send the token IDs to the generate endpoint,
    and detokenize the returned token IDs on the client side."""
    tok = AutoTokenizer.from_pretrained(MODEL_NAME)
    # Build prompt token IDs locally; the server receives only IDs.
    prompt_ids = tok.apply_chat_template(
        messages,
        add_generation_prompt=True,
        enable_thinking=False,
        return_dict=True,
    ).input_ids
    request_body = {
        "model": MODEL_NAME,
        "token_ids": prompt_ids,
        "sampling_params": {"max_tokens": 24, "temperature": 0.2, "detokenize": False},
        "stream": False,
    }
    response = client.post(GEN_ENDPOINT, json=request_body)
    response.raise_for_status()
    data = response.json()
    separator = "-" * 50
    print(data)
    print(separator)
    print("Token generation results:")
    # The server skipped detokenization, so decode the generated IDs here.
    print(tok.decode(data["choices"][0]["token_ids"]))
    print(separator)
# Entry point: requires a vLLM server already listening at GEN_ENDPOINT.
if __name__ == "__main__":
    main(client)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/token_generation_client.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/test_serving_tokens.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import httpx
import pytest
import pytest_asyncio
from transformers import AutoTokenizer
from vllm.config import ModelConfig
from vllm.config.utils import getattr_iter
from vllm.v1.engine.detokenizer import check_stop_strings
from ...utils import RemoteOpenAIServer
MODEL_NAME = "Qwen/Qwen3-0.6B"
# Token-in/token-out generation endpoint under test.
GEN_ENDPOINT = "/inference/v1/generate"
def get_vocab_size(model_name):
    """Return the vocabulary size vLLM derives for `model_name`."""
    model_config = ModelConfig(model=model_name, seed=0, dtype="bfloat16")
    return model_config.get_vocab_size()
@pytest.fixture(scope="module")
def tokenizer():
    """HF tokenizer for the model under test, shared across the module."""
    return AutoTokenizer.from_pretrained(MODEL_NAME)
@pytest.fixture(scope="module")
def messages():
    """Canonical chat conversation reused by several tests below."""
    return [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "How many countries are in the EU?"},
    ]
@pytest.fixture(scope="module")
def server(request):
    """Launch a RemoteOpenAIServer for the module.

    Extra CLI args can be injected via indirect parametrization
    (`request.param`), as done by the LoRA test below.
    """
    args = [
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "1024",
        "--enforce-eager",
        # On ROCm (e.g. MI355X/gfx950), bf16 GEMM results can differ by
        # 1 ULP when the batch dimension (M) changes, because different M
        # values cause the Tensile backend to select different tile
        # configurations with different fp32 accumulation orders. With
        # prefix caching, cache-miss prefills compute all tokens in one
        # pass (large M) while cache-hit requests compute only the
        # uncached suffix (small M), seeding a divergence that amplifies
        # through the residual stream and flips argmax tokens.
        # See: https://github.com/vllm-project/vllm/issues/33123
        #
        # Either disable prefix caching entirely, or enable it with
        # --deterministic-prefix-caching which forces cache-miss prefills
        # to split at block boundaries so the suffix GEMM shape is always
        # identical regardless of cache state.
        #
        # Option A: disable prefix caching
        "--no-enable-prefix-caching",
        #
        # Option B: deterministic prefix caching
        # "--enable-prefix-caching",
        # "--deterministic-prefix-caching",
    ]
    extra_args = getattr(request, "param", None)
    if extra_args is not None:
        # Accept either a list/tuple of args or a single scalar arg.
        args = args + (
            list(extra_args)
            if isinstance(extra_args, (list, tuple))
            else [str(extra_args)]
        )
    envs = os.environ.copy()
    # See: https://github.com/vllm-project/vllm/pull/33493#issuecomment-3888060787
    envs["VLLM_ROCM_USE_SKINNY_GEMM"] = "0"
    with RemoteOpenAIServer(MODEL_NAME, args, env_dict=envs) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server: RemoteOpenAIServer):
    """Async HTTP client bound to the test server (UDS transport if used)."""
    transport = httpx.AsyncHTTPTransport(uds=server.uds) if server.uds else None
    headers = {"Authorization": f"Bearer {server.DUMMY_API_KEY}"}
    async with httpx.AsyncClient(
        transport=transport,
        base_url=server.url_root,
        timeout=600,
        headers=headers,
    ) as c:
        yield c
@pytest.mark.asyncio
async def test_generate_endpoint(client):
    """Smoke test: a minimal token-in request returns a choices list."""
    payload = {
        "model": MODEL_NAME,
        "token_ids": [1, 2, 3],
        "sampling_params": {"max_tokens": 5},
        "stream": False,
    }
    resp = await client.post(GEN_ENDPOINT, json=payload)
    resp.raise_for_status()
    data = resp.json()
    assert "choices" in data
@pytest.mark.asyncio
@pytest.mark.parametrize("logprobs_value", [0, 1, 5])
async def test_generate_logprobs(client, logprobs_value):
    """Generate with logprobs enabled and validate per-token entries."""
    payload = {
        "model": MODEL_NAME,
        "token_ids": [1, 2, 3],
        "sampling_params": {
            "max_tokens": 5,
            "temperature": 0.0,
            "logprobs": logprobs_value,
        },
        "stream": False,
    }
    resp = await client.post(GEN_ENDPOINT, json=payload)
    resp.raise_for_status()
    data = resp.json()
    choice = data["choices"][0]
    assert choice["logprobs"] is not None
    logprobs_content = choice["logprobs"]["content"]
    # One logprobs entry per generated token.
    assert len(logprobs_content) == len(choice["token_ids"])
    for entry in logprobs_content:
        assert "logprob" in entry
        assert len(entry["top_logprobs"]) >= 1
        # logprobs=0 still yields one entry (the sampled token itself).
        assert len(entry["top_logprobs"]) == max(logprobs_value, 1)
@pytest.mark.asyncio
async def test_same_response_as_chat_completions(client, tokenizer, messages):
    """With greedy sampling, the token-based generate endpoint should produce
    the same text as /v1/chat/completions for the same conversation."""
    token_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        enable_thinking=False,  # default with Qwen3
        return_dict=True,  # default with Transformers v5
    ).input_ids
    for ignore_eos in [True, False]:
        payload = {
            "model": MODEL_NAME,
            "token_ids": token_ids,
            "sampling_params": {
                "max_tokens": 24,
                "temperature": 0.0,
                # NOTE coordinator will set this to skip detokenization
                "detokenize": False,
                "ignore_eos": ignore_eos,
            },
            "stream": False,
        }
        generate_resp = await client.post(GEN_ENDPOINT, json=payload)
        generate_data = generate_resp.json()
        gen_token_ids = generate_data["choices"][0]["token_ids"]
        generate_res = tokenizer.decode(gen_token_ids, skip_special_tokens=True)
        payload = {
            "model": MODEL_NAME,
            "messages": messages,
            "max_tokens": 24,
            "temperature": 0.0,
            "stream": False,
            "ignore_eos": ignore_eos,
            "chat_template_kwargs": {"enable_thinking": False},
        }
        completions_resp = await client.post("/v1/chat/completions", json=payload)
        completions_data = completions_resp.json()
        completions_res = completions_data["choices"][0]["message"]["content"]
        if ignore_eos:
            # When ignoring EOS, only compare up to the first EOS token
            # Post-EOS generation is undefined and may differ
            eos_tokens = {
                tokenizer.eos_token_id,
                *getattr_iter(
                    tokenizer,
                    [
                        "extra_special_tokens_ids",  # Transformers v5
                        "additional_special_tokens_ids",  # Transformers v4
                    ],
                    [],
                ),
            }
            # Find first EOS in generated tokens
            eos_pos = None
            for i, tid in enumerate(gen_token_ids):
                if tid in eos_tokens:
                    eos_pos = i
                    break
            if eos_pos is not None:
                gen_token_ids_truncated = gen_token_ids[:eos_pos]
                generate_res = tokenizer.decode(
                    gen_token_ids_truncated, skip_special_tokens=True
                )
                # Truncate completions_res to same length for comparison
                completions_res = completions_res[: len(generate_res)]
        assert generate_res == completions_res
@pytest.mark.asyncio
async def test_stop_string_workflow(client, tokenizer, messages):
    """Stop strings must be rejected when detokenize=False; client-side
    check_stop_strings then reproduces server-side stop handling."""
    token_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        enable_thinking=False,  # default with Qwen3
        return_dict=True,  # default with Transformers v5
    ).input_ids
    payload = {
        "model": MODEL_NAME,
        "token_ids": token_ids,
        "sampling_params": {
            "max_tokens": 24,
            "temperature": 0.0,
            "detokenize": False,
            # stop strings are only supported when detokenize is True.
            "stop": ["27 member"],
        },
        # TODO stream test is much more interesting
        "stream": False,
    }
    # The server must reject stop strings in tokens-only mode.
    with pytest.raises(httpx.HTTPStatusError):
        generate_resp = await client.post(GEN_ENDPOINT, json=payload)
        generate_resp.raise_for_status()
    payload["sampling_params"]["stop"] = None
    generate_resp = await client.post(
        GEN_ENDPOINT, json=payload, headers={"X-Request-Id": "42"}
    )
    generate_data = generate_resp.json()
    generate_res = tokenizer.decode(
        generate_data["choices"][0]["token_ids"], skip_special_tokens=True
    )
    # NOTE This is under the responsibility of the coordinator
    # stop_checker = StopChecker(
    #     max_model_len=1024, get_tokenizer_for_seq=lambda _: tokenizer
    # )
    stop_str, truncate_to = check_stop_strings(
        generate_res, len(generate_res), ["27 member"], False
    )
    assert stop_str == "27 member"
    # abort request that hit stop string (requires tokens-only mode)
    # res = await client.post("/abort_requests", json={"request_ids": ["generate-tokens-42"]})  # noqa: E501
    # res.raise_for_status()
    generate_res = generate_res[:truncate_to]
    # Get stop_str response from chat completions
    payload = {
        "model": MODEL_NAME,
        "messages": messages,
        "max_tokens": 24,
        "temperature": 0.0,
        "stream": False,
        "stop": ["27 member"],
        "chat_template_kwargs": dict(enable_thinking=False),
    }
    completions_resp = await client.post("/v1/chat/completions", json=payload)
    completions_data = completions_resp.json()
    completions_res = completions_data["choices"][0]["message"]["content"]
    assert generate_res == completions_res
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "server",
    [
        [
            "--enable-lora",
            "--lora-modules",
            "Alice=charent/self_cognition_Alice",
            "Bob=charent/self_cognition_Bob",
            "--max-lora-rank",
            "64",
            "--max-cpu-loras",
            "2",
        ]
    ],
    indirect=True,
)
async def test_generate_with_lora_adapter(client, tokenizer, messages):
    """Generate through a LoRA adapter (selected via the model name) and
    compare against chat completions using the same adapter."""
    # Verify adapters are listed
    models_resp = await client.get("/v1/models")
    models_resp.raise_for_status()
    models = {m["id"] for m in models_resp.json().get("data", [])}
    assert {"Alice", "Bob"}.issubset(models)
    # Generate using a LoRA adapter by specifying its name as the model
    payload = {
        "model": "Alice",
        "token_ids": [1, 2, 3],
        "sampling_params": {"max_tokens": 5},
        "stream": False,
    }
    resp = await client.post(GEN_ENDPOINT, json=payload)
    resp.raise_for_status()
    data = resp.json()
    assert "choices" in data
    token_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        enable_thinking=False,  # default with Qwen3
        return_dict=True,  # default with Transformers v5
    ).input_ids
    payload = {
        "model": "Alice",
        "token_ids": token_ids,
        "sampling_params": {
            "max_tokens": 24,
            "temperature": 0.0,
            "detokenize": False,
        },
        "stream": False,
    }
    generate_resp = await client.post(GEN_ENDPOINT, json=payload)
    generate_data = generate_resp.json()
    generate_res = tokenizer.decode(
        generate_data["choices"][0]["token_ids"], skip_special_tokens=True
    )
    payload = {
        "model": "Alice",
        "messages": messages,
        "max_tokens": 24,
        "temperature": 0.0,
        "stream": False,
        "chat_template_kwargs": dict(enable_thinking=False),
    }
    completions_resp = await client.post("/v1/chat/completions", json=payload)
    completions_data = completions_resp.json()
    completions_res = completions_data["choices"][0]["message"]["content"]
    assert generate_res == completions_res
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_serving_tokens.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/conv.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Conv Layer Class."""
import math
from typing import Literal
import torch
import torch.nn as nn
import torch.nn.functional as F
from vllm.model_executor.custom_op import CustomOp
from vllm.utils.torch_utils import is_torch_equal
class ConvLayerBase(CustomOp):
    """Base class for conv layers.

    Normalizes scalar/tuple hyperparameters to per-dimension tuples and
    allocates weight/bias parameters. Subclasses set `num_dim` and implement
    the forward paths.
    """

    # Number of spatial dimensions (2 for Conv2d, 3 for Conv3d); set by
    # subclasses.
    num_dim: int

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int | tuple[int, ...],
        stride: int | tuple[int, ...] = 1,
        padding: int | tuple[int, ...] | Literal["same", "valid"] = 0,
        dilation: int | tuple[int, ...] = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: Literal["zeros", "reflect", "replicate", "circular"] = "zeros",
        *,
        params_dtype: torch.dtype | None = None,
    ) -> None:
        super().__init__()
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        valid_padding_strings = {"same", "valid"}
        if isinstance(padding, str) and padding not in valid_padding_strings:
            raise ValueError(
                f"Invalid padding string '{padding}'. "
                f"Expected one of {valid_padding_strings}."
            )
        # Normalize scalar hyperparameters to one value per spatial dim.
        kernel_size = (
            (kernel_size,) * self.num_dim
            if isinstance(kernel_size, int)
            else kernel_size
        )
        stride = (stride,) * self.num_dim if isinstance(stride, int) else stride
        dilation = (dilation,) * self.num_dim if isinstance(dilation, int) else dilation
        if padding == "same":
            # BUGFIX: this check previously ran *after* the "same" string had
            # been replaced by numeric padding, so it could never trigger.
            # It must be validated while `padding` is still the string.
            if any(s != 1 for s in stride):
                raise ValueError(
                    "padding='same' is not supported for strided convolutions"
                )
            padding = tuple(k // 2 for k in kernel_size)
        elif padding == "valid":
            padding = 0
        padding = (padding,) * self.num_dim if isinstance(padding, int) else padding
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.padding_mode = padding_mode
        # When the kernel tiles the input exactly (stride == kernel, zero
        # padding, a single group), the convolution is equivalent to a linear
        # layer over non-overlapping patches; subclasses use this fast path.
        self.enable_linear = (
            (self.kernel_size == self.stride)
            and not any(self.padding)
            and self.groups == 1
        )
        # Flattened per-patch input size for the linear fast path.
        self.input_size = in_channels * math.prod(self.kernel_size)
        self.weight = nn.Parameter(
            torch.empty(
                out_channels,
                in_channels // groups,
                *kernel_size,
                dtype=params_dtype,
            ),
        )
        if bias:
            self.bias = nn.Parameter(torch.empty(self.out_channels, dtype=params_dtype))
        else:
            self.register_parameter("bias", None)

    def extra_repr(self) -> str:
        """Summarize the layer's hyperparameters for repr()/print(module)."""
        s = f"in_channels={self.in_channels}, "
        s += f"out_channels={self.out_channels}, "
        s += f"kernel_size={self.kernel_size}, "
        s += f"stride={self.stride}, "
        s += f"padding={self.padding}, "
        s += f"bias={self.bias is not None}"
        return s
# --8<-- [start:conv2d]
@CustomOp.register("conv2d")
class Conv2dLayer(ConvLayerBase):
    """Conv layer with Conv2d."""
    # --8<-- [end:conv2d]
    num_dim = 2
    def _forward_mulmat(self, x: torch.Tensor) -> torch.Tensor:
        # Patchify-as-matmul fast path, valid only when `enable_linear` holds
        # (stride == kernel_size, no padding, groups == 1), so patches are
        # non-overlapping and cover the input exactly.
        assert x.dim() == 4
        B, C, H, W = x.shape
        K1, K2 = self.kernel_size
        H, W = H // K1, W // K2
        # unfold twice -> (B, C, H, W, K1, K2); then flatten each patch to a
        # row so all filters apply in one F.linear call.
        x = x.unfold(2, K1, K1).unfold(3, K2, K2)
        x = x.permute(0, 2, 3, 1, 4, 5).reshape(-1, self.input_size)
        x = F.linear(
            x,
            self.weight.view(self.out_channels, self.input_size),
            self.bias,
        )
        # Restore (B, out_channels, H, W) layout.
        x = x.view(B, H, W, self.out_channels).permute(0, 3, 1, 2)
        return x
    def _forward_conv(self, x: torch.Tensor) -> torch.Tensor:
        # General path: defer to the framework convolution.
        assert x.dim() == 4
        x = F.conv2d(
            x,
            self.weight,
            self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
        )
        return x
    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        """Expected input shape: (batch_size, in_channels, height, width)"""
        assert x.dim() == 4
        if self.enable_linear:
            return self._forward_mulmat(x)
        else:
            return self._forward_conv(x)
    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        # By default, we use CUDNN's convolution ops with optimization.
        return self._forward_conv(x)
class CausalConv2dLayer(Conv2dLayer):
    """
    A causal version of nn.Conv2d where each location in the 2D matrix would
    have no access to locations on its right or down
    All arguments are the same as nn.Conv2d except padding which should be
    set as None
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int,
        padding: int | None = None,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
        *,
        params_dtype: torch.dtype | None = None,
    ) -> None:
        # BUGFIX: the default used to be `padding: int = 0`, so constructing
        # the layer without an explicit `padding=None` always raised the
        # ValueError below, contradicting the class docstring. Padding is
        # computed internally and must not be supplied by callers.
        if padding is not None:
            raise ValueError(
                "Argument padding should be set to None for CausalConv2dLayer."
            )
        # Zero-pad (kernel_size - 1) on the left of the last dim so outputs
        # only depend on current/earlier positions; (stride - 1) on the right
        # (presumably to keep strided outputs aligned — TODO confirm).
        self._left_padding: int = kernel_size - 1
        self._right_padding: int = stride - 1
        padding = 0
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode,
            params_dtype=params_dtype,
        )

    def forward(
        self,
        x: torch.Tensor,
    ) -> torch.Tensor:
        # F.pad with (left, right, 0, 0) pads only the last dimension.
        x = F.pad(x, pad=(self._left_padding, self._right_padding, 0, 0))
        x = super().forward(x)
        return x
# --8<-- [start:conv3d]
@CustomOp.register("conv3d")
class Conv3dLayer(ConvLayerBase):
    """Conv layer with Conv3d."""
    # --8<-- [end:conv3d]
    num_dim = 3
    def _forward_mulmat(self, x: torch.Tensor) -> torch.Tensor:
        # Patchify-as-matmul fast path, valid only when `enable_linear` holds
        # (stride == kernel_size, no padding, groups == 1).
        assert x.dim() == 5
        B, C, T, H, W = x.shape
        K1, K2, K3 = self.kernel_size
        T, H, W = T // K1, H // K2, W // K3
        # unfold three times -> (B, C, T, H, W, K1, K2, K3); then flatten
        # each 3D patch to a row so all filters apply in one F.linear call.
        x = x.unfold(2, K1, K1).unfold(3, K2, K2).unfold(4, K3, K3)
        x = x.permute(0, 2, 3, 4, 1, 5, 6, 7).reshape(-1, self.input_size)
        x = F.linear(
            x,
            self.weight.view(self.out_channels, self.input_size),
            self.bias,
        )
        # Restore (B, out_channels, T, H, W) layout.
        x = x.view(B, T, H, W, self.out_channels).permute(0, 4, 1, 2, 3)
        return x
    def _forward_conv(self, x: torch.Tensor) -> torch.Tensor:
        # General path: defer to the framework convolution.
        assert x.dim() == 5
        x = F.conv3d(
            x,
            self.weight,
            self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
        )
        return x
    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        """Expected input shape: (batch_size, in_channels, time, height, width)"""
        if self.enable_linear:
            return self._forward_mulmat(x)
        else:
            return self._forward_conv(x)
    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        # PyTorch2.9.0 disabled CUDNN's Conv3D, which caused a
        # significant performance regression.
        # See: https://github.com/vllm-project/vllm/issues/27406
        # and https://github.com/pytorch/pytorch/issues/166122
        # By default, we use CUDNN's convolution ops with optimization.
        if self.enable_linear and (is_torch_equal("2.9.0") or is_torch_equal("2.9.1")):
            return self._forward_mulmat(x)
        return self._forward_conv(x)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/conv.py",
"license": "Apache License 2.0",
"lines": 224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/compile/test_graph_partition.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import operator
import pytest
import torch
from torch.fx.experimental.proxy_tensor import make_fx
from vllm.compilation.backends import split_graph
from vllm.compilation.passes.fx_utils import find_op_nodes
# This import automatically registers `torch.ops.silly.attention`
from . import silly_attention # noqa: F401
def test_getitem_moved_to_producer_subgraph():
    """
    Test that getitem operations are moved to the same subgraph as their input,
    preventing tuple inputs to submodules.
    """

    def model_fn(x: torch.Tensor) -> torch.Tensor:
        # torch.split returns a tuple, creating real getitem operations.
        # It should become the first submodule, the one producing the tuple.
        chunks = torch.split(x, x.shape[0] // 2, dim=0)
        # The remaining ops should form the second submodule consuming it.
        result_0 = torch.relu(chunks[0])
        result_1 = torch.relu(chunks[1])
        return torch.cat([result_0, result_1], dim=0)

    sample = torch.randn(4, 3)
    traced = make_fx(model_fn)(sample)

    # Sanity-check the setup: tracing must have produced getitem nodes.
    has_getitem = False
    for node in traced.graph.nodes:
        if node.op == "call_function" and node.target == operator.getitem:
            has_getitem = True
            break
    assert has_getitem, "Test setup failed: graph should contain getitem operations"

    # Split on the tuple producer aten::split.
    split_gm, split_items = split_graph(traced, ["aten::split.Tensor"])
    assert len(split_items) == 2, "Graph should be split into 2 submodules"

    # No submodule may unpack a tuple it received through a placeholder.
    for split_item in split_items:
        submodule = split_item.graph
        getitem_on_placeholder = [
            node
            for node in submodule.graph.nodes
            if node.op == "call_function"
            and node.target == operator.getitem
            and node.args[0].op == "placeholder"
        ]
        assert len(getitem_on_placeholder) == 0, (
            f"Submodule {split_item.submod_name} has getitem operations on "
            f"placeholder nodes: {[n.name for n in getitem_on_placeholder]}. "
            "This means tuple inputs were not properly eliminated."
        )

    # Splitting must not change the computed result.
    fresh = torch.randn(4, 3)
    assert torch.allclose(traced(fresh), split_gm(fresh)), "Output mismatch"
def test_no_tuple_inputs_with_multiple_consumers():
    """
    Test that when a tuple is consumed by multiple split operations,
    getitem operations are properly moved to avoid tuple inputs.
    """

    def model_fn(x: torch.Tensor) -> torch.Tensor:
        # torch.split returns a tuple, creating real getitem operations.
        # It should become the first submodule, producing the tuple.
        chunks = torch.split(x, x.shape[0] // 2, dim=0)
        # Second submodule: first consumers of the tuple.
        result_1 = torch.relu(chunks[0])
        result_2 = torch.relu(chunks[1])
        # Artificial split point (sigmoid) creating a third, independent
        # submodule that consumes the tuple later.
        result_1 = torch.sigmoid(result_1)
        # Fourth submodule, consuming the tuple yet again.
        result = torch.cat([chunks[0], chunks[1], result_1, result_2])
        return result

    sample = torch.randn(4, 3)
    traced = make_fx(model_fn)(sample)

    # Sanity-check the setup: tracing must have produced getitem nodes.
    assert any(
        node.op == "call_function" and node.target == operator.getitem
        for node in traced.graph.nodes
    ), "Test setup failed: graph should contain getitem operations"

    split_gm, split_items = split_graph(traced, ["aten::split.Tensor", "aten::sigmoid"])
    assert len(split_items) == 4, "Graph should be split into 4 submodules"

    # No submodule may unpack a tuple it received through a placeholder.
    for split_item in split_items:
        for node in split_item.graph.graph.nodes:
            is_tuple_unpack = (
                node.op == "call_function"
                and node.target == operator.getitem
                and node.args[0].op == "placeholder"
            )
            if is_tuple_unpack:
                pytest.fail(
                    f"Submodule {split_item.submod_name} has getitem on "
                    f"placeholder {node.args[0].name}, indicating it receives "
                    "a tuple input"
                )

    # Splitting must not change the computed result.
    fresh = torch.randn(4, 3)
    assert torch.allclose(traced(fresh), split_gm(fresh)), (
        "Output mismatch after split"
    )
def test_consecutive_ops_in_split():
    """
    Test that consecutive splitting operations are grouped into the same subgraph.

    Traces relu -> sqrt -> silly.attention -> sigmoid, splits on
    ``aten::sqrt`` and ``silly::attention`` (adjacent in the traced graph),
    and checks that they land together in a single splitting submodule.
    """

    def model_fn(x: torch.Tensor) -> torch.Tensor:
        """
        Define a simple model where consecutive operations create opportunities
        for splitting subgraphs.
        """
        # Apply silly attention followed by consecutive operations
        intermediate = torch.relu(x)
        attn_inout = torch.sqrt(intermediate)
        torch.ops.silly.attention(intermediate, intermediate, attn_inout, attn_inout)
        final_result = torch.sigmoid(attn_inout)
        return final_result

    torch.set_default_device("cuda")
    # Create the traced FX graph for the model
    x = torch.randn(8, 4)
    gm = make_fx(model_fn)(x)
    # Assert presence of the expected operations in the setup
    assert (
        len(list(find_op_nodes(torch.ops.aten.relu, gm.graph))) == 1
        and len(list(find_op_nodes(torch.ops.aten.sqrt, gm.graph))) == 1
    ), "Test setup failed: Expected sqrt and relu operations in the graph."
    # Configure split operations to test
    splitting_ops = ["silly::attention", "aten::sqrt"]
    split_gm, split_items = split_graph(gm, splitting_ops)
    # Validate the number of partitions
    assert len(split_items) == 3, (
        "Consecutive splitting operations were not grouped correctly."
    )
    # Validate that correctness is preserved
    new_x = torch.randn(8, 4)
    output_original = gm(new_x)
    output_split = split_gm(new_x)
    assert torch.allclose(output_original, output_split), (
        "Output mismatch after splitting."
    )
    # Check the splitting item has 2 nodes exactly (sqrt and attn).
    # NOTE: list comprehension instead of list(generator) (ruff C400);
    # a leftover debug print() of the subgraph was also removed here.
    splitting_items = [s for s in split_items if s.is_splitting_graph]
    assert len(splitting_items) == 1, "Expecting a single splitting graph"
    splitting_gm = splitting_items[0].graph
    assert len(splitting_gm.graph.nodes) == 4, "Expecting 4 nodes in splitting graph"
    # Expected layout: placeholder, sqrt, attention, output.
    assert [node.op for node in splitting_gm.graph.nodes] == ["placeholder"] + 2 * [
        "call_function"
    ] + ["output"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/test_graph_partition.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/e2e/test_context_length.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for vLLM `vllm/v1/engine/processor.Processor._validate_model_input()`
handling of maximum context length for decoder models.
This test ensures:
- A prompt that is one token shorter than the model's maximum context length
can be processed successfully when requesting one additional token.
- A prompt that reaches the model's maximum context length throws a
`ValueError` when requesting at least one additional token.
"""
import pytest
from tests.conftest import VllmRunner
from tests.utils import create_new_process_for_each_test
@create_new_process_for_each_test()
@pytest.mark.parametrize("model, max_model_len", [("JackFram/llama-160m", 2048)])
@pytest.mark.parametrize(
    "prompt_len, max_tokens",
    [
        (2047, 1),  # prompt_len = max_model_len - 1 -> allowed
        (2048, 1),  # prompt_len = max_model_len -> not allowed
    ],
)
def test_decoder_max_context_length_validation(
    model: str,
    max_model_len: int,
    vllm_runner: type[VllmRunner],
    prompt_len: int,
    max_tokens: int,
) -> None:
    """Check vLLM decoder model input validation for edge cases where
    the prompt length is (almost) equal to the max model length."""
    prompt_ids = [[43] * prompt_len]
    fits_in_context = prompt_len + max_tokens <= max_model_len
    with vllm_runner(
        model_name=model,
        tokenizer_name=model,
        max_model_len=max_model_len,
        max_num_seqs=1,
        tensor_parallel_size=1,
    ) as vllm_model:
        if fits_in_context:
            # Constraints are met; generation must succeed.
            vllm_model.generate_greedy(prompt_ids, max_tokens)
            return
        # Otherwise we expect the ValueError raised by
        # vllm/v1/engine/processor.Processor_validate_model_input()
        expected_msg = (
            f"The decoder prompt (length {prompt_len}) plus the number of "
            f"requested output tokens (at least 1) is longer than "
            f"the maximum model length of {max_model_len}. "
            "Make sure that `max_model_len` is no smaller than the number of "
            "text tokens (prompt + requested output tokens)."
        )
        with pytest.raises(ValueError) as excinfo:
            vllm_model.generate_greedy(prompt_ids, max_tokens)
        assert expected_msg in str(excinfo.value)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/e2e/test_context_length.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/test_responses_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from openai.types.chat import ChatCompletionMessageParam
from openai.types.responses.response_function_tool_call import ResponseFunctionToolCall
from openai.types.responses.response_function_tool_call_output_item import (
ResponseFunctionToolCallOutputItem,
)
from openai.types.responses.response_output_message import ResponseOutputMessage
from openai.types.responses.response_output_text import ResponseOutputText
from openai.types.responses.response_reasoning_item import (
Content,
ResponseReasoningItem,
Summary,
)
from vllm.entrypoints.constants import MCP_PREFIX
from vllm.entrypoints.openai.responses.utils import (
_construct_single_message_from_response_item,
_maybe_combine_reasoning_and_tool_call,
construct_chat_messages_with_tool_call,
convert_tool_responses_to_completions_format,
should_continue_final_message,
)
class TestResponsesUtils:
    """Tests for the Responses-API conversion helpers in
    vllm.entrypoints.openai.responses.utils."""

    def test_convert_tool_responses_to_completions_format(self):
        """Test basic conversion of a flat tool schema to nested format."""
        input_tool = {
            "type": "function",
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location", "unit"],
            },
        }
        result = convert_tool_responses_to_completions_format(input_tool)
        # The flat Responses-style schema is wrapped under a "function" key.
        assert result == {"type": "function", "function": input_tool}

    def test_construct_chat_messages_with_tool_call(self):
        """Test construction of chat messages with tool calls."""
        reasoning_item = ResponseReasoningItem(
            id="lol",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Leroy Jenkins",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status=None,
        )
        mcp_tool_item = ResponseFunctionToolCall(
            id="mcp_123",
            call_id="call_123",
            type="function_call",
            status="completed",
            name="python",
            arguments='{"code": "123+456"}',
        )
        input_items = [reasoning_item, mcp_tool_item]
        messages = construct_chat_messages_with_tool_call(input_items)
        # Reasoning + MCP tool call are merged into a single assistant message.
        assert len(messages) == 1
        message = messages[0]
        assert message["role"] == "assistant"
        assert message["reasoning"] == "Leroy Jenkins"
        assert message["tool_calls"][0]["id"] == "call_123"
        assert message["tool_calls"][0]["function"]["name"] == "python"
        assert (
            message["tool_calls"][0]["function"]["arguments"] == '{"code": "123+456"}'
        )

    def test_construct_single_message_from_response_item(self):
        """Test conversion of individual response items into chat messages."""
        # Reasoning content becomes the assistant "reasoning" field.
        item = ResponseReasoningItem(
            id="lol",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Leroy Jenkins",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status=None,
        )
        formatted_item = _construct_single_message_from_response_item(item)
        assert formatted_item["role"] == "assistant"
        assert formatted_item["reasoning"] == "Leroy Jenkins"
        # With no content, the summary text is used as the reasoning instead.
        item = ResponseReasoningItem(
            id="lol",
            summary=[
                Summary(
                    text='Hmm, the user has just started with a simple "Hello,"',
                    type="summary_text",
                )
            ],
            type="reasoning",
            content=None,
            encrypted_content=None,
            status=None,
        )
        formatted_item = _construct_single_message_from_response_item(item)
        assert formatted_item["role"] == "assistant"
        assert (
            formatted_item["reasoning"]
            == 'Hmm, the user has just started with a simple "Hello,"'
        )
        # Tool call outputs become "tool" role messages.
        tool_call_output = ResponseFunctionToolCallOutputItem(
            id="temp_id",
            type="function_call_output",
            call_id="temp",
            output="1234",
            status="completed",
        )
        formatted_item = _construct_single_message_from_response_item(tool_call_output)
        assert formatted_item["role"] == "tool"
        assert formatted_item["content"] == "1234"
        assert formatted_item["tool_call_id"] == "temp"
        # Encrypted reasoning with no plain content cannot be converted.
        item = ResponseReasoningItem(
            id="lol",
            summary=[],
            type="reasoning",
            content=None,
            encrypted_content="TOP_SECRET_MESSAGE",
            status=None,
        )
        with pytest.raises(ValueError):
            _construct_single_message_from_response_item(item)
        # Output messages become assistant messages with plain text content.
        output_item = ResponseOutputMessage(
            id="msg_bf585bbbe3d500e0",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="dongyi",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="completed",
            type="message",
        )
        formatted_item = _construct_single_message_from_response_item(output_item)
        assert formatted_item["role"] == "assistant"
        assert formatted_item["content"] == "dongyi"
class TestShouldContinueFinalMessage:
    """Tests for should_continue_final_message function.

    This function enables Anthropic-style partial message completion, where
    users can provide an incomplete assistant message and have the model
    continue from where it left off.
    """

    def test_string_input_returns_false(self):
        """String input is always a user message, so should not continue."""
        assert should_continue_final_message("Hello, world!") is False

    def test_empty_list_returns_false(self):
        """Empty list should not continue."""
        assert should_continue_final_message([]) is False

    def test_completed_message_returns_false(self):
        """Completed message should not be continued."""
        output_item = ResponseOutputMessage(
            id="msg_123",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="The answer is 42.",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="completed",
            type="message",
        )
        assert should_continue_final_message([output_item]) is False

    def test_in_progress_message_returns_true(self):
        """In-progress message should be continued.

        This is the key use case for partial message completion.
        Example: The user provides "The best answer is (" and wants
        the model to continue from there.
        """
        output_item = ResponseOutputMessage(
            id="msg_123",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="The best answer is (",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="in_progress",
            type="message",
        )
        assert should_continue_final_message([output_item]) is True

    def test_incomplete_message_returns_true(self):
        """Incomplete message should be continued."""
        output_item = ResponseOutputMessage(
            id="msg_123",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="The answer",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="incomplete",
            type="message",
        )
        assert should_continue_final_message([output_item]) is True

    def test_in_progress_reasoning_returns_true(self):
        """In-progress reasoning should be continued."""
        reasoning_item = ResponseReasoningItem(
            id="reasoning_123",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Let me think about this...",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status="in_progress",
        )
        assert should_continue_final_message([reasoning_item]) is True

    def test_incomplete_reasoning_returns_true(self):
        """Incomplete reasoning should be continued."""
        reasoning_item = ResponseReasoningItem(
            id="reasoning_123",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Let me think",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status="incomplete",
        )
        assert should_continue_final_message([reasoning_item]) is True
        # The same check must also accept the plain-dict (curl-style) form.
        reasoning_item = {
            "id": "reasoning_123",
            "summary": [],
            "type": "reasoning",
            "content": [],
            "status": "incomplete",
        }
        assert should_continue_final_message([reasoning_item]) is True

    def test_completed_reasoning_returns_false(self):
        """Completed reasoning should not be continued."""
        reasoning_item = ResponseReasoningItem(
            id="reasoning_123",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="I have thought about this.",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status="completed",
        )
        assert should_continue_final_message([reasoning_item]) is False

    def test_reasoning_with_none_status_returns_false(self):
        """Reasoning with None status should not be continued."""
        reasoning_item = ResponseReasoningItem(
            id="reasoning_123",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Some reasoning",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status=None,
        )
        assert should_continue_final_message([reasoning_item]) is False

    def test_only_last_item_matters(self):
        """Only the last item in the list determines continuation."""
        completed_item = ResponseOutputMessage(
            id="msg_1",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="Complete message.",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="completed",
            type="message",
        )
        in_progress_item = ResponseOutputMessage(
            id="msg_2",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="Partial message...",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="in_progress",
            type="message",
        )
        # In-progress as last item -> should continue
        assert should_continue_final_message([completed_item, in_progress_item]) is True
        # Completed as last item -> should not continue
        assert (
            should_continue_final_message([in_progress_item, completed_item]) is False
        )

    def test_tool_call_returns_false(self):
        """Tool calls should not trigger continuation."""
        tool_call = ResponseFunctionToolCall(
            id="fc_123",
            call_id="call_123",
            type="function_call",
            status="in_progress",
            name="get_weather",
            arguments='{"location": "NYC"}',
        )
        assert should_continue_final_message([tool_call]) is False
        # Dict (curl-style) form of the same tool call.
        tool_call = {
            "id": "msg_123",
            "call_id": "call_123",
            "type": "function_call",
            "status": "in_progress",
            "name": "get_weather",
            "arguments": '{"location": "NYC"}',
        }
        assert should_continue_final_message([tool_call]) is False

    # Tests for dict inputs (e.g., from curl requests)
    def test_dict_in_progress_message_returns_true(self):
        """Dict with in_progress status should be continued (curl input)."""
        dict_item = {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "status": "in_progress",
            "content": [{"type": "output_text", "text": "The answer is ("}],
        }
        assert should_continue_final_message([dict_item]) is True

    def test_dict_incomplete_message_returns_true(self):
        """Dict with incomplete status should be continued (curl input)."""
        dict_item = {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "status": "incomplete",
            "content": [{"type": "output_text", "text": "Partial answer"}],
        }
        assert should_continue_final_message([dict_item]) is True

    def test_dict_completed_message_returns_false(self):
        """Dict with completed status should not be continued (curl input)."""
        dict_item = {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "status": "completed",
            "content": [{"type": "output_text", "text": "Complete answer."}],
        }
        assert should_continue_final_message([dict_item]) is False

    def test_dict_reasoning_in_progress_returns_true(self):
        """Dict reasoning item with in_progress status should be continued."""
        dict_item = {
            "id": "reasoning_123",
            "type": "reasoning",
            "status": "in_progress",
            "content": [{"type": "reasoning_text", "text": "Let me think..."}],
        }
        assert should_continue_final_message([dict_item]) is True

    def test_dict_without_status_returns_false(self):
        """Dict without status field should not be continued."""
        dict_item = {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "content": [{"type": "output_text", "text": "Some text"}],
        }
        assert should_continue_final_message([dict_item]) is False

    def test_dict_with_none_status_returns_false(self):
        """Dict with None status should not be continued."""
        dict_item = {
            "id": "msg_123",
            "type": "message",
            "role": "assistant",
            "status": None,
            "content": [{"type": "output_text", "text": "Some text"}],
        }
        assert should_continue_final_message([dict_item]) is False
class TestMaybeCombineReasoningAndToolCall:
    """Tests for _maybe_combine_reasoning_and_tool_call function.

    The helper merges a preceding assistant reasoning message with an MCP
    tool call (item id prefixed with MCP_PREFIX) into one assistant
    message; in every other case it returns None.
    """

    def test_returns_none_when_item_id_is_none(self):
        """
        Test fix from PR #31999: when item.id is None, should return None
        instead of raising TypeError on startswith().
        """
        item = ResponseFunctionToolCall(
            type="function_call",
            id=None,  # This was causing TypeError before the fix
            call_id="call_123",
            name="test_function",
            arguments="{}",
        )
        messages: list[ChatCompletionMessageParam] = []
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None

    def test_returns_none_when_id_does_not_start_with_mcp_prefix(self):
        """Test that non-MCP tool calls are not combined."""
        item = ResponseFunctionToolCall(
            type="function_call",
            id="regular_id",  # Does not start with MCP_PREFIX
            call_id="call_123",
            name="test_function",
            arguments="{}",
        )
        messages = [{"role": "assistant", "reasoning": "some reasoning"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None

    def test_returns_none_when_last_message_is_not_assistant(self):
        """Test that non-assistant last message returns None."""
        item = ResponseFunctionToolCall(
            type="function_call",
            id=f"{MCP_PREFIX}tool_id",
            call_id="call_123",
            name="test_function",
            arguments="{}",
        )
        messages = [{"role": "user", "content": "hello"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None

    def test_returns_none_when_last_message_has_no_reasoning(self):
        """Test that assistant message without reasoning returns None."""
        item = ResponseFunctionToolCall(
            type="function_call",
            id=f"{MCP_PREFIX}tool_id",
            call_id="call_123",
            name="test_function",
            arguments="{}",
        )
        messages = [{"role": "assistant", "content": "some content"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None

    def test_combines_reasoning_and_mcp_tool_call(self):
        """Test successful combination of reasoning message and MCP tool call."""
        item = ResponseFunctionToolCall(
            type="function_call",
            id=f"{MCP_PREFIX}tool_id",
            call_id="call_123",
            name="test_function",
            arguments='{"arg": "value"}',
        )
        messages = [{"role": "assistant", "reasoning": "I need to call this tool"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        # The merged message keeps the reasoning and carries the tool call.
        assert result is not None
        assert result["role"] == "assistant"
        assert result["reasoning"] == "I need to call this tool"
        assert "tool_calls" in result
        assert len(result["tool_calls"]) == 1
        assert result["tool_calls"][0]["id"] == "call_123"
        assert result["tool_calls"][0]["function"]["name"] == "test_function"
        assert result["tool_calls"][0]["function"]["arguments"] == '{"arg": "value"}'
        assert result["tool_calls"][0]["type"] == "function"

    def test_returns_none_for_non_function_tool_call_type(self):
        """Test that non-ResponseFunctionToolCall items return None."""
        # Pass a dict instead of ResponseFunctionToolCall
        item = {"type": "message", "content": "hello"}
        messages = [{"role": "assistant", "reasoning": "some reasoning"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None

    def test_returns_none_when_id_is_empty_string(self):
        """Test that empty string id returns None (falsy check)."""
        item = ResponseFunctionToolCall(
            type="function_call",
            id="",  # Empty string is falsy
            call_id="call_123",
            name="test_function",
            arguments="{}",
        )
        messages = [{"role": "assistant", "reasoning": "some reasoning"}]
        result = _maybe_combine_reasoning_and_tool_call(item, messages)
        assert result is None
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/test_responses_utils.py",
"license": "Apache License 2.0",
"lines": 496,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/engine/test_parallel_sampling.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm import SamplingParams
from vllm.outputs import CompletionOutput
from vllm.sampling_params import RequestOutputKind
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.parallel_sampling import ParentRequest
def test_parent_request_to_output_stream() -> None:
    """Parallel sampling (n=2) with the default output kind: each child's
    output is forwarded on every call, and a finished child's output is
    returned exactly once."""
    parent_request = ParentRequest(make_request(SamplingParams(n=2)))
    parent_request.child_requests = {"child_id_0", "child_id_1"}
    output_0 = CompletionOutput(
        index=0, text="child 0", token_ids=[], cumulative_logprob=None, logprobs=None
    )
    output_1 = CompletionOutput(
        index=1, text="child 1", token_ids=[], cumulative_logprob=None, logprobs=None
    )
    # Request not finished: outputs are streamed through unchanged.
    assert ([output_0], False) == parent_request.get_outputs("child_id_0", output_0)
    assert ([output_1], False) == parent_request.get_outputs("child_id_1", output_1)
    assert ([output_0], False) == parent_request.get_outputs("child_id_0", output_0)
    assert ([output_1], False) == parent_request.get_outputs("child_id_1", output_1)
    # output_1 finished
    output_1.finish_reason = "ended"
    assert ([output_0], False) == parent_request.get_outputs("child_id_0", output_0)
    assert ([output_1], False) == parent_request.get_outputs("child_id_1", output_1)
    # Finished output_1 was already returned; it must NOT be returned again.
    assert ([output_0], False) == parent_request.get_outputs("child_id_0", output_0)
    assert parent_request.get_outputs("child_id_1", output_1) == ([], False)
    # output_0 finished: the whole parent request is now finished (True).
    output_0.finish_reason = "ended"
    assert ([output_0], True) == parent_request.get_outputs("child_id_0", output_0)
    assert parent_request.get_outputs("child_id_1", output_1) == ([], True)
    # Finished output_0 was already returned; it must NOT be returned again.
    assert parent_request.get_outputs("child_id_0", output_0) == ([], True)
    assert parent_request.get_outputs("child_id_1", output_1) == ([], True)
def test_parent_request_to_output_final_only() -> None:
    """FINAL_ONLY parallel sampling: nothing is emitted until every child
    request has finished, then the combined outputs are returned."""
    parent_request = ParentRequest(
        make_request(SamplingParams(n=2, output_kind=RequestOutputKind.FINAL_ONLY))
    )
    parent_request.child_requests = {"child_id_0", "child_id_1"}
    output_0 = CompletionOutput(
        index=0, text="child 0", token_ids=[], cumulative_logprob=None, logprobs=None
    )
    output_1 = CompletionOutput(
        index=1, text="child 1", token_ids=[], cumulative_logprob=None, logprobs=None
    )
    # Neither child finished yet -> nothing is returned.
    assert parent_request.get_outputs("child_id_0", output_0) == ([], False)
    assert parent_request.get_outputs("child_id_1", output_1) == ([], False)
    # One child finished -> still nothing until all children are done.
    output_1.finish_reason = "ended"
    assert parent_request.get_outputs("child_id_0", output_0) == ([], False)
    assert parent_request.get_outputs("child_id_1", output_1) == ([], False)
    # Last child finished -> combined outputs are released, request done.
    output_0.finish_reason = "ended"
    expected = ([output_0, output_1], True)
    assert parent_request.get_outputs("child_id_0", output_0) == expected
    assert parent_request.get_outputs("child_id_1", output_1) == expected
def make_request(sampling_params: SamplingParams) -> EngineCoreRequest:
    """Build a minimal parent EngineCoreRequest carrying *sampling_params*."""
    request_kwargs = dict(
        request_id="parent_id",
        external_req_id="ext_parent_id",
        prompt_token_ids=None,
        mm_features=None,
        sampling_params=sampling_params,
        pooling_params=None,
        arrival_time=0.0,
        lora_request=None,
        cache_salt=None,
        data_parallel_rank=None,
    )
    return EngineCoreRequest(**request_kwargs)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/engine/test_parallel_sampling.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/core/test_priority_scheduler_random.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import random
import uuid
import pytest
from vllm.config import VllmConfig
from vllm.multimodal.inputs import (
MultiModalFeatureSpec,
MultiModalKwargsItem,
PlaceholderRange,
)
from vllm.sampling_params import SamplingParams
from vllm.utils.hashing import get_hash_fn_by_name
from vllm.v1.core.kv_cache_utils import get_request_block_hasher, init_none_hash
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.outputs import DraftTokenIds, ModelRunnerOutput
from vllm.v1.request import Request
from .test_scheduler import create_scheduler_with_priority
from .utils import EOS_TOKEN_ID
# Apply the ``cpu_test`` marker to every test in this module.
pytestmark = pytest.mark.cpu_test
def _create_random_request(
    max_tokens_range: tuple[int, int],
    num_tokens_range: tuple[int, int],
    arrival_time_range: tuple[float, float],
    priority_range: tuple[int, int],
    num_mm_item_range: tuple[int, int],
    vllm_config: VllmConfig,
):
    """Build a Request with randomized prompt length, priority, arrival time,
    max_tokens, and multimodal placeholders, each drawn uniformly from the
    corresponding (inclusive) range."""
    max_tokens = random.randint(*max_tokens_range)
    num_tokens = random.randint(*num_tokens_range)
    priority = random.randint(*priority_range)
    arrival_time = random.uniform(*arrival_time_range)
    num_mm_item = random.randint(*num_mm_item_range)
    # Pick distinct, sorted start offsets for multimodal placeholders; drop
    # any placeholder whose fixed length of 10 tokens would not fit.
    mm_positions: list[PlaceholderRange] = []
    for mm_start in sorted(
        random.sample(range(num_tokens), min(num_mm_item, num_tokens))
    ):
        if mm_start + 10 > num_tokens:
            continue
        mm_positions.append(PlaceholderRange(offset=mm_start, length=10))
    request_id = uuid.uuid4().hex
    sampling_params = SamplingParams(ignore_eos=False, max_tokens=max_tokens)
    sampling_params.update_from_generation_config({}, EOS_TOKEN_ID)
    mm_features = []
    for j, position in enumerate(mm_positions):
        # Identifier must be unique per request and per placeholder.
        identifier = f"{request_id}_hash_{j}"
        mm_feature = MultiModalFeatureSpec(
            data=MultiModalKwargsItem.dummy(),
            mm_position=position,
            identifier=identifier,
            modality="image",
        )
        mm_features.append(mm_feature)
    prompt_token_ids = random.choices(range(100), k=num_tokens)
    # The block hasher must use the same hash function the scheduler's
    # prefix-caching configuration selects.
    caching_hash_fn = get_hash_fn_by_name(
        vllm_config.cache_config.prefix_caching_hash_algo
    )
    init_none_hash(caching_hash_fn)
    block_hasher = get_request_block_hasher(
        vllm_config.cache_config.block_size, caching_hash_fn
    )
    request = Request(
        request_id=request_id,
        prompt_token_ids=prompt_token_ids,
        sampling_params=sampling_params,
        pooling_params=None,
        mm_features=mm_features if mm_features else None,
        arrival_time=arrival_time,
        priority=priority,
        block_hasher=block_hasher,
    )
    return request
def _mock_execute_model(
    scheduler_output: SchedulerOutput, num_output_tokens_range: tuple[int, int]
) -> ModelRunnerOutput:
    """Fabricate a ModelRunnerOutput containing random sampled tokens for
    every request scheduled in *scheduler_output*."""
    req_ids = [req.req_id for req in scheduler_output.scheduled_new_reqs]
    req_ids += list(scheduler_output.scheduled_cached_reqs.req_ids)
    # Shuffle so downstream code cannot rely on the scheduling order.
    random.shuffle(req_ids)
    lengths = [random.randint(*num_output_tokens_range) for _ in req_ids]
    token_ids = [[random.randint(0, 100) for _ in range(count)] for count in lengths]
    return ModelRunnerOutput(
        req_ids=req_ids,
        req_id_to_index={rid: idx for idx, rid in enumerate(req_ids)},
        sampled_token_ids=token_ids,
        logprobs=None,
        prompt_logprobs_dict={},
        pooler_output=[],
    )
def _mock_draft_token_ids(
    scheduler_output: SchedulerOutput,
    num_output_tokens_range: tuple[int, int],
    seen_request_prompt_length: dict[str, int],
) -> DraftTokenIds:
    """Fabricate random draft tokens for every scheduled request whose
    prefill has completed (num_computed_tokens >= prompt length).

    ``seen_request_prompt_length`` is updated in place with the prompt
    length of each newly scheduled request.
    """
    request_ids: list[str] = []
    sampled_token_ids: list[list[int]] = []
    for request in scheduler_output.scheduled_new_reqs:
        # A request must be scheduled as "new" exactly once.
        assert request.req_id not in seen_request_prompt_length
        seen_request_prompt_length[request.req_id] = len(request.prompt_token_ids or [])
        if request.num_computed_tokens >= seen_request_prompt_length[request.req_id]:
            num_tokens = random.randint(*num_output_tokens_range)
            request_ids.append(request.req_id)
            sampled_token_ids.append(
                [random.randint(0, 100) for _ in range(num_tokens)]
            )
    # Cached requests were seen before; look their prompt lengths up.
    for req_id, num_computed_tokens in zip(
        scheduler_output.scheduled_cached_reqs.req_ids,
        scheduler_output.scheduled_cached_reqs.num_computed_tokens,
    ):
        if num_computed_tokens >= seen_request_prompt_length[req_id]:
            num_tokens = random.randint(*num_output_tokens_range)
            request_ids.append(req_id)
            sampled_token_ids.append(
                [random.randint(0, 100) for _ in range(num_tokens)]
            )
    return DraftTokenIds(req_ids=request_ids, draft_token_ids=sampled_token_ids)
def _chech_valid_scheduler_output(
    scheduler_output: SchedulerOutput,
    seen_request_ids: set[str],
    seen_mm_hashes: set[str],
):
    """Assert structural invariants of a SchedulerOutput and update the
    bookkeeping sets.

    (The function name keeps its historical typo so call sites stay valid.)
    """
    new_reqs = scheduler_output.scheduled_new_reqs
    cached_req_ids = scheduler_output.scheduled_cached_reqs.req_ids

    # New requests must be genuinely new; cached ones must be known already.
    for req in new_reqs:
        assert req.req_id not in seen_request_ids
        seen_request_ids.add(req.req_id)
    for req_id in cached_req_ids:
        assert req_id in seen_request_ids

    scheduled_ids = {req.req_id for req in new_reqs} | set(cached_req_ids)
    assert set(scheduler_output.num_scheduled_tokens.keys()) == scheduled_ids
    assert (
        scheduler_output.total_num_scheduled_tokens
        == sum(scheduler_output.num_scheduled_tokens.values())
    )
    assert set(scheduler_output.scheduled_spec_decode_tokens.keys()) <= scheduled_ids
    assert set(scheduler_output.scheduled_encoder_inputs.keys()) <= scheduled_ids

    # Track every multimodal feature ever scheduled; freed encoder hashes
    # must have been seen first.
    for req in new_reqs:
        seen_mm_hashes.update(feature.identifier for feature in req.mm_features)
    for mm_hash in scheduler_output.free_encoder_mm_hashes:
        assert mm_hash in seen_mm_hashes
    assert scheduler_output.finished_req_ids <= seen_request_ids
@pytest.mark.parametrize("enable_prefix_caching", [True, False])
@pytest.mark.parametrize("num_speculative_tokens", [None, 1, 5])
@pytest.mark.parametrize(
    ("max_input_tokens", "max_output_tokens", "max_num_seqs", "num_blocks"),
    [
        # Standard profile
        (5000, 500, 256, 10000),
        # Generation heavy + high max_num_seqs + low num_blocks -> Many preemptions
        (500, 5000, 1024, 1000),
    ],
    ids=["standard", "preemption"],
)
def test_priority_scheduling_blast(
    enable_prefix_caching: bool,
    num_speculative_tokens: int | None,
    max_input_tokens: int,
    max_output_tokens: int,
    max_num_seqs: int,
    num_blocks: int,
):
    """Randomized stress test of the priority scheduler.

    Runs up to 20k schedule/execute iterations with randomly generated
    requests and mocked model (and optionally draft) outputs, validating
    the scheduler-output invariants on every step via
    _chech_valid_scheduler_output.
    """
    # Fixed seed so failures are reproducible.
    random.seed(42)
    # Cross-iteration bookkeeping shared with the helper functions above.
    seen_request_prompt_length = dict[str, int]()
    seen_request_ids = set[str]()
    seen_mm_hashes = set[str]()
    scheduler = create_scheduler_with_priority(
        model="Qwen/Qwen2.5-VL-3B-Instruct",
        max_num_seqs=max_num_seqs,
        enable_prefix_caching=enable_prefix_caching,
        num_blocks=num_blocks,
        num_speculative_tokens=num_speculative_tokens,
    )
    # First batch: mixed priorities (-3..3) and arrival times (0..1).
    num_initial_requests = 10
    for _ in range(num_initial_requests):
        req = _create_random_request(
            max_tokens_range=(1, max_output_tokens),
            num_tokens_range=(1, max_input_tokens),
            arrival_time_range=(0, 1),
            priority_range=(-3, 3),
            num_mm_item_range=(0, 2),
            vllm_config=scheduler.vllm_config,
        )
        scheduler.add_request(req)
    # Second batch: fixed priority 4 with arrival time 0 (note the variable
    # is deliberately reassigned for this smaller batch).
    num_initial_requests = 2
    for _ in range(num_initial_requests):
        req = _create_random_request(
            max_tokens_range=(1, max_output_tokens),
            num_tokens_range=(1, max_input_tokens),
            arrival_time_range=(0, 0),
            priority_range=(4, 4),
            num_mm_item_range=(0, 2),
            vllm_config=scheduler.vllm_config,
        )
        scheduler.add_request(req)
    for _ in range(20000):
        # Only inject new requests once the waiting queue has drained.
        if len(scheduler.waiting) == 0:
            num_new_requests = random.randint(0, 2)
            for _ in range(num_new_requests):
                req = _create_random_request(
                    max_tokens_range=(1, max_output_tokens),
                    num_tokens_range=(1, max_input_tokens),
                    arrival_time_range=(0, 1),
                    priority_range=(-3, 3),
                    num_mm_item_range=(0, 2),
                    vllm_config=scheduler.vllm_config,
                )
                scheduler.add_request(req)
        scheduler_output = scheduler.schedule()
        # Check the structural invariants of every scheduling step.
        _chech_valid_scheduler_output(
            scheduler_output, seen_request_ids, seen_mm_hashes
        )
        model_output = _mock_execute_model(
            scheduler_output,
            num_output_tokens_range=(1, 1 + (num_speculative_tokens or 0)),
        )
        scheduler.update_from_output(scheduler_output, model_output)
        if num_speculative_tokens is not None:
            # Feed mocked draft tokens back in for spec-decode configurations.
            scheduler.update_draft_token_ids(
                _mock_draft_token_ids(
                    scheduler_output,
                    (0, num_speculative_tokens),
                    seen_request_prompt_length,
                )
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/core/test_priority_scheduler_random.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from collections.abc import Iterable
from dataclasses import dataclass
from itertools import islice
from typing import Any
import torch
import zmq
from lmcache.utils import _lmcache_nvtx_annotate, init_logger
from lmcache.v1.multiprocess.custom_types import (
CudaIPCWrapper,
IPCCacheEngineKey,
KVCache,
)
from lmcache.v1.multiprocess.mq import MessageQueueClient, MessagingFuture
from lmcache.v1.multiprocess.protocol import RequestType, get_response_class
logger = init_logger(__name__)
def wrap_kv_caches(kv_caches: dict[str, torch.Tensor]) -> KVCache:
    """Wrap every registered KV tensor in a CUDA IPC wrapper for the server."""
    logger.info("KV caches keys are %s", list(kv_caches.keys()))
    wrapped: KVCache = []
    for tensor in kv_caches.values():
        wrapped.append(CudaIPCWrapper(tensor))
    return wrapped
def striding_block_hashes(
    block_hashes: list[bytes], blocks_in_chunk: int
) -> Iterable[bytes]:
    """Yield one representative hash per LMCache chunk.

    A chunk spans ``blocks_in_chunk`` consecutive vLLM blocks, and each
    block hash already encodes its whole prefix, so the hash of a chunk's
    **last** block identifies the chunk: that is exactly the elements at
    indices ``blocks_in_chunk - 1``, ``2 * blocks_in_chunk - 1``, ...
    """
    first_chunk_end = blocks_in_chunk - 1
    return islice(block_hashes, first_chunk_end, None, blocks_in_chunk)
def send_lmcache_request(
    mq_client: MessageQueueClient,
    request_type: RequestType,
    payloads: list[Any],
) -> MessagingFuture[Any]:
    """
    Submit one request to the LMCache multiprocess server.

    Args:
        mq_client: The LMCache multiprocess mode message queue client
        request_type: The request type
        payloads: The request payloads

    Returns:
        A messaging future for the request
    """
    response_cls = get_response_class(request_type)
    return mq_client.submit_request(request_type, payloads, response_cls)
def get_lmcache_chunk_size(
    mq_client: MessageQueueClient,
) -> int:
    """
    Ask the LMCache server for its data-chunk size.

    Args:
        mq_client: The LMCache multiprocess mode message queue client

    Returns:
        An integer representing the LMCache chunk size
    """
    # Blocks until the server answers.
    return send_lmcache_request(mq_client, RequestType.GET_CHUNK_SIZE, []).result()
@dataclass
class LoadStoreOp:
block_ids: list[int]
"""Block ids for the load/store operation"""
token_ids: list[int] | None = None
"""Token IDs for the load/store operation (token mode)"""
block_hashes: list[bytes] | None = None
"""Block hashes for the load/store operation (hash mode)"""
start: int = 0
"""Start token index (token mode only)"""
end: int = 0
"""End token index (token mode only)"""
def __len__(self) -> int:
return len(self.block_ids)
# Response payload types carried by the LMCache server futures.
StoreResult = bool  # truthy iff the store request succeeded (see get_finished)
RetrieveResult = list[bool]  # per-key success flags; checked with all()
LookupResult = int  # number of chunks matched by a lookup
class LMCacheMPSchedulerAdapter:
    """Scheduler-side client of the LMCache multiprocess server.

    Submits lookup requests (which also pin the matched KV chunks inside
    LMCache) and tracks their futures, plus per-request session cleanup.
    """

    def __init__(
        self,
        server_url: str,
        context: zmq.Context,
        model_name: str,
        world_size: int,
        kv_rank: int,
        vllm_block_size: int,
    ):
        """
        Args:
            server_url: The server URL for the LMCache message queue
            context: The ZMQ context
            model_name: The model name used for LMCache keys
            world_size: The world size used for LMCache keys
            kv_rank: The kv rank used for LMCache keys
            vllm_block_size: The block size used in vLLM
        """
        self.mq_client = MessageQueueClient(server_url, context)

        # At most one outstanding lookup future per request id.
        self.lookup_futures: dict[str, MessagingFuture[LookupResult]] = {}

        self.model_name = model_name
        self.world_size = world_size
        self.worker_id = kv_rank

        # The server dictates the chunk size; it must tile vLLM blocks.
        self.chunk_size = get_lmcache_chunk_size(self.mq_client)
        assert self.chunk_size % vllm_block_size == 0, (
            "LMCache chunk size should be a multiple of vLLM block size"
        )
        self.blocks_in_chunk = self.chunk_size // vllm_block_size

    @_lmcache_nvtx_annotate
    def maybe_submit_lookup_request(
        self,
        request_id: str,
        block_hashes: list[bytes] | None = None,
        token_ids: list[int] | None = None,
    ) -> None:
        """Submit a lookup to LMCache unless one is already in flight.

        Exactly one of ``block_hashes`` (hash-based vLLM: strided into one
        key per chunk) or ``token_ids`` (token-based vLLM: one key over the
        chunk-aligned prefix) must be provided.

        Side effect: the lookup essentially 'locks' the matching KV cache
        chunks inside LMCache for later retrieve operations.  Poll progress
        through `check_lookup_result` with the same ``request_id``.
        """
        if request_id in self.lookup_futures:
            # A lookup for this request is already outstanding.
            return

        assert (block_hashes is None) != (token_ids is None), (
            "Exactly one of block_hashes or token_ids must be provided"
        )

        if block_hashes is not None:
            # Hash mode: one hash-mode key per LMCache chunk.
            lookup_keys = [
                self._create_hash_key(chunk_hash, request_id=request_id)
                for chunk_hash in striding_block_hashes(
                    block_hashes, self.blocks_in_chunk
                )
            ]
        else:
            assert token_ids is not None
            # Token mode: only whole chunks can match, so truncate to a
            # chunk-aligned prefix; nothing to submit if it is empty.
            aligned_end = (len(token_ids) // self.chunk_size) * self.chunk_size
            if aligned_end == 0:
                return
            base_key = self._create_key(
                token_ids,
                start=0,
                end=aligned_end,
                request_id=request_id,
            )
            lookup_keys = [base_key.no_worker_id_version()]

        self.lookup_futures[request_id] = send_lmcache_request(
            self.mq_client,
            RequestType.LOOKUP,
            [lookup_keys],
        )

    @_lmcache_nvtx_annotate
    def check_lookup_result(self, request_id: str) -> int | None:
        """Poll a lookup previously submitted via `maybe_submit_lookup_request`.

        Args:
            request_id: The ID of the lookup request

        Returns:
            The total number of prefix tokens matched in LMCache, or
            None while the lookup has not finished yet.
        """
        assert request_id in self.lookup_futures, (
            f"Lookup request for request_id={request_id} has not been submitted"
        )
        future = self.lookup_futures[request_id]
        if not future.query():
            # The server has not answered yet.
            return None
        matched_chunks = future.result()
        return matched_chunks * self.chunk_size

    def num_blocks_per_chunk(self) -> int:
        """Number of vLLM blocks covered by one LMCache data chunk."""
        return self.blocks_in_chunk

    def cleanup_lookup_result(self, request_id: str) -> None:
        """Drop the lookup future of a finished request (prevents a leak).

        Args:
            request_id: The ID of the finished request.
        """
        self.lookup_futures.pop(request_id, None)

    def end_session(self, request_id: str) -> None:
        """Notify the LMCache server to remove a finished request's session.

        Args:
            request_id: The ID of the finished request.
        """
        send_lmcache_request(
            self.mq_client,
            RequestType.END_SESSION,
            [request_id],
        )

    # Helper functions
    def _create_key(
        self,
        token_ids: list[int],
        start: int = 0,
        end: int = 0,
        request_id: str | None = None,
    ) -> IPCCacheEngineKey:
        """Build a token-mode IPC cache engine key."""
        return IPCCacheEngineKey(
            model_name=self.model_name,
            world_size=self.world_size,
            worker_id=self.worker_id,
            token_ids=tuple(token_ids),
            start=start,
            end=end,
            request_id=request_id,
        )

    def _create_hash_key(
        self, chunk_hash: bytes, request_id: str | None = None
    ) -> IPCCacheEngineKey:
        """Build a hash-mode key (scheduler hash keys carry no worker id)."""
        return IPCCacheEngineKey(
            model_name=self.model_name,
            world_size=self.world_size,
            worker_id=None,
            chunk_hash=chunk_hash,
            request_id=request_id,
        )
class LMCacheMPWorkerAdapter:
    """Worker-side client of the LMCache multiprocess server.

    Registers the GPU KV caches with the server and submits asynchronous
    store/retrieve requests, tracking their futures until completion.
    """

    def __init__(
        self,
        server_url: str,
        context: zmq.Context,
        model_name: str,
        world_size: int,
        kv_rank: int,
        vllm_block_size: int,
    ):
        """
        Args:
            server_url: The server URL for the LMCache message queue
            context: The ZMQ context
            model_name: The model name used for LMCache keys
            world_size: The world size used for LMCache keys
            kv_rank: The kv rank used for LMCache keys
            vllm_block_size: The block size used in vLLM
        """
        self.mq_client = MessageQueueClient(server_url, context)

        # Instance id for GPU worker (the worker process pid)
        self.instance_id = os.getpid()

        # Registered kv caches from vLLM
        self.kv_caches: dict[str, torch.Tensor] = {}

        # Request futures
        # request_id -> (future, other merged requests)
        # A batched submission produces one future keyed by the first
        # request id; the remaining ids ride along in the list.
        self.store_futures: dict[
            str, tuple[MessagingFuture[StoreResult], list[str]]
        ] = {}
        self.retrieve_futures: dict[
            str, tuple[MessagingFuture[RetrieveResult], list[str]]
        ] = {}

        # The store requests that have finished execution in LMCache
        self.finished_stores: set[str] = set()

        # The finished request ids that are passed via vLLM and also
        # have corresponding store requests submitted to LMCache before
        self.previously_finished: set[str] = set()

        self.model_name = model_name
        self.world_size = world_size
        self.worker_id = kv_rank

        # Read chunk size from lmcache
        chunk_size = get_lmcache_chunk_size(self.mq_client)
        assert chunk_size % vllm_block_size == 0, (
            "LMCache chunk size should be a multiple of vLLM block size"
        )
        self.blocks_in_chunk = chunk_size // vllm_block_size

    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """
        Register the kv caches with LMCache server

        Args:
            kv_caches: A dict of kv caches to register. The keys are the
                layer names and the values are the corresponding tensors.
        """
        # Register kv cache and send the request
        self.kv_caches = kv_caches
        logger.info("Registering kv caches")
        future = send_lmcache_request(
            self.mq_client,
            RequestType.REGISTER_KV_CACHE,
            [self.instance_id, wrap_kv_caches(kv_caches)],
        )
        # Block until the server acknowledges the registration.
        future.result()

    @_lmcache_nvtx_annotate
    def submit_store_request(
        self, request_id: str, op: LoadStoreOp, event: torch.cuda.Event
    ):
        """
        Submit a KV cache store request to LMCache

        Args:
            request_id: The ID of the request
            op: The LoadStoreOp describing the store operation.
            event: The CUDA event that is recorded after the current
                model inference step
        """
        if op.block_hashes is not None:
            # Hash mode
            chunk_hashes = list(
                striding_block_hashes(op.block_hashes, self.blocks_in_chunk)
            )
            keys = [
                self._create_hash_key(ch, request_id=request_id) for ch in chunk_hashes
            ]
        else:
            # Token mode
            assert op.token_ids is not None
            keys = [
                self._create_key(op.token_ids, op.start, op.end, request_id=request_id)
            ]
        future = send_lmcache_request(
            self.mq_client,
            RequestType.STORE,
            [keys, self.instance_id, op.block_ids, event.ipc_handle()],
        ).to_cuda_future()
        # Single (non-batched) submission: no merged request ids.
        self.store_futures[request_id] = (future, [])

    @_lmcache_nvtx_annotate
    def submit_retrieve_request(
        self, request_id: str, op: LoadStoreOp, event: torch.cuda.Event
    ):
        """
        Submit a KV cache retrieve request to LMCache

        Args:
            request_id: The ID of the request
            op: The LoadStoreOp describing the retrieve operation.
            event: The CUDA event that is recorded after the current
                model inference step
        """
        if op.block_hashes is not None:
            # Hash mode
            chunk_hashes = list(
                striding_block_hashes(op.block_hashes, self.blocks_in_chunk)
            )
            keys = [
                self._create_hash_key(ch, request_id=request_id) for ch in chunk_hashes
            ]
        else:
            # Token mode
            assert op.token_ids is not None
            keys = [
                self._create_key(op.token_ids, op.start, op.end, request_id=request_id)
            ]
        future = send_lmcache_request(
            self.mq_client,
            RequestType.RETRIEVE,
            [keys, self.instance_id, op.block_ids, event.ipc_handle()],
        ).to_cuda_future()
        # Single (non-batched) submission: no merged request ids.
        self.retrieve_futures[request_id] = (future, [])

    @_lmcache_nvtx_annotate
    def batched_submit_store_requests(
        self,
        request_ids: list[str],
        ops: list[LoadStoreOp],
        event: torch.cuda.Event,
    ):
        """
        Submit a batched store request to LMCache

        Args:
            request_ids: The IDs of the requests
            ops: The LoadStoreOps describing the store operations. Should have
                the same length as request_ids
            event: The CUDA event that is recorded after the current
                model inference step
        """
        all_keys: list[IPCCacheEngineKey] = []
        block_ids: list[int] = []
        for request_id, op in zip(request_ids, ops, strict=False):
            if op.block_hashes is not None:
                # Hash mode: one key per chunk.
                chunk_hashes = list(
                    striding_block_hashes(op.block_hashes, self.blocks_in_chunk)
                )
                keys = [
                    self._create_hash_key(ch, request_id=request_id)
                    for ch in chunk_hashes
                ]
                all_keys.extend(keys)
            else:
                # Token mode: a single key covering the op's token window.
                assert op.token_ids is not None
                all_keys.append(
                    self._create_key(
                        op.token_ids, op.start, op.end, request_id=request_id
                    )
                )
            block_ids.extend(op.block_ids)
        future = send_lmcache_request(
            self.mq_client,
            RequestType.STORE,
            [
                all_keys,
                self.instance_id,
                block_ids,
                event.ipc_handle(),
            ],
        ).to_cuda_future()
        # One future keyed by the first request id; the rest are merged.
        self.store_futures[request_ids[0]] = (future, list(request_ids[1:]))

    @_lmcache_nvtx_annotate
    def batched_submit_retrieve_requests(
        self,
        request_ids: list[str],
        ops: list[LoadStoreOp],
        event: torch.cuda.Event,
    ):
        """
        Submit a batched retrieve request to LMCache

        Args:
            request_ids: The IDs of the requests
            ops: The LoadStoreOps describing the retrieve operations. Should have
                the same length as request_ids
            event: The CUDA event that is recorded after the current
                model inference step
        """
        all_keys: list[IPCCacheEngineKey] = []
        block_ids: list[int] = []
        for request_id, op in zip(request_ids, ops, strict=False):
            if op.block_hashes is not None:
                # Hash mode: one key per chunk.
                chunk_hashes = list(
                    striding_block_hashes(op.block_hashes, self.blocks_in_chunk)
                )
                keys = [
                    self._create_hash_key(ch, request_id=request_id)
                    for ch in chunk_hashes
                ]
                all_keys.extend(keys)
            else:
                # Token mode: a single key covering the op's token window.
                assert op.token_ids is not None
                all_keys.append(
                    self._create_key(
                        op.token_ids, op.start, op.end, request_id=request_id
                    )
                )
            block_ids.extend(op.block_ids)
        future = send_lmcache_request(
            self.mq_client,
            RequestType.RETRIEVE,
            [
                all_keys,
                self.instance_id,
                block_ids,
                event.ipc_handle(),
            ],
        ).to_cuda_future()
        # One future keyed by the first request id; the rest are merged.
        self.retrieve_futures[request_ids[0]] = (future, list(request_ids[1:]))

    @_lmcache_nvtx_annotate
    def get_finished(
        self, finished_req_ids_from_engine: set[str]
    ) -> tuple[set[str] | None, set[str] | None]:
        """
        Check and get the finished store and retrieve requests.

        Args:
            finished_req_ids_from_engine: the set of request ids that are
                reported as finished from the vLLM engine side.

        Returns:
            A tuple of two sets:
            - The first set contains the finished store request ids. The returned
              store request ids MUST be seen before in the
              `finished_req_ids_from_engine`.
            - The second set contains the finished retrieve request ids.

        Notes:
            When enabling async scheduling in vLLM, the same request ID may appear
            multiple times in `finished_req_ids_from_engine`. The adapter should
            take care of deduplicating the request IDs and only return the request
            IDs that have not been returned before.
        """
        finished_stores = set()
        finished_retrieves = set()
        # Harvest completed store futures; a batched future marks every
        # request id it covered (primary + merged) as finished.
        for request_id, (s_future, other_reqs) in self.store_futures.items():
            if not s_future.query():
                continue
            s_result = s_future.result()
            finished_stores.add(request_id)
            finished_stores.update(other_reqs)
            if not s_result:
                # TODO: add error handling here
                logger.error(
                    "Something went wrong when processing the "
                    "store request for request_id=%s",
                    request_id,
                )
        # Same harvesting for retrieve futures; a retrieve result is a list
        # of per-key success flags.
        for request_id, (r_future, other_reqs) in self.retrieve_futures.items():
            if not r_future.query():
                continue
            r_result = r_future.result()
            finished_retrieves.add(request_id)
            finished_retrieves.update(other_reqs)
            if not all(r_result):
                # TODO: add error handling here
                logger.error(
                    "Something went wrong when processing the "
                    "retrieve request for request_id=%s, result=%s",
                    request_id,
                    r_result,
                )
        # Remove the finished requests from the tracking dicts
        for request_id in finished_stores:
            self.store_futures.pop(request_id, None)
        for request_id in finished_retrieves:
            self.retrieve_futures.pop(request_id, None)
        # Update the internal states
        self.finished_stores.update(finished_stores)
        ret_stores = set()
        for req_id in finished_req_ids_from_engine:
            if req_id in self.finished_stores or req_id in self.store_futures:
                # A store was submitted for this request: defer reporting it
                # until that store has completed (see helper below).
                self.previously_finished.add(req_id)
            else:
                # No store pending or recorded: safe to report right away.
                ret_stores.add(req_id)
        # Calculate the final finished stores
        ret_stores.update(self._update_and_get_finished_store())
        return ret_stores, finished_retrieves

    def num_blocks_per_chunk(self) -> int:
        """
        Returns:
            The number of vllm blocks in a LMCache data chunk
        """
        return self.blocks_in_chunk

    def shutdown(self):
        """
        Shutdown the LMCache MP worker adapter
        """
        logger.info("Unregistering kv caches")
        # Synchronously unregister before closing the message queue client.
        send_lmcache_request(
            self.mq_client, RequestType.UNREGISTER_KV_CACHE, [self.instance_id]
        ).result()
        self.mq_client.close()

    # Helper functions
    def _update_and_get_finished_store(
        self,
    ) -> set[str]:
        """Converge the internal states about finished stores
        and returns the 'safe finished store request ids' back

        A request id is 'safe' once BOTH its store completed in LMCache
        (finished_stores) and the engine reported it finished
        (previously_finished); safe ids are removed from both sets.
        """
        safe_finished_s = self.finished_stores.intersection(self.previously_finished)
        self.finished_stores.difference_update(self.previously_finished)
        self.previously_finished.difference_update(safe_finished_s)
        return safe_finished_s

    def _create_key(
        self,
        token_ids: list[int],
        start: int = 0,
        end: int = 0,
        request_id: str | None = None,
    ) -> IPCCacheEngineKey:
        """Convert token IDs to an IPC cache engine key"""
        return IPCCacheEngineKey(
            model_name=self.model_name,
            world_size=self.world_size,
            worker_id=self.worker_id,
            token_ids=tuple(token_ids),
            start=start,
            end=end,
            request_id=request_id,
        )

    def _create_hash_key(
        self, chunk_hash: bytes, request_id: str | None = None
    ) -> IPCCacheEngineKey:
        """Create a hash-mode IPC cache engine key"""
        # Unlike the scheduler adapter, worker hash keys carry the worker id.
        return IPCCacheEngineKey(
            model_name=self.model_name,
            world_size=self.world_size,
            worker_id=self.worker_id,
            chunk_hash=chunk_hash,
            request_id=request_id,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/multi_process_adapter.py",
"license": "Apache License 2.0",
"lines": 577,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import enum
from collections.abc import Iterable
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Literal
import torch
import zmq
from lmcache.integration.vllm.utils import mla_enabled
from lmcache.utils import init_logger as lmcache_init_logger
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorBase_V1,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.v1.attention.backend import AttentionMetadata
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.outputs import KVConnectorOutput
from vllm.v1.request import RequestStatus
from vllm.v1.utils import ConstantList
try:
from lmcache.integration.vllm.vllm_multi_process_adapter import (
LMCacheMPSchedulerAdapter,
LMCacheMPWorkerAdapter,
LoadStoreOp,
)
except ImportError:
from vllm.distributed.kv_transfer.kv_connector.v1.lmcache_integration import (
LMCacheMPSchedulerAdapter,
LMCacheMPWorkerAdapter,
LoadStoreOp,
)
if TYPE_CHECKING:
from vllm.distributed.kv_events import KVCacheEvent
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
KVConnectorPromMetrics,
KVConnectorStats,
PromMetric,
PromMetricT,
)
from vllm.forward_context import ForwardContext
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
logger = lmcache_init_logger(__name__)
# Helper functions
def reformat_block_ids(block_ids: tuple[list[int], ...] | None) -> list[int]:
if block_ids is None:
return []
assert isinstance(block_ids, tuple), (
f"Expected block_ids to be a tuple of lists, but got {type(block_ids)}"
)
if len(block_ids) > 1:
raise RuntimeError(
"LMCacheMPConnector only works without hybrid kv cache manager. "
"Please pass --disable-hybrid-kv-cache-manager when starting vllm"
)
return block_ids[0]
def extract_world_size_and_kv_rank(
    world_size: int,
    rank: int,
    vllm_config: VllmConfig,
) -> tuple[int, int]:
    """Map vLLM's world size / rank onto the KV world size / rank.

    For MLA models tensor parallelism does not change the KV caches, so the
    TP dimension is divided out of both numbers; otherwise the vLLM values
    are returned unchanged.
    """
    if not mla_enabled(vllm_config.model_config):
        return world_size, rank
    # vLLM constructs TP groups first and builds the other parallel groups
    # on top of them, e.g. TP=4, PP=2 gives
    #   TP groups: [0, 1, 2, 3], [4, 5, 6, 7]
    #   PP groups: [0, 4], [1, 5], [2, 6], [3, 7]
    # so dividing by tp_size collapses each TP group to one KV rank.
    tp_size = vllm_config.parallel_config.tensor_parallel_size
    return world_size // tp_size, rank // tp_size
def create_scheduler_adapter(
    server_url: str, zmq_context: zmq.Context, vllm_config: VllmConfig
) -> LMCacheMPSchedulerAdapter:
    """Build the scheduler-side LMCache adapter from the vLLM config."""
    kv_world_size, kv_rank = extract_world_size_and_kv_rank(
        vllm_config.parallel_config.world_size,
        vllm_config.parallel_config.rank,
        vllm_config,
    )
    return LMCacheMPSchedulerAdapter(
        server_url,
        zmq_context,
        vllm_config.model_config.model,
        kv_world_size,
        kv_rank,
        vllm_config.cache_config.block_size,
    )
def create_worker_adapter(
    server_url: str, zmq_context: zmq.Context, vllm_config: VllmConfig
) -> LMCacheMPWorkerAdapter:
    """Build the worker-side LMCache adapter from the vLLM config."""
    kv_world_size, kv_rank = extract_world_size_and_kv_rank(
        vllm_config.parallel_config.world_size,
        vllm_config.parallel_config.rank,
        vllm_config,
    )
    return LMCacheMPWorkerAdapter(
        server_url,
        zmq_context,
        vllm_config.model_config.model,
        kv_world_size,
        kv_rank,
        vllm_config.cache_config.block_size,
    )
class LMCacheMPRequestState(enum.Enum):
    """Lifecycle of a request's external (LMCache) KV-cache load.

    State machine:

    PREFETCHING -- update_state_after_alloc --> WAITING_FOR_LOAD
    WAITING_FOR_LOAD -- process_loading_requests --> READY
    """

    # Initial state; set when an LMCacheMPRequestTracker is created.
    PREFETCHING = enum.auto()
    # Blocks allocated; is_ready_for_retrieving() requires this state.
    WAITING_FOR_LOAD = enum.auto()
    # Nothing (more) to load; needs_retrieve() returns False in this state.
    READY = enum.auto()
@dataclass
class LMCacheMPRequestTracker:
    """Per-request bookkeeping for the scheduler-side connector.

    NOTE: this class uses vLLM data structures and should be part of the
    vLLM integration code.
    """

    request_id: str

    # Read-only lists to track the token ids and block hashes
    all_token_ids: ConstantList[int]
    block_hashes: ConstantList["BlockHash"]

    # Block ids and hashes will be updated at update_states_after_alloc and
    # during the generation
    allocated_block_ids: list[int] = field(default_factory=list)

    # Number of scheduled tokens in this request. We keep tracking this to
    # avoid saving half-full blocks.
    num_scheduled_tokens: int = 0

    # Number of blocks stored will be initialized when lookup the external
    # hit tokens and will be updated when processing new requests and cached
    # requests.
    num_stored_blocks: int = 0

    # Staging load operation -- save vllm and lmcache hit tokens during lookup
    num_vllm_hit_blocks: int = 0
    num_lmcache_hit_blocks: int = 0

    # Main state
    state: LMCacheMPRequestState = LMCacheMPRequestState.PREFETCHING

    def __init__(self, request: "Request"):
        # Hand-written __init__ (suppresses the dataclass-generated one);
        # everything except the identity/token views starts from scratch.
        self.request_id = request.request_id
        self.all_token_ids = request.all_token_ids
        self.block_hashes = ConstantList(request.block_hashes)
        self.allocated_block_ids = []
        self.num_stored_blocks = 0
        self.num_vllm_hit_blocks = 0
        self.num_lmcache_hit_blocks = 0
        self.state = LMCacheMPRequestState.PREFETCHING

    ####
    # Check the state of the request
    ####
    def needs_retrieve(self) -> bool:
        """True when LMCache hit more blocks than vLLM and the request is
        not READY yet (used by update_state_after_alloc)."""
        if self.state == LMCacheMPRequestState.READY:
            return False
        return self.num_lmcache_hit_blocks > self.num_vllm_hit_blocks

    def is_ready_for_retrieving(self) -> bool:
        """True when a retrieve can be issued now
        (used by process_loading_requests)."""
        return (
            self.needs_retrieve()
            and self.state == LMCacheMPRequestState.WAITING_FOR_LOAD
        )

    ####
    # Update internal states
    ####
    def increase_num_scheduled_tokens(self, num_new_tokens: int):
        # Track scheduled tokens so half-full blocks are never stored.
        self.num_scheduled_tokens += num_new_tokens

    def increase_num_stored_blocks(self, num_new_blocks: int):
        """Bump the stored-block counter (called when processing the cached
        requests)."""
        self.num_stored_blocks += num_new_blocks

    def append_block_ids(
        self,
        new_block_ids: list[int],
    ):
        """Extend the allocated block ids (called when processing the cached
        requests)."""
        self.allocated_block_ids.extend(new_block_ids)

    ####
    # For debugging
    ####
    def __repr__(self) -> str:
        rendered_fields = ", ".join(
            (
                f"request_id={self.request_id}",
                f"num_tokens={len(self.all_token_ids)}",
                f"num_block_hashes={len(self.block_hashes)}",
                f"num_allocated_blocks={len(self.allocated_block_ids)}",
                f"num_stored_blocks={self.num_stored_blocks}",
                f"vllm_hit_blocks={self.num_vllm_hit_blocks}",
                f"lmcache_hit_blocks={self.num_lmcache_hit_blocks}",
                f"state={self.state}",
            )
        )
        return f"LMCacheMPRequestTracker({rendered_fields})"

    def __str__(self) -> str:
        return repr(self)
@dataclass
class LMCacheMPRequestMetadata:
    """One store or retrieve operation bound for the worker side."""

    request_id: str
    direction: Literal["STORE", "RETRIEVE"]
    op: LoadStoreOp

    @staticmethod
    def GetStoreMetadata(
        tracker: LMCacheMPRequestTracker,
        blocks_in_chunk: int,
        vllm_block_size: int,
    ) -> "LMCacheMPRequestMetadata | None":
        """
        Generate the store metadata for the current request tracker.

        Args:
            tracker: The request tracker to generate the metadata from.
            blocks_in_chunk: the number of blocks in a LMCache data chunk
            vllm_block_size: the block size used in vLLM

        Returns:
            The metadata, or None when no whole new chunk can be stored yet.
        """
        # A block may be stored once it has a hash, an allocated block id,
        # and all of its tokens have been scheduled.
        # NOTE: the invariant is that `num_stored_blocks` is always a
        # multiple of `blocks_in_chunk`.
        # TODO: This should be checked everytime we update the num_stored_blocks
        storable_blocks = min(
            len(tracker.block_hashes),
            len(tracker.allocated_block_ids),
            tracker.num_scheduled_tokens // vllm_block_size,
        )
        pending_blocks = storable_blocks - tracker.num_stored_blocks
        full_chunks = pending_blocks // blocks_in_chunk
        if full_chunks < 1:
            return None

        first_block = tracker.num_stored_blocks
        last_block = first_block + full_chunks * blocks_in_chunk
        op = LoadStoreOp(
            token_ids=list(tracker.all_token_ids),
            block_ids=tracker.allocated_block_ids[first_block:last_block],
            start=first_block * vllm_block_size,
            end=last_block * vllm_block_size,
        )
        # Record the newly covered blocks on the tracker.
        tracker.increase_num_stored_blocks(last_block - first_block)
        return LMCacheMPRequestMetadata(
            request_id=tracker.request_id,
            direction="STORE",
            op=op,
        )

    @staticmethod
    def GetRetrieveMetadata(
        tracker: LMCacheMPRequestTracker,
        blocks_in_chunk: int,
        vllm_block_size: int,
    ) -> "LMCacheMPRequestMetadata | None":
        """
        Generate the retrieve metadata for the current request tracker.

        Args:
            tracker: The request tracker to generate the metadata from.
            blocks_in_chunk: the number of blocks in a LMCache data chunk
            vllm_block_size: the block size used in vLLM

        Returns:
            The metadata, or None when nothing needs to be retrieved.
        """
        if not tracker.is_ready_for_retrieving():
            return None
        # |---------------------|-----------------|----------------|
        # | num_vllm_hit_blocks |
        # | lmcache chunk 1     | lmcache chunk 2                  |
        #           | need to retrieve                             |
        # Retrieval starts at the chunk boundary at or below vLLM's own hit.
        first_block = (
            tracker.num_vllm_hit_blocks // blocks_in_chunk * blocks_in_chunk
        )
        last_block = tracker.num_lmcache_hit_blocks
        assert last_block % blocks_in_chunk == 0, (
            "The number of LMCache hit blocks should be a multiple of the "
            "number of blocks in a lmcache chunk. "
        )
        assert len(tracker.block_hashes) >= last_block, (
            "The number of block hashes should be greater than or equal to the "
            "number of LMCache hit blocks. "
        )
        if last_block <= first_block:
            return None
        op = LoadStoreOp(
            token_ids=list(tracker.all_token_ids),
            block_ids=tracker.allocated_block_ids[first_block:last_block],
            start=first_block * vllm_block_size,
            end=last_block * vllm_block_size,
        )
        return LMCacheMPRequestMetadata(
            request_id=tracker.request_id,
            direction="RETRIEVE",
            op=op,
        )
class LMCacheMPConnectorMetadata(KVConnectorMetadata):
    """Per-step connector metadata: the store/retrieve ops to execute."""

    def __init__(self):
        super().__init__()
        self.requests: list[LMCacheMPRequestMetadata] = []

    def add_request_metadata(self, request_metadata: LMCacheMPRequestMetadata):
        """Queue one request's load/store operation for this step."""
        self.requests.append(request_metadata)

    def __len__(self):
        return len(self.requests)

    # For debugging
    def __str__(self):
        rendered = [
            f"RequestMetadata(request_id={meta.request_id}, "
            f"direction={meta.direction}, "
            f"num_blocks={len(meta.op)}, "
            f"block_ids={meta.op.block_ids})"
            for meta in self.requests
        ]
        return "[" + "\n".join(rendered) + "]"

    def __repr__(self):
        return str(self)
class LMCacheMPConnector(KVConnectorBase_V1):
    """
    The connector for LMCache multi-process mode.
    Extra configs (kv_transfer_config.extra_config):
    - lmcache.mp.host: the host of the LMCache server.
    - lmcache.mp.port: the port of the LMCache server.
    """
    def __init__(
        self,
        vllm_config: "VllmConfig",
        role: KVConnectorRole,
        kv_cache_config: "KVCacheConfig | None" = None,
    ):
        super().__init__(vllm_config, role, kv_cache_config)
        assert vllm_config.kv_transfer_config is not None
        server_host = vllm_config.kv_transfer_config.get_from_extra_config(
            "lmcache.mp.host", "tcp://localhost"
        )
        server_port = vllm_config.kv_transfer_config.get_from_extra_config(
            "lmcache.mp.port", 5555
        )
        server_url = f"{server_host}:{server_port}"
        zmq_context = zmq.Context.instance()
        # Exactly one adapter is created per process, depending on the role:
        # the scheduler process also keeps per-request trackers, while the
        # worker process only talks to the LMCache server.
        if self.role == KVConnectorRole.SCHEDULER:
            self.scheduler_adapter = create_scheduler_adapter(
                server_url, zmq_context, vllm_config
            )
            self.request_trackers: dict[str, LMCacheMPRequestTracker] = {}
        elif self.role == KVConnectorRole.WORKER:
            self.worker_adapter = create_worker_adapter(
                server_url, zmq_context, vllm_config
            )
        else:
            raise ValueError(f"Unknown KVConnectorRole: {self.role}")
        # vLLM KV-cache block size in tokens; used to convert between token
        # counts and block counts throughout this class.
        self.vllm_block_size = vllm_config.cache_config.block_size
    @property
    def role(self) -> KVConnectorRole:
        # _role is set by KVConnectorBase_V1.__init__.
        return self._role
    # ==============================
    # Worker-side methods
    # ==============================
    def _get_connector_metadata(self) -> KVConnectorMetadata:
        """Get the connector metadata.
        This function should only be called inside the connector.
        Returns:
            ConnectorMetadata: the connector metadata.
        """
        # Should only be called while set to valid metadata.
        assert self._connector_metadata is not None
        return self._connector_metadata
    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """
        Initialize with the KV caches. Useful for pre-registering the
        KV Caches in the KVConnector (e.g. for NIXL).
        Args:
            kv_caches: dictionary of layer names, kv cache
        """
        logger.info("Registering kv caches!")
        self.worker_adapter.register_kv_caches(kv_caches)
        return
    def start_load_kv(self, forward_context: "ForwardContext", **kwargs: Any) -> None:
        """
        Start loading the KV cache from the connector to vLLM's paged
        KV buffer. This is called from the forward context before the
        forward pass to enable async loading during model execution.
        Args:
            forward_context (ForwardContext): the forward context.
            **kwargs: additional arguments for the load operation
        Note:
            The number of elements in kv_caches and layer_names should be
            the same.
        """
        metadata = self._get_connector_metadata()
        assert isinstance(metadata, LMCacheMPConnectorMetadata)
        # Collect only the RETRIEVE operations; STORE ops are handled in
        # wait_for_save.
        request_ids = []
        ops = []
        for meta in metadata.requests:
            if meta.direction != "RETRIEVE":
                continue
            request_ids.append(meta.request_id)
            ops.append(meta.op)
        if len(request_ids) == 0:
            return
        # NOTE(review): the interprocess CUDA event recorded here presumably
        # lets the LMCache process order its copies after the work already
        # queued on this stream -- confirm against the worker adapter.
        with torch.cuda.stream(torch.cuda.current_stream()):
            event = torch.cuda.Event(interprocess=True)
            event.record()
        self.worker_adapter.batched_submit_retrieve_requests(request_ids, ops, event)
    def wait_for_layer_load(self, layer_name: str) -> None:
        """
        Block until the KV for a specific layer is loaded into vLLM's
        paged buffer. This is called from within attention layer to ensure
        async copying from start_load_kv is complete.
        This interface will be useful for layer-by-layer pipelining.
        Args:
            layer_name: the name of that layer
        """
        # No per-layer pipelining: loads are awaited elsewhere.
        return
    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: AttentionMetadata,
        **kwargs: Any,
    ) -> None:
        """
        Start saving a layer of KV cache from vLLM's paged buffer
        to the connector. This is called from within attention layer to
        enable async copying during execution.
        Args:
            layer_name (str): the name of the layer.
            kv_layer (torch.Tensor): the paged KV buffer of the current
                layer in vLLM.
            attn_metadata (AttentionMetadata): the attention metadata.
            **kwargs: additional arguments for the save operation.
        """
        # No per-layer saving: stores are submitted in bulk by wait_for_save.
        return
    def wait_for_save(self):
        """
        Block until all the save operations is done. This is called
        as the forward context exits to ensure that the async saving
        from save_kv_layer is complete before finishing the forward.
        This prevents overwrites of paged KV buffer before saving done.
        """
        metadata = self._get_connector_metadata()
        assert isinstance(metadata, LMCacheMPConnectorMetadata)
        # Collect only the STORE operations; RETRIEVEs were submitted in
        # start_load_kv.
        request_ids = []
        ops = []
        for meta in metadata.requests:
            if meta.direction != "STORE":
                continue
            request_ids.append(meta.request_id)
            ops.append(meta.op)
        if len(request_ids) == 0:
            return
        with torch.cuda.stream(torch.cuda.current_stream()):
            event = torch.cuda.Event(interprocess=True)
            event.record()
        self.worker_adapter.batched_submit_store_requests(request_ids, ops, event)
    def get_finished(
        self, finished_req_ids: set[str]
    ) -> tuple[set[str] | None, set[str] | None]:
        """
        Notifies worker-side connector ids of requests that have
        finished generating tokens on the worker.
        The scheduler process (via the Executors) will use this output
        to track which workers are done.
        Returns:
            ids of requests that have finished asynchronous transfer
            (requests that previously returned True from request_finished()),
            tuple of (sending/saving ids, recving/loading ids).
            The finished saves/sends req ids must belong to a set provided in a
            call to this method (this call or a prior one).
        """
        val = self.worker_adapter.get_finished(finished_req_ids)
        # logger.error("Finished req ids: %s, %s", val[0], val[1])
        return val
    def get_block_ids_with_load_errors(self) -> set[int]:
        """
        Get the set of block IDs that failed to load.
        Returns:
            Set of block IDs that encountered load errors.
            Empty set if no load errors occurred.
        Notes:
            - Applies to both sync- and async-loading requests.
            - Async loading: failed blocks may be reported in any forward pass
              up to and including the pass where the request ID is returned by
              `get_finished()`. Even if failures occur, the request must still
              be reported via `get_finished()`, and the failed block IDs must
              appear here no later than that same pass.
            - Sync loading: failed blocks should be reported in the forward
              pass in which they are detected.
        """
        # TODO: add error tracking
        return set()
    def shutdown(self):
        """
        Shutdown the connector. This is called when the worker process
        is shutting down to ensure that all the async operations are
        completed and the connector is cleaned up properly.
        """
        # hasattr guard: only the WORKER role owns a worker_adapter.
        if hasattr(self, "worker_adapter"):
            self.worker_adapter.shutdown()
        return None
    def get_kv_connector_stats(self) -> "KVConnectorStats | None":
        """
        Get the KV connector stats collected during the last interval.
        """
        return None
    # ==============================
    # Scheduler-side methods
    # ==============================
    def get_num_new_matched_tokens(
        self,
        request: "Request",
        num_computed_tokens: int,
    ) -> tuple[int | None, bool]:
        """
        Get number of new tokens that can be loaded from the
        external KV cache beyond the num_computed_tokens.
        Args:
            request (Request): the request object.
            num_computed_tokens (int): the number of locally
                computed tokens for this request
        Returns:
            A tuple with the following elements:
                - An optional number of tokens that can be loaded from the
                  external KV cache beyond what is already computed.
                  If None, it means that the connector needs more time to
                  determine the number of matched tokens, and the scheduler
                  should query for this request again later.
                - `True` if external KV cache tokens will be loaded
                  asynchronously (between scheduler steps). Must be
                  'False' if the first element is 0.
        Notes:
            The connector should only consider the largest prefix of prompt-
            tokens for which KV cache is actually available at the time of the
            call. If the cache cannot be loaded for some tokens (e.g., due to
            connectivity issues or eviction), those tokens must not be taken
            into account.
        """
        tracker = self._get_or_create_request_tracker(request)
        # TODO: support loading KV for preempted requests in the future
        if request.status == RequestStatus.PREEMPTED:
            return 0, False
        # Lookup is asynchronous: submit (at most once) then poll the result.
        self.scheduler_adapter.maybe_submit_lookup_request(
            request.request_id,
            token_ids=list(request.all_token_ids),
        )
        ret = self.scheduler_adapter.check_lookup_result(request.request_id)
        if ret is None:
            # Result not ready yet; ask the scheduler to retry later.
            return None, True
        if ret == 0:
            return 0, False
        # LMCache reports hits in whole chunks, so the hit token count must be
        # chunk-aligned.
        assert (
            ret % (self.scheduler_adapter.num_blocks_per_chunk() * self.vllm_block_size)
            == 0
        )
        # Update num stored blocks for the tracker
        num_vllm_blocks = num_computed_tokens // self.vllm_block_size
        num_lmcache_blocks = ret // self.vllm_block_size
        tracker.increase_num_stored_blocks(num_lmcache_blocks)
        # Save the vllm and lmcache hit tokens
        tracker.num_vllm_hit_blocks = num_vllm_blocks
        tracker.num_lmcache_hit_blocks = num_lmcache_blocks
        need_to_load = max(0, ret - num_computed_tokens)
        logger.debug(
            "vLLM hit is: %d, Need to load is %d", num_computed_tokens, need_to_load
        )
        return need_to_load, need_to_load > 0
    def update_state_after_alloc(
        self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int
    ):
        """
        Update KVConnector state after block allocation.
        If get_num_new_matched_tokens previously returned True for a
        request, this function may be called twice for that same request -
        first when blocks are allocated for the connector tokens to be
        asynchronously loaded into, and second when any additional blocks
        are allocated, after the load/transfer is complete.
        Args:
            request (Request): the request object.
            blocks (KVCacheBlocks): the blocks allocated for the request.
            num_external_tokens (int): the number of tokens that will be
                loaded from the external KV cache.
        """
        # NOTE: the `blocks` are NEW BLOCKS allocated for this request.
        tracker = self._get_request_tracker(request.request_id)
        block_ids = reformat_block_ids(blocks.get_block_ids())
        # No matter we need to retrieve or not, we need to update
        # the block ids into the tracker
        tracker.append_block_ids(block_ids)
        # Update the state of the tracker
        condition = tracker.needs_retrieve()
        if tracker.state == LMCacheMPRequestState.PREFETCHING:
            # If need to retrieve, change to WAITING_FOR_LOAD
            # Otherwise, change to READY
            tracker.state = (
                LMCacheMPRequestState.WAITING_FOR_LOAD
                if condition
                else LMCacheMPRequestState.READY
            )
        # Clean up lookup future in scheduler adapter
        self.scheduler_adapter.cleanup_lookup_result(request.request_id)
    def build_connector_meta(
        self, scheduler_output: SchedulerOutput
    ) -> KVConnectorMetadata:
        """
        Build the connector metadata for this step.
        This function should NOT modify fields in the scheduler_output.
        Also, calling this function will reset the state of the connector.
        Args:
            scheduler_output (SchedulerOutput): the scheduler output object.
        """
        metadata = LMCacheMPConnectorMetadata()
        # Order matters: pending retrieves first, then stores for new and
        # cached (continuing) requests.
        self._process_retrieve_requests(metadata)
        self._process_new_requests(scheduler_output, metadata)
        self._process_cached_requests(scheduler_output, metadata)
        if len(metadata) > 0:
            logger.debug("Final connector metadata: %s", metadata)
        return metadata
    def update_connector_output(self, connector_output: KVConnectorOutput):
        """
        Update KVConnector state from worker-side connectors output.
        Args:
            connector_output (KVConnectorOutput): the worker-side
                connectors output.
        """
        return
    def request_finished(
        self,
        request: "Request",
        block_ids: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """
        Called exactly once when a request has finished, before its blocks are
        freed.
        The connector may assumes responsibility for freeing the blocks
        asynchronously by returning True.
        Returns:
            True if the request is being saved/sent asynchronously and blocks
            should not be freed until the request_id is returned from
            get_finished().
            Optional KVTransferParams to be included in the request outputs
            returned by the engine.
        """
        # Clean up request tracker to prevent memory leak
        self._cleanup_request_tracker(request.request_id)
        # Notify LMCache to end the session for this request
        self.scheduler_adapter.end_session(request.request_id)
        # Always defer block freeing until the worker reports completion.
        return True, None
    def take_events(self) -> Iterable["KVCacheEvent"]:
        """
        Take the KV cache events from the connector.
        Yields:
            New KV cache events since the last call.
        """
        return ()
    @classmethod
    def get_required_kvcache_layout(cls, vllm_config: "VllmConfig") -> str | None:
        """
        Get the required KV cache layout for this connector.
        Args:
            vllm_config (VllmConfig): the vllm config.
        Returns:
            str: the required KV cache layout. e.g. HND, or NHD.
            None if the connector does not require a specific layout.
        """
        if cls is KVConnectorBase_V1:
            raise TypeError(
                "get_required_kvcache_layout should not be called "
                "on the abstract base class"
            )
        return None
    def get_finished_count(self) -> int | None:
        """
        Get the count of requests expected to complete send/receive operations
        via this connector. This method is used to initialize the
        KVOutputAggregator, overwriting the default world_size.
        Returns:
            int: expected sending or receiving completion count.
        """
        return None
    @classmethod
    def build_kv_connector_stats(
        cls, data: dict[str, Any] | None = None
    ) -> "KVConnectorStats | None":
        """
        KVConnectorStats resolution method. This method allows dynamically
        registered connectors to return their own KVConnectorStats object,
        which can implement custom aggregation logic on the data dict.
        """
        return None
    @classmethod
    def build_prom_metrics(
        cls,
        vllm_config: "VllmConfig",
        metric_types: dict[type["PromMetric"], type["PromMetricT"]],
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
    ) -> "KVConnectorPromMetrics | None":
        """
        Create a KVConnectorPromMetrics subclass which should register
        per-connector Prometheus metrics and implement observe() to
        expose connector transfer stats via Prometheus.
        """
        return None
    ##############################
    # Helper functions
    ##############################
    def _process_retrieve_requests(
        self,
        metadata: LMCacheMPConnectorMetadata,
    ) -> None:
        """Emit RETRIEVE metadata for every tracker waiting on a load and
        mark it READY."""
        blocks_per_chunk = self.scheduler_adapter.num_blocks_per_chunk()
        for request_tracker in self.request_trackers.values():
            if request_tracker.state != LMCacheMPRequestState.WAITING_FOR_LOAD:
                continue
            r_metadata = LMCacheMPRequestMetadata.GetRetrieveMetadata(
                request_tracker,
                blocks_per_chunk,
                vllm_block_size=self.vllm_block_size,
            )
            if r_metadata is not None:
                metadata.add_request_metadata(r_metadata)
            # Transition even when there is nothing to retrieve this step.
            request_tracker.state = LMCacheMPRequestState.READY
    def _process_new_requests(
        self,
        scheduler_output: SchedulerOutput,
        metadata: LMCacheMPConnectorMetadata,
    ) -> None:
        """Account newly scheduled requests and emit STORE metadata for any
        newly completed chunks."""
        blocks_per_chunk = self.scheduler_adapter.num_blocks_per_chunk()
        for new_request in scheduler_output.scheduled_new_reqs:
            request_tracker = self._get_request_tracker(new_request.req_id)
            num_new_tokens = scheduler_output.num_scheduled_tokens[new_request.req_id]
            request_tracker.increase_num_scheduled_tokens(num_new_tokens)
            r_meta = LMCacheMPRequestMetadata.GetStoreMetadata(
                request_tracker, blocks_per_chunk, self.vllm_block_size
            )
            if r_meta is not None:
                metadata.add_request_metadata(r_meta)
    def _process_cached_requests(
        self,
        scheduler_output: SchedulerOutput,
        metadata: LMCacheMPConnectorMetadata,
    ) -> None:
        """Account continuing (cached) requests and emit STORE metadata for
        any newly completed chunks."""
        blocks_per_chunk = self.scheduler_adapter.num_blocks_per_chunk()
        cached_reqs = scheduler_output.scheduled_cached_reqs
        for idx, request_id in enumerate(cached_reqs.req_ids):
            request_tracker = self._get_request_tracker(request_id)
            # Update block ids
            new_block_ids = reformat_block_ids(cached_reqs.new_block_ids[idx])
            # Resumed requests already have their blocks in the tracker, so
            # skip appending to avoid duplicates.
            if request_id not in cached_reqs.resumed_req_ids:
                request_tracker.append_block_ids(new_block_ids)
            # Update new scheduled tokens
            # NOTE(review): this reads `num_computed_tokens[idx]` but treats
            # it as the number of newly scheduled tokens -- confirm against
            # SchedulerOutput semantics.
            num_new_tokens = cached_reqs.num_computed_tokens[idx]
            request_tracker.increase_num_scheduled_tokens(num_new_tokens)
            r_meta = LMCacheMPRequestMetadata.GetStoreMetadata(
                request_tracker, blocks_per_chunk, self.vllm_block_size
            )
            if r_meta is not None:
                metadata.add_request_metadata(r_meta)
    def _get_request_tracker(self, request_id: str) -> LMCacheMPRequestTracker:
        """Return the existing tracker for request_id; assert if absent."""
        assert request_id in self.request_trackers, (
            f"Request tracker for request_id {request_id} not found. "
        )
        return self.request_trackers[request_id]
    def _get_or_create_request_tracker(
        self, request: "Request"
    ) -> LMCacheMPRequestTracker:
        """Return the tracker for `request`, creating a fresh one if needed
        (including after a preemption)."""
        request_id = request.request_id
        # Remove the old trackers that is created before the preemption
        if (
            request.status == RequestStatus.PREEMPTED
            and request_id in self.request_trackers
        ):
            tracker = self.request_trackers[request_id]
            # NOTE: since this function may be called multiple times
            # for a single request (because get_num_new_matched_tokens
            # may be called multiple times) for the same request, we
            # will only do the remove if the tracker is not in the "fresh"
            # state, i.e., PREFETCHING
            if tracker.state != LMCacheMPRequestState.PREFETCHING:
                self.request_trackers.pop(request_id)
        if request_id not in self.request_trackers:
            new_tracker = LMCacheMPRequestTracker(request)
            self.request_trackers[request_id] = new_tracker
        return self.request_trackers[request_id]
    def _cleanup_request_tracker(self, request_id: str) -> None:
        """
        Clean up request tracker and associated lookup future for a request.
        This should be called when a request is finished to prevent memory leak.
        """
        # Clean up request tracker
        if self.request_trackers.pop(request_id, None):
            logger.debug(
                "[KVConnector] Cleaned up request_tracker for request %s",
                request_id,
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_mp_connector.py",
"license": "Apache License 2.0",
"lines": 803,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/kv_offload/arc_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections import OrderedDict
from collections.abc import Iterable
from vllm.v1.core.kv_cache_utils import BlockHash
from vllm.v1.kv_offload.abstract import (
LoadStoreSpec,
OffloadingEvent,
OffloadingManager,
PrepareStoreOutput,
)
from vllm.v1.kv_offload.backend import Backend, BlockStatus
class ARCOffloadingManager(OffloadingManager):
    """
    An OffloadingManager implementing the ARC (Adaptive Replacement Cache)
    eviction policy with a pluggable backend.
    Data Structures:
        T1: Recent cache containing blocks accessed once.
        T2: Frequent cache containing blocks accessed multiple times.
        B1/B2: Ghost lists tracking recently evicted blocks from T1/T2.
        target_t1_size: Adaptive target size for the T1 partition.
    Algorithm Flow:
    1. Cache lookup (lookup):
       Searches T1 and T2 for block hashes and counts consecutive hits
       until a miss or non-ready block is encountered.
    2. Cache touch (touch) - Adaptive Learning:
       For each block_hash (in reverse order):
       - If in T1: Move to T2 (promotion from recent to frequent).
       - If in T2: Move to MRU position (end of queue).
       - If in B1 ghost list: Increase target_t1_size.
       - If in B2 ghost list: Decrease target_t1_size.
    3. Block eviction (prepare_store) - Adaptive Replacement:
       Determines eviction source based on adaptive target:
       - If T1 size > target_t1_size: Evict from T1, add to B1.
       - Otherwise: Evict from T2, add to B2.
       Finally, bound each ghost list size.
    4. Block insertion (prepare_store):
       New blocks are always inserted into T1 and removed from B1/B2 if
       present. Blocks may later be promoted to T2 during touch operations.
    Adaptive Behavior:
    The algorithm self-tunes the recency vs. frequency trade-off:
    - B1 hit: Recent access patterns matter more → increase T1.
    - B2 hit: Frequent access patterns matter more → decrease T1.
    """
    def __init__(self, backend: Backend, enable_events: bool = False):
        self.backend: Backend = backend
        # Adaptive target for |T1|; float so touch() can adjust fractionally.
        self.target_t1_size: float = 0.0
        # OrderedDicts act as LRU queues: front (first item) = LRU,
        # back (last item) = MRU.
        self.t1: OrderedDict[BlockHash, BlockStatus] = OrderedDict()
        self.t2: OrderedDict[BlockHash, BlockStatus] = OrderedDict()
        # block_hash -> None (only care about presence)
        self.b1: OrderedDict[BlockHash, None] = OrderedDict()
        self.b2: OrderedDict[BlockHash, None] = OrderedDict()
        # Event log is only kept when the caller opted in.
        self.events: list[OffloadingEvent] | None = [] if enable_events else None
        # Capacity snapshot taken at construction; assumes the backend is
        # empty at this point.
        self.cache_capacity: int = self.backend.get_num_free_blocks()
    def lookup(self, block_hashes: Iterable[BlockHash]) -> int | None:
        """Count the longest ready prefix of block_hashes present in T1/T2."""
        hit_count = 0
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            if block is None or not block.is_ready:
                break
            hit_count += 1
        return hit_count
    def prepare_load(self, block_hashes: Iterable[BlockHash]) -> LoadStoreSpec:
        """Pin the given blocks (ref_cnt += 1) and return a load spec.
        All blocks must be present and ready; unpin via complete_load().
        """
        # NOTE(review): block_hashes is iterated here and then passed to the
        # backend -- a one-shot iterator would arrive exhausted; callers
        # presumably pass a list. Confirm.
        blocks = []
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            assert block is not None, f"Block {block_hash!r} not found in cache"
            assert block.is_ready, f"Block {block_hash!r} is not ready for reading"
            block.ref_cnt += 1
            blocks.append(block)
        return self.backend.get_load_store_spec(block_hashes, blocks)
    def touch(self, block_hashes: Iterable[BlockHash]):
        """Register accesses: promote/refresh cached blocks and adapt
        target_t1_size on ghost-list hits (processed in reverse order)."""
        for block_hash in reversed(list(block_hashes)):
            if block_hash in self.t1:
                block = self.t1.pop(block_hash)
                if not block.is_ready:
                    # block was just prepared to be stored, not really touched twice
                    # keep it in T1 and mark as most recently used
                    self.t1[block_hash] = block
                else:
                    self.t2[block_hash] = block
            elif block_hash in self.t2:
                self.t2.move_to_end(block_hash)
            elif block_hash in self.b1:
                # Ghost hit in B1: recency matters; grow T1 target, capped at
                # total capacity.
                delta = max(1, len(self.b2) / len(self.b1))
                self.target_t1_size = min(
                    self.target_t1_size + delta, self.cache_capacity
                )
                # move to MRU position (end) to keep it fresh in the ghost list
                self.b1.move_to_end(block_hash)
            elif block_hash in self.b2:
                # Ghost hit in B2: frequency matters; shrink T1 target,
                # floored at 0.
                delta = max(1, len(self.b1) / len(self.b2))
                self.target_t1_size = max(self.target_t1_size - delta, 0)
                # move to MRU position (end) to keep it fresh in the ghost list
                self.b2.move_to_end(block_hash)
    def complete_load(self, block_hashes: Iterable[BlockHash]):
        """Unpin blocks previously pinned by prepare_load (ref_cnt -= 1)."""
        for block_hash in block_hashes:
            block = self.t1.get(block_hash) or self.t2.get(block_hash)
            assert block is not None, f"Block {block_hash!r} not found"
            assert block.ref_cnt > 0, f"Block {block_hash!r} ref_cnt is already 0"
            block.ref_cnt -= 1
    def prepare_store(
        self, block_hashes: Iterable[BlockHash]
    ) -> PrepareStoreOutput | None:
        """Allocate backend space for the not-yet-cached blocks, evicting per
        ARC policy if needed.
        Returns None if eviction cannot free enough space (all candidate
        blocks are pinned).
        """
        block_hashes_to_store = []
        for block_hash in block_hashes:
            if block_hash not in self.t1 and block_hash not in self.t2:
                block_hashes_to_store.append(block_hash)
        if not block_hashes_to_store:
            # Everything is already cached; nothing to store or evict.
            return PrepareStoreOutput(
                block_hashes_to_store=[],
                store_spec=self.backend.get_load_store_spec([], []),
                block_hashes_evicted=[],
            )
        num_blocks_to_evict = (
            len(block_hashes_to_store) - self.backend.get_num_free_blocks()
        )
        to_evict = []
        while num_blocks_to_evict > 0:
            block_to_evict = None
            if len(self.t1) >= int(self.target_t1_size):
                # try to evict the least recently used (oldest) block from T1
                for block_hash, block in self.t1.items():
                    if block.ref_cnt == 0:
                        block_to_evict = (block_hash, block)
                        eviction_t = self.t1
                        eviction_b = self.b1
                        break
            if not block_to_evict:
                # try to evict the least recently used (oldest) block from T2
                # NOTE(review): if T1 was skipped because it is under target,
                # a full-of-pinned T2 returns None even when T1 still holds
                # evictable blocks -- confirm this is intended.
                for block_hash, block in self.t2.items():
                    if block.ref_cnt == 0:
                        block_to_evict = (block_hash, block)
                        eviction_t = self.t2
                        eviction_b = self.b2
                        break
                else:
                    # cannot evict enough blocks, cache is full of in-use items
                    return None
            block_hash, block = block_to_evict
            del eviction_t[block_hash]
            # Remember the evicted hash in the matching ghost list.
            eviction_b[block_hash] = None
            to_evict.append(block_hash)
            self.backend.free(block)
            num_blocks_to_evict -= 1
        # Bound each ghost list to the cache capacity (drop oldest entries).
        for b in [self.b1, self.b2]:
            for i in range(len(b) - self.cache_capacity):
                b.popitem(last=False)
        if to_evict and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=to_evict,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=True,
                )
            )
        blocks = self.backend.allocate_blocks(block_hashes_to_store)
        assert len(blocks) == len(block_hashes_to_store), (
            "Backend did not allocate the expected number of blocks"
        )
        # New blocks always enter T1 and leave the ghost lists.
        for block_hash, block in zip(block_hashes_to_store, blocks):
            self.t1[block_hash] = block
            self.b1.pop(block_hash, None)
            self.b2.pop(block_hash, None)
        store_spec = self.backend.get_load_store_spec(block_hashes_to_store, blocks)
        return PrepareStoreOutput(
            block_hashes_to_store=block_hashes_to_store,
            store_spec=store_spec,
            block_hashes_evicted=to_evict,
        )
    def complete_store(self, block_hashes: Iterable[BlockHash], success: bool = True):
        """Finalize a store: on success mark the pending blocks ready; on
        failure drop the pending blocks and free their backend space."""
        stored_block_hashes: list[BlockHash] = []
        if success:
            for block_hash in block_hashes:
                block = self.t1.get(block_hash) or self.t2.get(block_hash)
                if block is not None and not block.is_ready:
                    # NOTE(review): setting ref_cnt = 0 appears to be what
                    # flips BlockStatus.is_ready -- confirm against the
                    # BlockStatus definition.
                    block.ref_cnt = 0
                    stored_block_hashes.append(block_hash)
        else:
            for block_hash in block_hashes:
                block = self.t1.pop(block_hash, None)
                if block is None:
                    block = self.t2.pop(block_hash, None)
                if block is not None and not block.is_ready:
                    self.backend.free(block)
        if stored_block_hashes and self.events is not None:
            self.events.append(
                OffloadingEvent(
                    block_hashes=stored_block_hashes,
                    block_size=self.backend.block_size,
                    medium=self.backend.medium,
                    removed=False,
                )
            )
    def take_events(self) -> Iterable[OffloadingEvent]:
        """Yield and clear the buffered events (no-op when events disabled)."""
        if self.events is not None:
            yield from self.events
            self.events.clear()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/kv_offload/arc_manager.py",
"license": "Apache License 2.0",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/disaggregated_encoder/disagg_epd_proxy.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
disagg_epd_proxy.py
Proxy that routes OpenAI-compatible "/v1/chat/completions" requests to two
clusters:
• encode (multimodal feature extraction)
• decode (language-model inference)
For MM input we:
1. Extract *every* image/audio item.
2. Fire N concurrent requests to the encoder cluster
(one request per item, with **all text removed**).
3. Wait for all of them to succeed.
4. Forward the *original* request to a decode server.
"""
from __future__ import annotations
import argparse
import asyncio
import logging
import os
import random
import uuid
from collections.abc import AsyncIterator
import aiohttp
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse
###############################################################################
# FastAPI app & global state
###############################################################################
# Root logger config for the whole proxy process; DEBUG level by default.
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s %(levelname)s: %(message)s"
)
logger = logging.getLogger("proxy")
app = FastAPI()
# Shared aiohttp client sessions, created in on_startup() and closed in
# on_shutdown(). prefill_session stays None when no prefill URLs are given.
encode_session: aiohttp.ClientSession | None = None
prefill_session: aiohttp.ClientSession | None = None
decode_session: aiohttp.ClientSession | None = None
###############################################################################
# Utils
###############################################################################
# Content-part types that are treated as multimodal input.
MM_TYPES = {"image_url", "audio_url", "input_audio"}


def extract_mm_items(request_data: dict) -> list[dict]:
    """
    Return *all* image/audio items that appear anywhere in `messages`.
    Each returned dict looks like:
        { "type": "image_url", "image_url": {...} }
    """
    found: list[dict] = []
    for message in request_data.get("messages", []):
        parts = message.get("content")
        # Plain-string content carries no multimodal parts.
        if isinstance(parts, list):
            found.extend(part for part in parts if part.get("type") in MM_TYPES)
    return found
async def fanout_encoder_primer(
    orig_request: dict,
    e_urls: list[str],
    req_id: str,
) -> None:
    """
    1. Build one request *per MM item* with all text removed.
    2. Send them concurrently to the encode cluster.
    3. Raise if any of them fails.
    Raises:
        HTTPException: 502 if a sub-request raised, otherwise the failing
            sub-request's status code.
    """
    logger.info("[%s] Processing multimodal items...", req_id)
    mm_items = extract_mm_items(orig_request)
    if not mm_items:
        logger.info("[%s] No multimodal items, skipping encoder", req_id)
        return  # nothing to do
    logger.info("[%s] got %d multimodal items...", req_id, len(mm_items))
    tasks = []
    # Round-robin over encode servers to distribute load a bit
    url_cycle = (e_urls[i % len(e_urls)] for i in range(len(mm_items)))
    for idx, (item, target_url) in enumerate(zip(mm_items, url_cycle)):
        # Derive a *child* request id: <parent>:<index>:<random-short>
        child_req_id = f"{req_id}:{idx}:{uuid.uuid4().hex[:6]}"
        headers = {"x-request-id": child_req_id}
        encoder_req = {
            # You *may* need to keep additional fields
            "model": orig_request.get("model"),
            "messages": [
                {"role": "user", "content": [item]},
            ],
            # Only need 1 token so the server actually runs the encoder path
            "max_tokens": 1,
            "stream": False,
        }
        tasks.append(
            encode_session.post(
                f"{target_url}/v1/chat/completions",
                json=encoder_req,
                headers=headers,
            )
        )
    results = await asyncio.gather(*tasks, return_exceptions=True)
    # Fail fast if any sub-request failed
    # NOTE(review): the ClientResponse objects in `results` are never
    # explicitly released/closed -- confirm connection reuse is acceptable.
    for idx, r in enumerate(results):
        if isinstance(r, Exception):
            logger.error(
                "[%s] Encoder request #%d raised exception: %s",
                req_id,
                idx,
                r,
                exc_info=r,
            )
            raise HTTPException(
                status_code=502, detail=f"Encoder request failed: {str(r)}"
            )
        if r.status != 200:
            try:
                detail = await r.text()
            except Exception:
                detail = "<unable to read body>"
            logger.error(
                "[%s] Encoder request #%d returned status %s: %s",
                req_id,
                idx,
                r.status,
                detail,
            )
            raise HTTPException(
                status_code=r.status,
                detail=f"Encoder request failed: {detail}",
            )
    logger.info(
        "[%s] All %d encoder requests completed successfully", req_id, len(mm_items)
    )
async def maybe_prefill(
    req_data: dict,
    p_url: str,
    req_id: str,
) -> dict:
    """Run the optional prefill stage.

    When a prefill URL is configured, forward the request to the prefill
    server and, if the response carries ``kv_transfer_params`` (used by the
    NIXL connector to hand off KV state), copy them into ``req_data``.
    Otherwise return ``req_data`` unchanged for the decode stage.
    """
    if not p_url:
        return req_data
    logger.info("[%s] Processing through prefill: %s", req_id, p_url)
    response = await process_prefill_stage(req_data, p_url, req_id)
    # for nixl connector to facilitate kv transfer...
    body = await response.json()
    params = body.get("kv_transfer_params", {})
    if params:
        req_data["kv_transfer_params"] = params
    return req_data
async def process_prefill_stage(
    req_data: dict,
    p_url: str,
    req_id: str,
) -> "aiohttp.ClientResponse":
    """Send a prefill-only request to `p_url` and return the raw response
    (the caller reads kv_transfer_params from its JSON body).
    Raises:
        HTTPException: with the upstream status on a non-200 reply, or 500
            on any other failure.
    """
    logger.info("[%s] Sending prefill request to: %s", req_id, p_url)
    # Shallow copy: only top-level keys are modified below.
    prefill_request = req_data.copy()
    prefill_request["kv_transfer_params"] = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "remote_engine_id": None,
        "remote_block_ids": None,
        "remote_host": None,
        "remote_port": None,
    }
    # Prefill only needs one generated token; strip streaming options.
    prefill_request["stream"] = False
    prefill_request["max_tokens"] = 1
    if "max_completion_tokens" in prefill_request:
        prefill_request["max_completion_tokens"] = 1
    if "stream_options" in prefill_request:
        del prefill_request["stream_options"]
    headers = {"x-request-id": req_id}
    try:
        prefill_response = await prefill_session.post(
            f"{p_url}/v1/chat/completions", json=prefill_request, headers=headers
        )
        # raise_for_status() covers >= 400; the explicit check below still
        # catches other non-200 statuses (e.g. 201/204).
        prefill_response.raise_for_status()
        if prefill_response.status != 200:
            error_text = await prefill_response.text()
            logger.error(
                "[%s] Prefill request failed with status %d: %s",
                req_id,
                prefill_response.status,
                error_text,
            )
            raise HTTPException(
                status_code=prefill_response.status,
                detail={"error": "Prefill request failed", "message": error_text},
            )
        logger.info("[%s] Prefill request completed successfully", req_id)
        return prefill_response
    except Exception as e:
        # NOTE(review): this also converts the HTTPException raised above
        # into a 500 -- confirm that is intended.
        logger.error("Prefill processing failed: %s", str(e))
        raise HTTPException(
            status_code=500,
            detail={"error": "Prefill processing error", "message": str(e)},
        ) from e
###############################################################################
# Middleware for request/response logging
###############################################################################
@app.middleware("http")
async def log_requests(request: Request, call_next):
    """Log every request entering and every response/exception leaving
    the proxy, keyed by the x-request-id header (or a fresh UUID)."""
    req_id = request.headers.get("x-request-id", str(uuid.uuid4()))
    client_host = request.client.host if request.client else "unknown"
    logger.info(
        ">>> [%s] %s %s from %s",
        req_id,
        request.method,
        request.url.path,
        client_host,
    )
    try:
        response = await call_next(request)
        logger.info(
            "<<< [%s] %s %s completed with status %d",
            req_id,
            request.method,
            request.url.path,
            response.status_code,
        )
        return response
    except Exception as exc:
        logger.exception(
            "!!! [%s] %s %s failed with error: %s",
            req_id,
            request.method,
            request.url.path,
            str(exc),
        )
        raise
###############################################################################
# FastAPI lifecycle
###############################################################################
@app.on_event("startup")
async def on_startup() -> None:
    """Create the shared aiohttp client sessions for the three clusters.
    The prefill session is only created when prefill URLs are configured.
    """
    global encode_session, prefill_session, decode_session
    # Very generous total timeout; limit=0 means unlimited connections.
    timeout = aiohttp.ClientTimeout(total=100_000)
    connector = aiohttp.TCPConnector(limit=0, force_close=False)
    # NOTE(review): all sessions share one TCPConnector; closing the first
    # session may close the connector for the others -- confirm
    # connector_owner semantics on shutdown.
    encode_session = aiohttp.ClientSession(timeout=timeout, connector=connector)
    if app.state.p_urls:
        # only setup if prefill instance(s) exist
        prefill_session = aiohttp.ClientSession(timeout=timeout, connector=connector)
    decode_session = aiohttp.ClientSession(timeout=timeout, connector=connector)
@app.on_event("shutdown")
async def on_shutdown() -> None:
global encode_session, prefill_session, decode_session
if encode_session:
await encode_session.close()
if prefill_session:
await prefill_session.close()
if decode_session:
await decode_session.close()
###############################################################################
# Core forwarding
###############################################################################
async def forward_non_stream(
    req_data: dict, req_id: str, e_urls: list[str], p_url: str, d_url: str
) -> dict:
    """Run the E -> P -> D pipeline for one request and return decode's JSON body."""
    try:
        # Step 1: Process through Encoder instance (if has MM input)
        await fanout_encoder_primer(req_data, e_urls, req_id)
        # Step 2: Process through Prefill instance
        req_data = await maybe_prefill(req_data, p_url, req_id)
        # Step 3: Process through Decode instance
        logger.info("[%s] Forwarding to decode: %s", req_id, d_url)
        decode_headers = {"x-request-id": req_id}
        decode_endpoint = f"{d_url}/v1/chat/completions"
        # Non-streaming response
        async with decode_session.post(
            decode_endpoint, json=req_data, headers=decode_headers
        ) as resp:
            resp.raise_for_status()
            return await resp.json()
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("[%s] Error in forward_non_stream: %s", req_id, str(e))
        raise HTTPException(status_code=500, detail=f"Proxy error: {str(e)}") from e
async def forward_stream(
    req_data: dict, req_id: str, e_urls: list[str], p_url: str, d_url: str
) -> AsyncIterator[str]:
    """Run the E -> P -> D pipeline and yield decoded chunks from decode's stream."""
    try:
        # Step 1: Process through Encoder instance (if has MM input)
        await fanout_encoder_primer(req_data, e_urls, req_id)
        # Step 2: Process through Prefill instance
        req_data = await maybe_prefill(req_data, p_url, req_id)
        # Step 3: Process through Decode instance
        logger.info("[%s] Starting streaming from decode: %s", req_id, d_url)
        decode_headers = {"x-request-id": req_id}
        decode_endpoint = f"{d_url}/v1/chat/completions"
        # Streaming response
        async with decode_session.post(
            decode_endpoint,
            json=req_data,
            headers=decode_headers,
        ) as resp:
            resp.raise_for_status()
            async for chunk in resp.content.iter_chunked(1024):
                if not chunk:
                    continue
                yield chunk.decode("utf-8", errors="ignore")
        logger.info("[%s] Streaming completed", req_id)
    except HTTPException:
        logger.exception("[%s] HTTPException in forward_stream", req_id)
        raise
    except Exception as e:
        logger.exception("[%s] Error in forward_stream: %s", req_id, str(e))
        raise HTTPException(
            status_code=500, detail=f"Proxy streaming error: {str(e)}"
        ) from e
###############################################################################
# Public routes
###############################################################################
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
try:
req_data = await request.json()
req_id = request.headers.get("x-request-id", str(uuid.uuid4()))
e_urls = app.state.e_urls # we want the full list for fan-out
p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
d_url = random.choice(app.state.d_urls)
is_streaming = req_data.get("stream", False)
if is_streaming:
return StreamingResponse(
forward_stream(req_data, req_id, e_urls, p_url, d_url),
media_type="text/event-stream",
)
result = await forward_non_stream(req_data, req_id, e_urls, p_url, d_url)
return JSONResponse(content=result)
except HTTPException:
raise
except Exception as e:
logger.exception("Error in chat_completions endpoint: %s", str(e))
raise HTTPException(
status_code=500, detail=f"Request processing error: {str(e)}"
) from e
@app.get("/v1/models")
async def list_models():
async with decode_session.get(f"{app.state.d_urls[0]}/v1/models") as resp:
resp.raise_for_status()
return await resp.json()
@app.get("/health")
async def health_check():
async def healthy(urls):
if not urls:
return "empty"
for u in urls:
try:
async with encode_session.get(f"{u}/health") as resp:
resp.raise_for_status()
except Exception:
return "unhealthy"
return "healthy"
e_status, p_status, d_status = await asyncio.gather(
healthy(app.state.e_urls), healthy(app.state.p_urls), healthy(app.state.d_urls)
)
overall_healthy = all(
status != "unhealthy" for status in (e_status, p_status, d_status)
)
status_code = 200 if overall_healthy else 503
return JSONResponse(
{
"proxy": "healthy",
"encode_cluster": e_status,
"prefill_cluster": p_status,
"decode_cluster": d_status,
},
status_code=status_code,
)
###############################################################################
# Simple profiler fan-out (unchanged except for sessions)
###############################################################################
async def _post_if_available(
    session: aiohttp.ClientSession,
    url: str,
    payload: dict,
    headers: dict,
) -> dict | None:
    """
    POST `payload` to `url`.
    Returns
    -------
    • The decoded JSON body on success (2xx)
    • None if the endpoint does not exist (404)
    • Raises for anything else (HTTP errors, network errors, ...).
    """
    try:
        # Fix: use the response as an async context manager so the pooled
        # connection is always released — the previous bare `await session.post`
        # never released the response on the early-return 404 path.
        async with session.post(url, json=payload, headers=headers) as resp:
            if resp.status == 404:  # profiling disabled on that server
                logger.warning("Profiling endpoint missing on %s", url)
                return None
            resp.raise_for_status()
            # content_type=None: accept non-"application/json" content types.
            return await resp.json(content_type=None)
    except aiohttp.ClientResponseError as exc:
        # Defensive: map any 404 raised elsewhere to the same "missing" result.
        if exc.status == 404:
            logger.warning("Profiling endpoint missing on %s", url)
            return None
        raise
    # NOTE: the original trailing `except Exception: raise` was a no-op and
    # has been removed; network errors still propagate unchanged.
async def _profile_cmd(cmd: str, payload: dict, e_url: str, p_url: str, d_url: str):
    """
    Fire & forget to both clusters, tolerate 404.
    """
    headers = {"Authorization": f"Bearer {os.getenv('OPENAI_API_KEY', '')}"}

    def _cluster_task(session, base_url):
        # Build one profiling POST per cluster, all sharing payload/headers.
        return _post_if_available(session, f"{base_url}/{cmd}_profile", payload, headers)

    # Without a prefill cluster, substitute a no-op awaitable resolving to None,
    # which matches the "route missing" result shape.
    prefill_task = (
        _cluster_task(prefill_session, p_url) if p_url is not None else asyncio.sleep(0)
    )
    encode_res, prefill_res, decode_res = await asyncio.gather(
        _cluster_task(encode_session, e_url),
        prefill_task,
        _cluster_task(decode_session, d_url),
    )
    # If *all* clusters said “I don’t have that route”, surface an error
    if encode_res is None and prefill_res is None and decode_res is None:
        raise HTTPException(
            status_code=503,
            detail="Profiling endpoints are disabled on all clusters",
        )
    return {
        "encode": encode_res,  # may be None
        "prefill": prefill_res,  # may be None
        "decode": decode_res,  # may be None
    }
@app.post("/start_profile")
async def start_profile(request: Request):
body = await request.json()
# TODO: handle multi urls properly
e_url = random.choice(app.state.e_urls)
p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
d_url = random.choice(app.state.d_urls)
return await _profile_cmd("start", body, e_url, p_url, d_url)
@app.post("/stop_profile")
async def stop_profile(request: Request):
body = await request.json()
# TODO: handle multi urls properly
e_url = random.choice(app.state.e_urls)
p_url = random.choice(app.state.p_urls) if app.state.p_urls else None
d_url = random.choice(app.state.d_urls)
return await _profile_cmd("stop", body, e_url, p_url, d_url)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", default="0.0.0.0")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument(
"--encode-servers-urls",
required=True,
help='Comma-separated encode URLs ("http://e1:8001,http://e2:8001")',
)
parser.add_argument(
"--prefill-servers-urls",
required=True,
help=(
'Comma-separated prefill URLs ("http://p1:8003,http://p2:8004") ',
'to enable E->P->D, set "disable" or "none" to enable E->PD',
),
)
parser.add_argument(
"--decode-servers-urls",
required=True,
help='Comma-separated decode URLs ("http://d1:8005,http://d2:8006")',
)
args = parser.parse_args()
app.state.e_urls = [
u.strip() for u in args.encode_servers_urls.split(",") if u.strip()
]
app.state.d_urls = [
u.strip() for u in args.decode_servers_urls.split(",") if u.strip()
]
# handle prefill instances
if args.prefill_servers_urls.lower() in ("disable", "none", ""):
app.state.p_urls = []
logger.info(
"Disaggregated prefill phase explicitly disabled by user. Running E + PD..."
)
else:
app.state.p_urls = [
u.strip() for u in args.prefill_servers_urls.split(",") if u.strip()
]
logger.info("Disaggregated prefill phase is enabled. Running E + P + D...")
logger.info("Proxy listening on %s:%s", args.host, args.port)
logger.info("Encode servers: %s", app.state.e_urls)
logger.info("Prefill instances %s", app.state.p_urls)
logger.info("Decode servers: %s", app.state.d_urls)
uvicorn.run(
app,
host=args.host,
port=args.port,
log_level="info",
loop="uvloop",
access_log=True,
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/disaggregated_encoder/disagg_epd_proxy.py",
"license": "Apache License 2.0",
"lines": 504,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/ec_connector/integration/test_epd_correctness.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
EPD Correctness Test
Tests that EPD (Encoder-Prefill-Decode) disaggregation produces the same
outputs as a baseline single instance.
Usage:
# Baseline mode (saves outputs):
python test_epd_correctness.py \
--service_url http://localhost:8000 \
--model_name Qwen/Qwen2.5-VL-3B-Instruct \
--mode baseline \
--baseline_file .vllm_epd_baseline.txt
# Disagg mode (compares outputs):
python test_epd_correctness.py \
--service_url http://localhost:8000 \
--model_name Qwen/Qwen2.5-VL-3B-Instruct \
--mode disagg \
--baseline_file .vllm_epd_baseline.txt
"""
import argparse
import json
import os
import time
import openai
import requests
from vllm.assets.image import ImageAsset
from vllm.multimodal.utils import encode_image_url
MAX_OUTPUT_LEN = 256  # max_tokens used for every chat-completion request below

# Sample prompts with multimodal content
# NOTE(review): images are resized to a fixed 1280x720 — presumably so baseline
# and disagg runs feed byte-identical inputs; confirm against the encoder path.
image_1 = ImageAsset("stop_sign").pil_image.resize((1280, 720))
image_2 = ImageAsset("cherry_blossom").pil_image.resize((1280, 720))
# Local image next to this script, exercised via a file:// URL below.
image_local_path = f"{os.path.dirname(os.path.abspath(__file__))}/hato.jpg"
SAMPLE_PROMPTS_MM: list[dict] = [
    {
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": encode_image_url(image_1)},
                    },
                    {"type": "text", "text": "What's in this image?"},
                ],
            }
        ],
        # "description" doubles as the comparison key in main()
        "description": "Single image query",
    },
    {
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": encode_image_url(image_2)},
                    },
                    {
                        "type": "image_url",
                        "image_url": {"url": f"file://{image_local_path}"},
                    },
                    {"type": "text", "text": "Describe these 2 images in detail."},
                ],
            }
        ],
        "description": "2 images with detailed query",
    },
]
# Text-only prompts for mixed testing
SAMPLE_PROMPTS_TEXT: list[dict] = [
    {
        "messages": [{"role": "user", "content": "What is the capital of France?"}],
        "description": "Simple text-only query",
    },
    {
        "messages": [
            {"role": "user", "content": "Explain quantum computing in simple terms."}
        ],
        "description": "Text-only explanation request",
    },
]
def check_vllm_server(url: str, timeout=5, retries=10) -> bool:
    """Poll the vLLM server until it reports ready.

    Args:
        url: The URL to check (usually /health or /healthcheck endpoint)
        timeout: Timeout in seconds for each request
        retries: Number of retries if the server is not ready

    Returns:
        True if the server is ready, False otherwise
    """
    for attempt in range(retries):
        try:
            response = requests.get(url, timeout=timeout)
            if response.status_code == 200:
                print(f"Server is ready at {url}")
                return True
            print(
                f"Attempt {attempt + 1}/{retries}: Server returned "
                f"status code {response.status_code}"
            )
        except requests.exceptions.RequestException as e:
            print(f"Attempt {attempt + 1}/{retries}: Error connecting: {e}")
        # Fix: only wait between attempts — the original slept 2s even after
        # the final failure, delaying the caller's error for no reason.
        if attempt < retries - 1:
            time.sleep(2)  # Wait before retrying
    return False
def run_chat_completion(
    base_url: str,
    model_name: str,
    messages: list,
    max_tokens: int = MAX_OUTPUT_LEN,
) -> str:
    """Send one deterministic chat-completion request and return its text.

    Args:
        base_url: Base URL of the vLLM server
        model_name: Name of the model
        messages: Messages for chat completion
        max_tokens: Maximum tokens to generate

    Returns:
        Generated text content
    """
    client = openai.OpenAI(api_key="EMPTY", base_url=base_url)
    # temperature=0.0 plus a fixed seed keeps the output reproducible so the
    # baseline/disagg comparison is an exact string match.
    response = client.chat.completions.create(
        model=model_name,
        messages=messages,
        max_tokens=max_tokens,
        temperature=0.0,
        seed=42,
    )
    return response.choices[0].message.content
def _save_baseline(output_strs: dict, baseline_file: str) -> None:
    # Persist baseline outputs as JSON so a later disagg run can compare.
    print(f"\nSaving baseline outputs to {baseline_file}")
    try:
        with open(baseline_file, "w") as json_file:
            json.dump(output_strs, json_file, indent=4)
        print("✅ Baseline outputs saved successfully")
    except OSError as e:
        print(f"Error writing to file: {e}")
        raise


def _compare_with_baseline(output_strs: dict, baseline_file: str) -> None:
    # Load baseline outputs and assert disagg outputs match them exactly.
    print(f"\nLoading baseline outputs from {baseline_file}")
    baseline_outputs = None
    try:
        with open(baseline_file) as json_file:
            baseline_outputs = json.load(json_file)
    except OSError as e:
        print(f"Error reading from file: {e}")
        raise
    # Verify outputs match
    print("\nComparing disagg outputs with baseline...")
    assert isinstance(baseline_outputs, dict), "Baseline outputs should be a dict"
    assert len(baseline_outputs) == len(output_strs), (
        f"Length mismatch: baseline has {len(baseline_outputs)}, "
        f"disagg has {len(output_strs)}"
    )
    all_match = True
    for key, baseline_output in baseline_outputs.items():
        assert key in output_strs, f"{key} not in disagg outputs"
        disagg_output = output_strs[key]
        if baseline_output == disagg_output:
            print(f"✅ {key}: MATCH")
        else:
            print(f"❌ {key}: MISMATCH")
            print(f" Baseline: {baseline_output}")
            print(f" Disagg: {disagg_output}")
            all_match = False
    assert all_match, "❌❌Disagg outputs do not match baseline!❌❌"
    print("\n✅ All outputs match! Test PASSED")


def main():
    """Main test function: run prompts, then save (baseline) or compare (disagg)."""
    parser = argparse.ArgumentParser(
        description="EPD correctness test - compare disagg vs baseline"
    )
    parser.add_argument(
        "--service_url",
        type=str,
        required=True,
        help="The vLLM service URL (e.g., http://localhost:8000)",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        required=True,
        help="Model name",
    )
    parser.add_argument(
        "--mode",
        type=str,
        default="baseline",
        choices=["baseline", "baseline_pd", "disagg"],
        help="Mode: baseline/baseline_pd (saves outputs) or disagg (compares outputs)",
    )
    parser.add_argument(
        "--baseline_file",
        type=str,
        default=".vllm_epd_baseline.txt",
        help="File to save/load baseline outputs",
    )
    parser.add_argument(
        "--use_mm_prompts",
        action="store_true",
        help="Use multimodal prompts (default: use text-only for quick testing)",
    )
    args = parser.parse_args()
    print(f"Service URL: {args.service_url}")
    print(f"Model: {args.model_name}")
    print(f"Mode: {args.mode}")
    print(f"Output file: {args.baseline_file}")
    print(f"Use MM prompts: {args.use_mm_prompts}")
    # Determine health check endpoint
    # (the Nixl toy proxy exposes /healthcheck; everything else uses /health)
    if args.mode == "baseline_pd":
        health_check_url = f"{args.service_url}/healthcheck"
    else:
        health_check_url = f"{args.service_url}/health"
    # Fix: only require a pre-existing baseline file in disagg mode. The
    # original ran this check unconditionally, so baseline mode raised before
    # it could ever create the file it was supposed to produce.
    if args.mode == "disagg" and not os.path.exists(args.baseline_file):
        raise ValueError(
            f"In disagg mode, the output file {args.baseline_file} from "
            "baseline does not exist. Run baseline mode first."
        )
    # Check if server is ready
    if not check_vllm_server(health_check_url):
        raise RuntimeError(f"vLLM server at {args.service_url} is not ready!")
    # Select prompts to use
    if args.use_mm_prompts:
        test_prompts = SAMPLE_PROMPTS_MM
        print("Using multimodal prompts")
    else:
        test_prompts = SAMPLE_PROMPTS_TEXT
        print("Using text-only prompts for quick testing")
    # Run completions
    service_url = f"{args.service_url}/v1"
    output_strs = {}
    for i, prompt_data in enumerate(test_prompts):
        print(
            f"\nRunning prompt {i + 1}/{len(test_prompts)}: "
            f"{prompt_data['description']}"
        )
        output_str = run_chat_completion(
            base_url=service_url,
            model_name=args.model_name,
            messages=prompt_data["messages"],
            max_tokens=MAX_OUTPUT_LEN,
        )
        # Use description as key for comparison
        key = prompt_data["description"]
        output_strs[key] = output_str
        print(f"Output: {output_str}")
    if args.mode in ("baseline", "baseline_pd"):
        _save_baseline(output_strs, args.baseline_file)
    else:
        _compare_with_baseline(output_strs, args.baseline_file)
if __name__ == "__main__":
main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/ec_connector/integration/test_epd_correctness.py",
"license": "Apache License 2.0",
"lines": 256,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/config/ec_transfer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import hashlib
import uuid
from dataclasses import field
from typing import Any, Literal, get_args
from vllm.config.utils import config
ECProducer = Literal["ec_producer", "ec_both"]
ECConsumer = Literal["ec_consumer", "ec_both"]
ECRole = Literal[ECProducer, ECConsumer]
@config
class ECTransferConfig:
    """Configuration for distributed EC cache transfer."""
    ec_connector: str | None = None
    """The EC connector for vLLM to transmit EC caches between vLLM instances.
    """
    engine_id: str | None = None
    """The engine id for EC transfers. Auto-generated in __post_init__ if unset."""
    ec_buffer_device: str | None = "cuda"
    """The device used by ec connector to buffer the EC cache.
    Currently only support 'cuda'."""
    ec_buffer_size: float = 1e9
    """The buffer size for TorchDistributedConnector. Measured in number of
    bytes. Recommended value: 1e9 (about 1GB)."""
    ec_role: ECRole | None = None
    """Whether this vLLM instance produces, consumes EC cache, or both. Choices
    are 'ec_producer', 'ec_consumer', 'ec_both'."""
    ec_rank: int | None = None
    """The rank of this vLLM instance in the EC cache transfer. Typical value:
    0 for encoder, 1 for pd instance.
    Currently only 1P1D is supported."""
    ec_parallel_size: int = 1
    """The number of parallel instances for EC cache transfer. For
    PyNcclConnector, this should be 2."""
    ec_ip: str = "127.0.0.1"
    """The EC connector ip, used to build distributed connection."""
    ec_port: int = 14579
    """The EC connector port, used to build distributed connection."""
    ec_connector_extra_config: dict[str, Any] = field(default_factory=dict)
    """any extra config that the connector may need."""
    ec_connector_module_path: str | None = None
    """The Python module path to dynamically load the EC connector from.
    Only supported in V1."""
    def compute_hash(self) -> str:
        """
        Return a hash covering the fields that affect the computation graph.

        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        # md5 is used as a cheap fingerprint, not for security
        # (hence usedforsecurity=False).
        hash_str = hashlib.md5(str(factors).encode(), usedforsecurity=False).hexdigest()
        return hash_str
    def __post_init__(self) -> None:
        # Assign a unique engine id when the caller did not provide one.
        if self.engine_id is None:
            self.engine_id = str(uuid.uuid4())
        # Validate the role eagerly so misconfiguration fails at startup.
        if self.ec_role is not None and self.ec_role not in get_args(ECRole):
            raise ValueError(
                f"Unsupported ec_role: {self.ec_role}. "
                f"Supported roles are {get_args(ECRole)}"
            )
        # A connector without a role is ambiguous; reject the combination.
        if self.ec_connector is not None and self.ec_role is None:
            raise ValueError(
                "Please specify ec_role when ec_connector "
                f"is set, supported roles are {get_args(ECRole)}"
            )
    @property
    def is_ec_transfer_instance(self) -> bool:
        """True when a connector is configured together with any valid EC role."""
        return self.ec_connector is not None and self.ec_role in get_args(ECRole)
    @property
    def is_ec_producer(self) -> bool:
        """True when this instance produces EC caches ('ec_producer' or 'ec_both')."""
        return self.ec_connector is not None and self.ec_role in get_args(ECProducer)
    @property
    def is_ec_consumer(self) -> bool:
        """True when this instance consumes EC caches ('ec_consumer' or 'ec_both')."""
        return self.ec_connector is not None and self.ec_role in get_args(ECConsumer)
    def get_from_extra_config(self, key, default) -> Any:
        """Look up `key` in ec_connector_extra_config, returning `default` if absent."""
        return self.ec_connector_extra_config.get(key, default)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/config/ec_transfer.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/ec_transfer/ec_connector/base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
ECConnectorBase Class for Distributed Encoder Cache &
P2P Encoder cache communication in V1
The class provides the following primitives:
Scheduler-side: runs in the scheduler, binds metadata, which
is used by the worker-side to load/save Encoder cache.
check_caches_exist() - Check whether Encoder cache of requests exist
update_state_after_alloc() - update ECConnector state after
allocate. This will decide to load the cache or not
request_finished() - called when a request is finished,
free the cache with the requests
Worker-side: runs in each worker, loads/saves Encoder Cache to/from
the Connector based on the metadata.
start_load_ec() - starts loading all ECs (maybe async)
wait_for_save() - blocks until all saves are done
get_finished() - called with ids of finished requests, returns
ids of requests that have completed async sending/recving.
"""
import enum
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any
import torch
from vllm.logger import init_logger
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.outputs import ECConnectorOutput
if TYPE_CHECKING:
from vllm.config import VllmConfig
from vllm.v1.request import Request
logger = init_logger(__name__)
class ECConnectorRole(enum.Enum):
    """Which process an EC connector instance runs in."""
    # Connector running in the scheduler process
    SCHEDULER = 0
    # Connector running in the worker process
    WORKER = 1
class ECConnectorMetadata(ABC): # noqa: B024
    """
    Abstract Metadata used to communicate between the
    Scheduler ECConnector and Worker ECConnector.

    Subclasses carry whatever per-step payload a concrete connector needs;
    the base class itself defines no state or methods.
    """
    pass
class ECConnectorBase(ABC):
    """Abstract interface for encoder-cache (EC) transfer connectors.

    A connector instance runs either in the scheduler process (producing
    metadata that tells workers what to load/save) or in a worker process
    (moving the actual cache tensors); ``role`` records which side this is.
    """

    def __init__(self, vllm_config: "VllmConfig", role: ECConnectorRole):
        # Guard clause: a connector cannot exist without a transfer config.
        ec_config = vllm_config.ec_transfer_config
        if ec_config is None:
            raise ValueError("ec_transfer_config must be set for ECConnectorBase")
        self._connector_metadata: ECConnectorMetadata | None = None
        self._vllm_config = vllm_config
        self._role = role
        self._is_producer = ec_config.is_ec_producer
        self._is_consumer = ec_config.is_ec_consumer

    @property
    def role(self) -> ECConnectorRole:
        """Which side (scheduler or worker) this instance serves."""
        return self._role

    @property
    def is_producer(self) -> bool:
        """Whether this instance produces EC caches."""
        return self._is_producer

    @property
    def is_consumer(self) -> bool:
        """Whether this instance consumes EC caches."""
        return self._is_consumer

    # ==============================
    # Worker-side methods
    # ==============================
    def bind_connector_metadata(self, connector_metadata: ECConnectorMetadata) -> None:
        """Attach the scheduler-produced metadata for the upcoming step.

        The model runner calls this immediately before every model execution;
        the bound metadata drives runtime EC cache loading.

        Args:
            connector_metadata (dict): the connector metadata.
        """
        self._connector_metadata = connector_metadata

    def clear_connector_metadata(self) -> None:
        """Drop the bound metadata.

        The model runner calls this after every model execution.
        """
        self._connector_metadata = None

    def _get_connector_metadata(self) -> ECConnectorMetadata:
        """Return the currently bound metadata (connector-internal use only).

        Returns:
            ConnectorMetadata: the connector metadata.
        """
        # Only valid between bind_connector_metadata and clear_connector_metadata.
        assert self._connector_metadata is not None
        return self._connector_metadata

    def register_caches(
        self,
        ec_caches: dict[str, torch.Tensor],
    ):
        """Hand the worker's encoder-cache dict to the connector.

        Args:
            ec_caches: dictionary of encoder cache
        """
        # TODO: Implement this later for P2P feature
        return

    @abstractmethod
    def start_load_caches(
        self, encoder_cache: dict[str, torch.Tensor], **kwargs
    ) -> None:
        """Begin loading caches from the connector into vLLM's encoder cache.

        Invoked before `_gather_mm_embeddings`; which entries to load is
        determined by the scheduler-provided metadata.

        Args:
            encoder_cache (dict[str, torch.Tensor]): mapping from multimodal
                data hash (`mm_hash`) to encoder cache tensor.
            kwargs (dict): additional connector-specific arguments.
        """
        pass

    @abstractmethod
    def save_caches(
        self, encoder_cache: dict[str, torch.Tensor], mm_hash: str, **kwargs
    ) -> None:
        """Persist one encoder-cache entry through the connector.

        Copies the worker's local cache entry to shared storage or another
        external connector.

        Args:
            encoder_cache (dict[str, torch.Tensor]): mapping from multimodal
                data hash (`mm_hash`) to encoder cache tensor.
            mm_hash (str): hash of the multimodal item whose cache is saved.
            kwargs (dict): additional connector-specific arguments.
        """
        pass

    def get_finished(
        self, finished_req_ids: set[str]
    ) -> tuple[set[str] | None, set[str] | None]:
        """Report which asynchronous transfers have completed on this worker.

        The scheduler process (via the Executors) uses this output to track
        which workers are done.

        Returns:
            Tuple of (finished sending/saving ids, finished recving/loading
            ids) for requests that previously returned True from
            request_finished(). Returned ids must belong to a set passed to
            this method in this call or a prior one. The default
            implementation performs no async transfers.
        """
        return None, None

    # ==============================
    # Scheduler-side methods
    # ==============================
    @abstractmethod
    def has_cache_item(
        self,
        identifier: str,
    ) -> bool:
        """Check whether a cache entry exists for one media item.

        Args:
            identifier (str): the identifier of the media.

        Returns:
            True if a cache entry exists for the media.
        """
        pass

    @abstractmethod
    def update_state_after_alloc(self, request: "Request", index: int):
        """Update connector state after the scheduler allocates cache space.

        Args:
            request (Request): the request object.
        """
        pass

    @abstractmethod
    def build_connector_meta(
        self, scheduler_output: SchedulerOutput
    ) -> ECConnectorMetadata:
        """Build this step's metadata for the worker-side connector.

        Must NOT modify fields in the scheduler_output; calling this resets
        the connector's per-step state.

        Args:
            scheduler_output (SchedulerOutput): the scheduler output object.
        """
        pass

    def update_connector_output(self, connector_output: ECConnectorOutput):
        """Fold worker-side connector output back into scheduler-side state.

        Args:
            connector_output (ECConnectorOutput): the worker-side
                connectors output.
        """
        return

    def request_finished(
        self, request: "Request"
    ) -> tuple[bool, dict[str, Any] | None]:
        """Hook called when a request finishes, before its cache is freed.

        Returns:
            A (delay_free, extra) tuple; delay_free is True when the cache is
            still being saved/sent asynchronously and must not be freed until
            the request id is returned from get_finished().
        """
        return False, None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/ec_transfer/ec_connector/base.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/ec_transfer/ec_connector/factory.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import importlib
from collections.abc import Callable
from typing import TYPE_CHECKING
from vllm.distributed.ec_transfer.ec_connector.base import (
ECConnectorBase,
ECConnectorRole,
)
from vllm.logger import init_logger
if TYPE_CHECKING:
from vllm.config import ECTransferConfig, VllmConfig
logger = init_logger(__name__)
class ECConnectorFactory:
    """Registry and factory for lazily-imported EC connector classes."""

    _registry: dict[str, Callable[[], type[ECConnectorBase]]] = {}

    @classmethod
    def register_connector(cls, name: str, module_path: str, class_name: str) -> None:
        """Register a connector with a lazy-loading module and class name."""
        if name in cls._registry:
            raise ValueError(f"Connector '{name}' is already registered.")

        def _load() -> type[ECConnectorBase]:
            # Deferred import: the module is loaded only when the connector
            # is actually requested.
            return getattr(importlib.import_module(module_path), class_name)

        cls._registry[name] = _load

    @classmethod
    def create_connector(
        cls,
        config: "VllmConfig",
        role: ECConnectorRole,
    ) -> ECConnectorBase:
        """Instantiate the configured connector for the given role."""
        ec_transfer_config = config.ec_transfer_config
        if ec_transfer_config is None:
            raise ValueError("ec_transfer_config must be set to create a connector")
        connector_cls = cls.get_connector_class(ec_transfer_config)
        logger.info(
            "Creating connector with name: %s and engine_id: %s",
            connector_cls.__name__,
            ec_transfer_config.engine_id,
        )
        # Connector is explicitly separated into two roles.
        # Scheduler connector:
        # - Co-locate with scheduler process
        # - Should only be used inside the Scheduler class
        # Worker connector:
        # - Co-locate with worker process
        return connector_cls(config, role)

    @classmethod
    def get_connector_class(
        cls, ec_transfer_config: "ECTransferConfig"
    ) -> type[ECConnectorBase]:
        """Resolve the connector class named by the transfer config."""
        connector_name = ec_transfer_config.ec_connector
        if connector_name is None:
            raise ValueError("EC connect must not be None")
        if connector_name in cls._registry:
            return cls._registry[connector_name]()
        # Fall back to a user-supplied module path for out-of-tree connectors.
        connector_module_path = ec_transfer_config.ec_connector_module_path
        if connector_module_path is None:
            raise ValueError(f"Unsupported connector type: {connector_name}")
        return getattr(
            importlib.import_module(connector_module_path), connector_name
        )
# Register various connectors here.
# The registration should not be done in each individual file, as we want to
# only load the files corresponding to the current connector.
# (The loader is lazy: the module below is imported only on first use.)
ECConnectorFactory.register_connector(
    "ECExampleConnector",
    "vllm.distributed.ec_transfer.ec_connector.example_connector",
    "ECExampleConnector",
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/ec_transfer/ec_connector/factory.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/ec_transfer/ec_transfer_state.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING
from vllm.distributed.ec_transfer.ec_connector.base import (
ECConnectorBase,
ECConnectorRole,
)
from vllm.distributed.ec_transfer.ec_connector.factory import ECConnectorFactory
if TYPE_CHECKING:
from vllm.config import VllmConfig
_EC_CONNECTOR_AGENT: ECConnectorBase | None = None
def get_ec_transfer() -> ECConnectorBase:
    """Return the process-global EC connector; fails if not yet initialized."""
    assert _EC_CONNECTOR_AGENT is not None, "disaggregated EC cache is not initialized"
    return _EC_CONNECTOR_AGENT
def has_ec_transfer() -> bool:
    """True once ensure_ec_transfer_initialized() has created a connector."""
    return _EC_CONNECTOR_AGENT is not None
def ensure_ec_transfer_initialized(vllm_config: "VllmConfig") -> None:
"""
Initialize EC cache connector.
"""
global _EC_CONNECTOR_AGENT
if vllm_config.ec_transfer_config is None:
return
if (
vllm_config.ec_transfer_config.is_ec_transfer_instance
and _EC_CONNECTOR_AGENT is None
):
_EC_CONNECTOR_AGENT = ECConnectorFactory.create_connector(
config=vllm_config, role=ECConnectorRole.WORKER
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/ec_transfer/ec_transfer_state.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/ec_connector_model_runner_mixin.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Define EC connector functionality mixin for model runners.
"""
from collections.abc import Generator
from contextlib import AbstractContextManager, contextmanager, nullcontext
from typing import TYPE_CHECKING
import torch
from vllm.distributed.ec_transfer import get_ec_transfer, has_ec_transfer
from vllm.distributed.ec_transfer.ec_connector.base import ECConnectorBase
from vllm.logger import init_logger
from vllm.v1.outputs import ECConnectorOutput
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
logger = init_logger(__name__)
# Defined as an EC connector functionality mixin for ModelRunner (GPU, TPU)
class ECConnectorModelRunnerMixin:
    """Mixin adding encoder-cache (EC) connector hooks to model runners."""

    @staticmethod
    def maybe_save_ec_to_connector(
        encoder_cache: dict[str, torch.Tensor],
        mm_hash: str,
    ):
        """Save the encoder cache entry for ``mm_hash`` via the EC connector.

        No-op (with a debug log) when no EC connector is configured.
        """
        if not has_ec_transfer():
            # Fix: reworded the previously ungrammatical debug message
            # ("Not have ec transfer please check").
            logger.debug("No EC transfer connector configured; skipping cache save")
            return
        connector = get_ec_transfer()
        connector.save_caches(encoder_cache=encoder_cache, mm_hash=mm_hash)

    @staticmethod
    def get_finished_ec_transfers(
        scheduler_output: "SchedulerOutput",
    ) -> tuple[set[str] | None, set[str] | None]:
        """Return (finished_sending, finished_recving) request-id sets from
        the connector, or (None, None) when no EC connector is configured."""
        if has_ec_transfer():
            return get_ec_transfer().get_finished(scheduler_output.finished_req_ids)
        return None, None

    @staticmethod
    def maybe_get_ec_connector_output(
        scheduler_output: "SchedulerOutput",
        encoder_cache: dict[str, torch.Tensor],
        **kwargs,
    ) -> AbstractContextManager[ECConnectorOutput | None]:
        """Return the EC-connector lifecycle context manager, or a
        ``nullcontext()`` when no EC connector is configured."""
        return (
            ECConnectorModelRunnerMixin._get_ec_connector_output(
                scheduler_output, encoder_cache, **kwargs
            )
            if has_ec_transfer()
            else nullcontext()
        )

    # This context manager must be used within an active forward context.
    # It encapsulates the entire EC connector lifecycle within execute_model
    @staticmethod
    @contextmanager
    def _get_ec_connector_output(
        scheduler_output: "SchedulerOutput",
        encoder_cache: dict[str, torch.Tensor],
        **kwargs,
    ) -> Generator[ECConnectorOutput, None, None]:
        """Bind connector metadata, optionally start cache loads, yield the
        output holder, then collect finished transfers and unbind."""
        output = ECConnectorOutput()
        ec_connector = get_ec_transfer()
        assert isinstance(ec_connector, ECConnectorBase)
        assert scheduler_output.ec_connector_metadata is not None
        ec_connector.bind_connector_metadata(scheduler_output.ec_connector_metadata)

        # Load caches for consumer or both roles
        if ec_connector.is_consumer:
            ec_connector.start_load_caches(encoder_cache, **kwargs)
        try:
            yield output
        finally:
            # Record completion state even if the forward pass raised, and
            # always clear the per-step metadata binding.
            output.finished_sending, output.finished_recving = (
                ec_connector.get_finished(scheduler_output.finished_req_ids)
            )
            ec_connector.clear_connector_metadata()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/ec_connector_model_runner_mixin.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/attention/test_cpu_attn.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
import math
import pytest
import torch
from vllm.platforms import CpuArchEnum, current_platform
from vllm.utils.torch_utils import set_random_seed
from vllm.v1.attention.backends.cpu_attn import _get_attn_isa
if not current_platform.is_cpu():
pytest.skip("skipping CPU-only tests", allow_module_level=True)
from vllm._custom_ops import (
cpu_attention_with_kv_cache,
cpu_attn_get_scheduler_metadata,
cpu_attn_reshape_and_cache,
)
NUM_HEADS = [
(4, 4),
(8, 2),
(9, 3),
]
HEAD_SIZES = [96, 128]
HEAD_SIZES_VEC16 = [96, 80, 112, 128]
QTYPES = [torch.bfloat16, torch.half, torch.float32]
SLIDING_WINDOWS = [None, 256]
NUM_BLOCKS = [
1024,
]
SEQ_LENS = [ # (q_len, kv_len)
[(1, 213), (1, 1), (1, 312), (1, 7), (1, 7812)], # decode batch
[(2345, 2345), (5, 5), (3, 16), (134, 5131)], # prefill batch
[(992, 2456), (1, 1234), (98, 1145), (1, 4162), (2345, 2345)], # mixed batch
]
def get_attn_isa(
    block_size: int | None = None,
    dtype: torch.dtype | None = None,
):
    """Resolve the attention ISA to test.

    With both ``block_size`` and ``dtype`` given, defer to the backend's
    own ``_get_attn_isa``; otherwise pick by platform capability.
    """
    if block_size and dtype:
        return _get_attn_isa(dtype, block_size)
    if current_platform.get_cpu_architecture() == CpuArchEnum.ARM:
        return "neon"
    return "amx" if torch._C._cpu._is_amx_tile_supported() else "vec"
# rand number generation takes too much time, cache rand tensors
@functools.lru_cache(maxsize=128, typed=False)
def tensor_cache(
    elem_num: int,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Return a cached flat tensor of ``elem_num`` normal samples of ``dtype``."""
    return torch.randn(elem_num, dtype=dtype)
def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor:
closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads))
base = torch.tensor(
2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))),
dtype=torch.float32,
)
powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != total_num_heads:
extra_base = torch.tensor(
2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))),
dtype=torch.float32,
)
num_remaining_heads = min(
closest_power_of_2, total_num_heads - closest_power_of_2
)
extra_powers = torch.arange(
start=1, end=1 + 2 * num_remaining_heads, step=2, dtype=torch.int32
)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
return slopes.float()
def ref_paged_attn(
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    query_lens: list[int],
    kv_lens: list[int],
    block_tables: torch.Tensor,
    scale: float,
    sliding_window: int | None = None,
    soft_cap: float | None = None,
    alibi_slopes: torch.Tensor | None = None,
    s_aux: torch.Tensor | None = None,
) -> torch.Tensor:
    """Pure-PyTorch reference for causal paged attention over a varlen batch.

    Args:
        query: packed queries for all sequences, (total_tokens, heads, head_dim).
        key_cache/value_cache: paged KV, (num_blocks, block_size, kv_heads,
            head_dim) (shape established by the unpack below).
        query_lens/kv_lens: per-sequence query and KV lengths.
        block_tables: (num_seqs, max_blocks) block indices per sequence.
        scale: softmax scale applied to queries.
        sliding_window: optional causal local-attention window.
        soft_cap: optional tanh logit capping.
        alibi_slopes: optional per-query-head ALiBi slopes.
        s_aux: optional per-head attention-sink logits.

    Returns:
        Packed outputs, same layout and dtype as `query`.
    """
    num_seqs = len(query_lens)
    block_tables = block_tables.cpu().numpy()
    _, block_size, num_kv_heads, head_size = key_cache.shape
    dtype = query.dtype
    outputs: list[torch.Tensor] = []
    start_idx = 0
    # Pre-broadcast the optional bias terms to (heads, 1, 1).
    if alibi_slopes is not None:
        alibi_slopes = alibi_slopes[:, None, None]
    if s_aux is not None:
        s_aux = s_aux.float()
        s_aux = s_aux[:, None, None]
    for i in range(num_seqs):
        query_len = query_lens[i]
        kv_len = kv_lens[i]
        q = query[start_idx : start_idx + query_len].float()
        q *= scale
        # Gather this sequence's KV pages and trim to the true KV length.
        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_tables[i, :num_kv_blocks]
        k = key_cache[block_indices].view(-1, num_kv_heads, head_size)
        k = k[:kv_len].float()
        v = value_cache[block_indices].view(-1, num_kv_heads, head_size)
        v = v[:kv_len].float()
        # GQA/MQA: replicate KV heads up to the query head count.
        if q.shape[1] != k.shape[1]:
            k = torch.repeat_interleave(k, q.shape[1] // k.shape[1], dim=1)
            v = torch.repeat_interleave(v, q.shape[1] // v.shape[1], dim=1)
        attn = torch.einsum("qhd,khd->hqk", q, k).float()
        # Causal mask: query i may attend up to kv position kv_len-query_len+i.
        empty_mask = torch.ones(query_len, kv_len)
        mask = torch.triu(empty_mask, diagonal=kv_len - query_len + 1).bool()
        if sliding_window is not None:
            # Also mask positions older than the sliding window.
            sliding_window_mask = (
                torch.triu(
                    empty_mask, diagonal=kv_len - (query_len + sliding_window) + 1
                )
                .bool()
                .logical_not()
            )
            mask |= sliding_window_mask
        if soft_cap is not None:
            attn = soft_cap * torch.tanh(attn / soft_cap)
        if alibi_slopes is not None:
            # ALiBi bias: -slope * (query_pos - kv_pos), applied pre-softmax.
            q_start_pos = kv_len - query_len
            q_pos = q_start_pos + torch.arange(0, query_len)[None, :, None]
            kv_pos = torch.arange(0, kv_len)[None, None, :]
            dist = q_pos - kv_pos
            alibi_bias = -alibi_slopes * dist
            attn += alibi_bias
        attn.masked_fill_(mask, float("-inf"))
        if s_aux is not None:
            # Attention sink: prepend the sink logit column before softmax...
            s_aux_ext = s_aux.repeat(1, query_len, 1)
            attn = torch.cat((s_aux_ext, attn), dim=-1)
        attn = torch.softmax(attn, dim=-1)
        if s_aux is not None:
            # ...then drop its probability mass from the weighted sum.
            attn = attn[:, :, 1:]
        out = torch.einsum("hqk,khd->qhd", attn, v).to(dtype=dtype)
        outputs.append(out)
        start_idx += query_len
    return torch.cat(outputs, dim=0)
@torch.inference_mode()
def varlen_with_paged_kv(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Run the CPU paged-attention kernel with and without KV splitting and
    compare both outputs against the pure-PyTorch reference.

    Args:
        seq_lens: (query_len, kv_len) pair per sequence.
        num_heads: (num_query_heads, num_kv_heads).
        Other args mirror the pytest parameterizations.

    Raises:
        AssertionError: if either kernel output drifts from the reference.
    """
    set_random_seed(0)
    num_seqs = len(seq_lens)
    query_lens = [x[0] for x in seq_lens]
    kv_lens = [x[1] for x in seq_lens]
    num_query_heads = num_heads[0]
    num_kv_heads = num_heads[1]
    assert num_query_heads % num_kv_heads == 0
    max_kv_len = max(kv_lens)
    window_size = (sliding_window - 1, 0) if sliding_window is not None else (-1, -1)
    scale = head_size**-0.5
    token_num = sum(query_lens)

    # for n heads the set of slopes is the geometric sequence that starts
    # 2^(-8/n)
    alibi_slopes = _get_alibi_slopes(num_query_heads) if use_alibi else None
    s_aux = (
        15 * torch.rand((num_query_heads,), dtype=torch.bfloat16) if use_sink else None
    )

    query = tensor_cache(
        elem_num=token_num * num_query_heads * head_size,
        dtype=dtype,
    )
    query = query.view(
        token_num,
        num_query_heads,
        head_size,
    )
    key_value = tensor_cache(
        elem_num=2 * num_blocks * num_kv_heads * block_size * head_size,
        dtype=dtype,
    )
    key_value = key_value.view(
        2,
        num_blocks,
        block_size,
        num_kv_heads,
        head_size,
    )
    key_cache, value_cache = key_value.unbind(0)

    # KV cache for CPU attention
    packed_key_cache = torch.empty(
        num_blocks, num_kv_heads, block_size, head_size, dtype=dtype
    )
    packed_value_cache = torch.empty_like(packed_key_cache)

    cu_query_lens = torch.tensor([0] + query_lens, dtype=torch.int32).cumsum(
        dim=0, dtype=torch.int32
    )
    kv_lens_tensor = torch.tensor(kv_lens, dtype=torch.int32)
    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    block_tables = torch.randint(
        0, num_blocks, (num_seqs, max_num_blocks_per_seq), dtype=torch.int32
    )

    # use reshape_and_cache to pack key_cache and value_cache
    slot_mapping = torch.arange(0, num_blocks * block_size, dtype=torch.int64)
    cpu_attn_reshape_and_cache(
        key=key_cache.view(-1, num_kv_heads, head_size),
        value=value_cache.view(-1, num_kv_heads, head_size),
        key_cache=packed_key_cache,
        value_cache=packed_value_cache,
        slot_mapping=slot_mapping,
        isa=isa,
    )

    def _run_kernel(enable_kv_split: bool) -> torch.Tensor:
        # Build scheduler metadata, then run the kernel into a fresh buffer.
        # (Previously this whole sequence was duplicated inline for the
        # split/no-split cases.)
        metadata = cpu_attn_get_scheduler_metadata(
            num_reqs=num_seqs,
            num_heads=num_query_heads,
            num_kv_heads=num_kv_heads,
            head_dim=head_size,
            seq_lens=kv_lens_tensor,
            dtype=dtype,
            query_start_loc=cu_query_lens,
            causal=True,
            sliding_window_size=sliding_window if sliding_window is not None else -1,
            isa=isa,
            enable_kv_split=enable_kv_split,
        )
        output = torch.empty_like(query)
        cpu_attention_with_kv_cache(
            query=query,
            key_cache=packed_key_cache,
            value_cache=packed_value_cache,
            output=output,
            query_start_loc=cu_query_lens,
            seq_lens=kv_lens_tensor,
            scale=scale,
            causal=True,
            alibi_slopes=alibi_slopes,
            sliding_window=window_size,
            block_table=block_tables,
            softcap=soft_cap if soft_cap is not None else 0,
            scheduler_metadata=metadata,
            s_aux=s_aux,
        )
        return output

    out_without_split = _run_kernel(enable_kv_split=False)
    out_with_split = _run_kernel(enable_kv_split=True)

    ref_output = ref_paged_attn(
        query=query,
        key_cache=key_cache,
        value_cache=value_cache,
        query_lens=query_lens,
        kv_lens=kv_lens,
        block_tables=block_tables,
        scale=scale,
        sliding_window=sliding_window,
        soft_cap=soft_cap,
        alibi_slopes=alibi_slopes,
        s_aux=s_aux,
    )

    atol, rtol = 1.5e-2, 1e-2
    # Bug fix: the old code wrapped each assert_close call in a tuple together
    # with an f-string "message"; that message was never attached to the check
    # and the max-abs-diff inside it was computed eagerly on every call.
    # assert_close raises with its own detailed mismatch report.
    torch.testing.assert_close(out_with_split, ref_output, atol=atol, rtol=rtol)
    torch.testing.assert_close(out_without_split, ref_output, atol=atol, rtol=rtol)
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", [96, 128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", QTYPES)
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", ["vec"])
def test_varlen_with_paged_kv_normal_vec(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise the plain "vec" ISA path across the standard configurations."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", [96, 128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", ["amx"])
@pytest.mark.skipif(
    not torch._C._cpu._is_amx_tile_supported(), reason="no AMX support."
)
def test_varlen_with_paged_kv_normal_amx(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise the AMX ISA path (bf16 only); skipped without AMX tiles."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES_VEC16)
@pytest.mark.parametrize("block_size", [48])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", ["vec16"])
def test_varlen_with_paged_kv_normal_vec16(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise the "vec16" ISA path with its extended head sizes."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("block_size", [96, 128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", QTYPES)
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", ["neon"])
@pytest.mark.skipif(
    current_platform.get_cpu_architecture() != CpuArchEnum.ARM,
    reason="Not an Arm CPU.",
)
def test_varlen_with_paged_kv_normal_neon(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise the NEON ISA path; skipped on non-Arm CPUs."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", [96])
@pytest.mark.parametrize("block_size", [128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("soft_cap", [50])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", [get_attn_isa()])
def test_varlen_with_paged_kv_softcap(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise tanh logit soft-capping on the platform's default ISA."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", [96])
@pytest.mark.parametrize("block_size", [128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [True])
@pytest.mark.parametrize("use_sink", [False])
@pytest.mark.parametrize("isa", [get_attn_isa()])
def test_varlen_with_paged_kv_alibi(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise ALiBi positional bias on the platform's default ISA."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
@pytest.mark.parametrize("seq_lens", SEQ_LENS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", [96])
@pytest.mark.parametrize("block_size", [128])
@pytest.mark.parametrize("sliding_window", SLIDING_WINDOWS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("soft_cap", [None])
@pytest.mark.parametrize("num_blocks", NUM_BLOCKS)
@pytest.mark.parametrize("use_alibi", [False])
@pytest.mark.parametrize("use_sink", [True])
@pytest.mark.parametrize("isa", [get_attn_isa()])
def test_varlen_with_paged_kv_sink(
    seq_lens: list[tuple[int, int]],
    num_heads: tuple[int, int],
    head_size: int,
    sliding_window: int | None,
    dtype: torch.dtype,
    block_size: int,
    soft_cap: float | None,
    num_blocks: int,
    use_alibi: bool,
    use_sink: bool,
    isa: str,
) -> None:
    """Exercise attention-sink (s_aux) handling on the default ISA."""
    varlen_with_paged_kv(
        seq_lens, num_heads, head_size, sliding_window, dtype, block_size,
        soft_cap, num_blocks, use_alibi, use_sink, isa,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/attention/test_cpu_attn.py",
"license": "Apache License 2.0",
"lines": 568,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/quantization/test_mixed_precision.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test quark-quantized {MXFP4, FP8} mixed precision models.
Run `pytest tests/quantization/test_mixed_precision.py`.
"""
import importlib
import importlib.metadata
from dataclasses import dataclass
import lm_eval
import pytest
from packaging import version
QUARK_MXFP4_AVAILABLE = importlib.util.find_spec("quark") is not None and version.parse(
importlib.metadata.version("amd-quark")
) >= version.parse("0.8.99")
@dataclass
class ModelCase:
    """A model under test: Hugging Face model id plus tensor-parallel size."""

    model_id: str  # Hugging Face model identifier
    tp: int  # tensor-parallel size
@dataclass
class EvaluationConfig:
    """Builds the lm-eval ``model_args`` string for a vLLM-backed evaluation.

    Attributes:
        model_name: Hugging Face model id to evaluate.
        tp: tensor-parallel size for vLLM. Defaults to 4 (the previously
            hard-coded value), so existing callers are unaffected.
    """

    model_name: str
    tp: int = 4  # generalizes the previously hard-coded tensor_parallel_size

    def get_model_args(self) -> str:
        """Return the comma-separated vLLM model_args string for lm-eval."""
        return (
            f"pretrained={self.model_name},"
            f"tensor_parallel_size={self.tp},"
            "dtype=auto,gpu_memory_utilization=0.8,trust_remote_code=False"
        )
# Maps model id -> expected lm-eval accuracy per task; the test below accepts
# measured accuracies within +/-0.05 of these targets.
TEST_CONFIGS = {
    # Mixed-precision (AMP) model
    # - Demonstrates end-to-end pipeline functionality
    "amd/Qwen3-8B-WMXFP4FP8-AMXFP4FP8-AMP-KVFP8": {"arc_challenge": 0.52, "mmlu": 0.72},
    # Non-mixed-precision (PTQ) model
    # - Reference model verifying the pipeline stays compatible (no regressions)
    "amd/Llama-2-70b-chat-hf-FP8-MLPerf-fp8_attn_quark_format": {
        "arc_challenge": 0.53,
        "mmlu": 0.61,
    },
}
@pytest.mark.parametrize("model_name, accuracy_numbers", TEST_CONFIGS.items())
@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available")
def test_mixed_precision_model_accuracies(model_name: str, accuracy_numbers: dict):
    """Evaluate the model with lm-eval and check each task accuracy is within
    0.05 of the expected value from TEST_CONFIGS."""
    eval_results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=EvaluationConfig(model_name).get_model_args(),
        tasks=list(accuracy_numbers.keys()),
        batch_size=8,
    )
    rtol = 0.05
    for task_name, expect_accuracy in accuracy_numbers.items():
        measured_accuracy = eval_results["results"][task_name]["acc,none"]
        # |measured - expected| < rtol, equivalent to the two-sided check.
        assert abs(measured_accuracy - expect_accuracy) < rtol, (
            f"Expected: {expect_accuracy} | Measured: {measured_accuracy}"
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/quantization/test_mixed_precision.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/core/test_fused_qk_norm_rope.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from tests.kernels.utils import opcheck
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_random_seed
DTYPES = [torch.bfloat16, torch.float16]
IS_NEOX = [True, False]
EPS_VALUES = [1e-5, 1e-6]
SEEDS = [13]
PARTIAL_ROPE = [True, False]
CUDA_DEVICES = ["cuda:0"]
def _apply_qk_norm_rope(
    qkv: torch.Tensor,
    positions: torch.Tensor,
    q_norm: RMSNorm,
    k_norm: RMSNorm,
    rope: RotaryEmbedding,
    num_heads_q: int,
    num_heads_kv: int,
    head_dim: int,
) -> torch.Tensor:
    """Reference path: per-head RMSNorm on Q and K, then rotary embedding.

    ``qkv`` holds Q, K and V concatenated along the last dimension
    (num_heads_q*head_dim + 2*num_heads_kv*head_dim wide); V passes through
    unchanged. Returns the re-concatenated [Q, K, V] tensor.
    """
    q_size = num_heads_q * head_dim
    kv_size = num_heads_kv * head_dim
    q, k, v = qkv.split([q_size, kv_size, kv_size], dim=-1)
    # Normalize per head: view as (..., n_heads, head_dim), apply RMSNorm,
    # then flatten back to the packed layout.
    q_by_head = q.view(*q.shape[:-1], q.shape[-1] // head_dim, head_dim)
    q_by_head = q_norm.forward_native(q_by_head)
    q = q_by_head.view(q.shape)
    k_by_head = k.view(*k.shape[:-1], k.shape[-1] // head_dim, head_dim)
    k_by_head = k_norm.forward_native(k_by_head)
    k = k_by_head.view(k.shape)
    # Rotary embedding on the normalized Q/K.
    q, k = rope.forward_native(positions, q, k)
    return torch.cat([q, k, v], dim=-1)
@pytest.mark.skipif(
    not current_platform.is_cuda_alike(),
    reason="fused_qk_norm_rope custom op requires cuda and rocm platform",
)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("is_neox", IS_NEOX)
@pytest.mark.parametrize("eps", EPS_VALUES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("rotary_ratio", [1.0, 0.5, 0.25])
@torch.inference_mode()
def test_fused_qk_norm_rope_matches_reference(
    default_vllm_config,
    device: str,
    dtype: torch.dtype,
    is_neox: bool,
    eps: float,
    seed: int,
    rotary_ratio: float,
):
    """The fused in-place QK-norm+RoPE op must match the unfused reference."""
    torch.set_default_device(device)
    set_random_seed(seed)

    num_heads, num_kv_heads, head_dim = 16, 4, 128
    num_tokens = 4
    qkv_width = (num_heads + 2 * num_kv_heads) * head_dim

    # Two identical copies: the reference path reads one, the fused op
    # mutates the other in place.
    reference_qkv = torch.randn(num_tokens, qkv_width, dtype=dtype, device=device)
    fused_qkv = reference_qkv.clone()
    positions = torch.arange(num_tokens, dtype=torch.long, device=device)

    q_norm = RMSNorm(head_dim, eps=eps).to(device=device, dtype=dtype)
    k_norm = RMSNorm(head_dim, eps=eps).to(device=device, dtype=dtype)
    q_norm.weight.data.normal_(mean=1.0, std=0.1)
    k_norm.weight.data.normal_(mean=1.0, std=0.1)
    q_weight = q_norm.weight.data
    k_weight = k_norm.weight.data

    rope = RotaryEmbedding(
        head_size=head_dim,
        rotary_dim=int(head_dim * rotary_ratio),
        max_position_embeddings=4096,
        base=10000.0,
        is_neox_style=is_neox,
        dtype=dtype,
    ).to(device)

    ref_result = _apply_qk_norm_rope(
        qkv=reference_qkv,
        positions=positions,
        q_norm=q_norm,
        k_norm=k_norm,
        rope=rope,
        num_heads_q=num_heads,
        num_heads_kv=num_kv_heads,
        head_dim=head_dim,
    )

    op_args = (
        num_heads,
        num_kv_heads,
        num_kv_heads,
        head_dim,
        eps,
        q_weight,
        k_weight,
        rope.cos_sin_cache,
        is_neox,
        positions.view(-1),
    )
    # Schema/registration check on a scratch copy, then the real in-place run.
    opcheck(torch.ops._C.fused_qk_norm_rope, (fused_qkv.clone(), *op_args))
    torch.ops._C.fused_qk_norm_rope(fused_qkv, *op_args)

    atol, rtol = (2e-3, 2e-3) if dtype == torch.float16 else (1e-2, 1e-2)
    torch.testing.assert_close(
        fused_qkv,
        ref_result,
        atol=atol,
        rtol=rtol,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/core/test_fused_qk_norm_rope.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/all2all_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import torch
from vllm.distributed import (
get_ep_group,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEParallelConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.flashinfer_a2a_prepare_finalize import (
FlashInferA2APrepareAndFinalize,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
FusedMoEPrepareAndFinalize,
)
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
MoEPrepareAndFinalizeNaiveEP,
MoEPrepareAndFinalizeNoEP,
)
from vllm.platforms import current_platform
from vllm.utils.import_utils import has_deep_ep, has_mori
logger = init_logger(__name__)
if current_platform.is_cuda_alike():
if has_deep_ep():
from .deepep_ht_prepare_finalize import DeepEPHTPrepareAndFinalize
from .deepep_ll_prepare_finalize import (
DEEPEP_QUANT_BLOCK_SHAPE,
DeepEPLLPrepareAndFinalize,
)
if has_mori():
from .mori_prepare_finalize import MoriPrepareAndFinalize
def maybe_roundup_layer_hidden_size(
    hidden_size: int,
    act_dtype: torch.dtype,
    moe_parallel_config: FusedMoEParallelConfig,
) -> int:
    """Round ``hidden_size`` up when the active all2all backend requires it.

    Args:
        hidden_size: Layer hidden-size.
        act_dtype: Data type of the layer activations.
        moe_parallel_config: Fused MoE parallelization strategy configuration.

    Returns:
        The (possibly) rounded-up hidden size; unchanged when neither DeepEP
        backend is in use.
    """
    use_ht = moe_parallel_config.use_deepep_ht_kernels
    use_ll = moe_parallel_config.use_deepep_ll_kernels
    if use_ht:
        hidden_size = DeepEPHTPrepareAndFinalize.maybe_roundup_layer_hidden_size(
            hidden_size, act_dtype
        )
    if use_ll:
        hidden_size = DeepEPLLPrepareAndFinalize.maybe_roundup_layer_hidden_size(
            hidden_size
        )
    return hidden_size
def maybe_make_prepare_finalize(
    moe: FusedMoEConfig,
    quant_config: FusedMoEQuantConfig | None,
    routing_tables: tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None = None,
    allow_new_interface: bool = False,
) -> FusedMoEPrepareAndFinalize | None:
    """Construct the dispatch/combine (prepare/finalize) object for the
    configured all2all backend.

    Args:
        moe: Fused-MoE layer configuration.
        quant_config: Quantization configuration; required (asserted) by the
            DeepEP-LL, mori and FlashInfer branches.
        routing_tables: Optional (global_to_physical, physical_to_global,
            local_expert_global_ids) tensors, forwarded to DeepEP-LL.
        allow_new_interface: See NOTE below.

    Returns:
        A FusedMoEPrepareAndFinalize instance, or None (old interface only)
        when no all2all kernels are in use or no backend branch matches.
    """
    # NOTE(rob): we are migrating each quant_method to hold the MK
    # in all cases. The allow_new_interface=False flag allows us to fall
    # back to the old method for methods that have not yet been migrated.
    #
    # In old method:
    #   * maybe_init_modular_kernel() calls this function. If we are
    #     using no Dp/Ep or naive all2all, this function returns None
    #     and no ModularKernelMethod is created. If non-naive
    #     all2all is used, this returns a PrepareAndFinalize object and
    #     a ModularKernelMethod is created.
    # In new method:
    #   * maybe_make_prepare_finalize() is called from the oracle. We
    #     always return a PrepareAndFinalize object and the quant method
    #     holds the ModularKernel.
    if not moe.moe_parallel_config.use_all2all_kernels:
        if not allow_new_interface:
            return None
        # For DP/TP case, fall back to naive P/F.
        if moe.moe_parallel_config.dp_size > 1:
            logger.info_once(
                "Detected DP deployment with no --enable-expert-parallel. "
                "Falling back to AllGather+ReduceScatter dispatch/combine."
            )
            return MoEPrepareAndFinalizeNaiveEP(
                is_sequence_parallel=moe.moe_parallel_config.is_sequence_parallel,
                num_dispatchers=(
                    get_ep_group().device_communicator.all2all_manager.world_size
                ),
            )
        else:
            return MoEPrepareAndFinalizeNoEP()
    all2all_manager = get_ep_group().device_communicator.all2all_manager
    assert all2all_manager is not None
    prepare_finalize: FusedMoEPrepareAndFinalize | None = None
    if moe.use_deepep_ht_kernels:
        assert moe.dp_size == all2all_manager.dp_world_size
        all_to_all_args: dict[str, Any] = dict()
        handle = all2all_manager.get_handle(all_to_all_args)
        prepare_finalize = DeepEPHTPrepareAndFinalize(
            handle,
            num_dispatchers=all2all_manager.world_size,
            dp_size=all2all_manager.dp_world_size,
            rank_expert_offset=all2all_manager.rank * moe.num_local_experts,
        )
    elif moe.use_deepep_ll_kernels:
        assert quant_config is not None
        # EPLB routing tables (may be absent when EPLB is not in use).
        global_to_physical = physical_to_global = local_expert_global_ids = None
        if routing_tables is not None:
            (
                global_to_physical,
                physical_to_global,
                local_expert_global_ids,
            ) = routing_tables
        all_to_all_args = dict(
            max_num_tokens_per_dp_rank=moe.max_num_tokens,
            token_hidden_size=moe.hidden_dim,
            num_ep_ranks=all2all_manager.world_size,
            num_global_experts=moe.num_experts,
            num_local_experts=moe.num_experts // all2all_manager.world_size,
        )
        handle = all2all_manager.get_handle(all_to_all_args)
        # Note: We may want to use FP8 dispatch just to reduce
        # data movement.
        use_fp8_dispatch = (
            quant_config.quant_dtype == current_platform.fp8_dtype()
            and quant_config.block_shape == DEEPEP_QUANT_BLOCK_SHAPE
        )
        prepare_finalize = DeepEPLLPrepareAndFinalize(
            handle,
            max_tokens_per_rank=moe.max_num_tokens,
            num_dispatchers=all2all_manager.world_size,
            use_fp8_dispatch=use_fp8_dispatch,
            global_to_physical=global_to_physical,
            physical_to_global=physical_to_global,
            local_expert_global_ids=local_expert_global_ids,
        )
    elif moe.use_mori_kernels:
        assert quant_config is not None
        # Note: We may want to use FP8 dispatch just to reduce
        # data movement.
        use_fp8_dispatch = (
            quant_config.is_per_act_token or quant_config.is_block_quantized
        )
        # For PTPC (per token per channel) quant, the scale dim for each token is 1
        # For 1x128 quant, the scale dim for each token is hidden_dim // 128
        scale_dim = 1 if quant_config.is_per_act_token else moe.hidden_dim // 128
        all_to_all_args = dict(
            rank=all2all_manager.rank,
            num_ep_ranks=all2all_manager.world_size,
            quant_dtype=quant_config.quant_dtype,
            token_hidden_size=moe.hidden_dim,
            scale_dim=scale_dim,
            scale_type_size=torch.float32.itemsize,
            max_num_tokens_per_dp_rank=moe.max_num_tokens,
            input_dtype=moe.in_dtype,
            num_local_experts=moe.num_experts // all2all_manager.world_size,
            num_experts_per_token=moe.experts_per_token,
        )
        handle = all2all_manager.get_handle(all_to_all_args)
        prepare_finalize = MoriPrepareAndFinalize(
            handle,
            max_tokens_per_rank=moe.max_num_tokens,
            num_dispatchers=all2all_manager.world_size,
            use_fp8_dispatch=use_fp8_dispatch,
        )
    elif moe.use_fi_all2allv_kernels:
        assert quant_config is not None
        prepare_finalize = FlashInferA2APrepareAndFinalize(
            num_dispatchers=all2all_manager.world_size,
        )
    elif moe.use_naive_all2all_kernels and allow_new_interface:
        prepare_finalize = MoEPrepareAndFinalizeNaiveEP(
            is_sequence_parallel=(moe.moe_parallel_config.is_sequence_parallel),
            num_dispatchers=all2all_manager.world_size,
        )
    return prepare_finalize
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/all2all_utils.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/fused_moe_method_base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import abstractmethod
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
FusedMoEPermuteExpertsUnpermute,
FusedMoEPrepareAndFinalize,
)
from vllm.model_executor.layers.quantization.base_config import (
QuantizeMethodBase,
)
# Module-level logger, following vLLM's one-logger-per-module convention.
logger = init_logger(__name__)
class FusedMoEMethodBase(QuantizeMethodBase):
    """Abstract base for fused-MoE quantize methods.

    Stores the layer's :class:`FusedMoEConfig` together with an optional
    modular kernel and its quantization config, and declares the hooks that
    concrete implementations override.
    """

    def __init__(self, moe: FusedMoEConfig):
        super().__init__()
        self.moe: FusedMoEConfig = moe
        # Populated by subclasses once quantization parameters are known.
        self.moe_quant_config: FusedMoEQuantConfig | None = None
        # Modular kernel instance; stays None until one is constructed.
        self.moe_mk: mk.FusedMoEModularKernel | None = None

    @property
    def supports_internal_mk(self) -> bool:
        # NOTE(rob): temporary attribute to indicate support for
        # completed migration to the new internal MK interface.
        return self.moe_mk is not None

    @property
    def mk_owns_shared_expert(self) -> bool:
        # NOTE(rob): temporary attribute to indicate support for
        # completed migration to the new internal MK interface.
        kernel = self.moe_mk
        return kernel is not None and kernel.shared_experts is not None

    @abstractmethod
    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        """Allocate and register this layer's expert weight parameters."""
        raise NotImplementedError

    def uses_weight_scale_2_pattern(self) -> bool:
        """
        Returns True if this quantization method uses 'weight_scale_2' pattern
        for per-tensor weight scales (e.g., FP4 variants), False otherwise.

        This method should be overridden by subclasses that use the
        'weight_scale_2' pattern instead of the standard 'weight_scale' pattern.
        """
        return False

    def maybe_make_prepare_finalize(
        self,
        routing_tables: tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None = None,
    ) -> FusedMoEPrepareAndFinalize | None:
        """Build a prepare/finalize object for the configured all2all backend
        (or None when no modular-kernel dispatch applies)."""
        # Imported lazily to avoid a circular import at module load time.
        from .all2all_utils import maybe_make_prepare_finalize

        return maybe_make_prepare_finalize(
            self.moe, self.moe_quant_config, routing_tables
        )

    def select_gemm_impl(
        self,
        prepare_finalize: FusedMoEPrepareAndFinalize,
        layer: torch.nn.Module,
    ) -> FusedMoEPermuteExpertsUnpermute:
        # based on the all2all implementation, select the appropriate
        # gemm implementation
        raise NotImplementedError(
            f"{self.__class__.__name__} must select appropriate gemm "
            "implementation based on the prepare_finalize"
        )

    def prepare_dp_allgather_tensor(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> tuple[torch.Tensor, list[torch.Tensor]]:
        """Hook to prepare tensors and extra tensors for DP allgather + EP dispatch."""
        raise NotImplementedError(
            "Method 'prepare_dp_allgather_tensor' is not implemented in "
            f"{self.__class__.__name__}."
        )

    @abstractmethod
    def get_fused_moe_quant_config(
        self, layer: torch.nn.Module
    ) -> FusedMoEQuantConfig | None:
        """Return the quantization config used by the fused-MoE kernels."""
        raise NotImplementedError

    @property
    def topk_indices_dtype(self) -> torch.dtype | None:
        # The prepare/finalize step dictates the top-k index dtype; without a
        # modular kernel there is no constraint to report.
        kernel = self.moe_mk
        if kernel is None:
            return None
        return kernel.prepare_finalize.topk_indices_dtype()

    @property
    def supports_eplb(self) -> bool:
        """Whether this method supports EPLB (expert rebalancing)."""
        return False

    @property
    def method_name(self) -> str:
        """Identifier for this quantize method (class name by default)."""
        return self.__class__.__name__

    @property
    def is_monolithic(self) -> bool:
        """True when routing + experts run as a single fused call."""
        return False

    def apply(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """Run the experts on already-routed activations."""
        raise NotImplementedError

    def apply_monolithic(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """Run routing + experts in one fused call (monolithic backends)."""
        raise NotImplementedError
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/fused_moe_method_base.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.logger import init_logger
from vllm.model_executor.custom_op import CustomOp
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
FusedMoEMethodBase,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
FusedMoEModularKernel,
FusedMoEPrepareAndFinalize,
)
# Module-level logger, following vLLM's one-logger-per-module convention.
logger = init_logger(__name__)
# --8<-- [start:modular_fused_moe]
@CustomOp.register("modular_fused_moe")
class FusedMoEModularMethod(FusedMoEMethodBase, CustomOp):
    # --8<-- [end:modular_fused_moe]
    """Adapter that routes ``apply`` through a :class:`FusedMoEModularKernel`
    while delegating metadata queries to the wrapped quantize method."""

    def __init__(
        self, old_quant_method: FusedMoEMethodBase, experts: FusedMoEModularKernel
    ):
        super().__init__(old_quant_method.moe)
        self.moe_quant_config = old_quant_method.moe_quant_config
        self.moe_mk = experts
        # Honor an explicit opt-out on the wrapped method; otherwise fall back
        # to whether the modular kernel itself can apply an expert map.
        self.disable_expert_map = getattr(
            old_quant_method,
            "disable_expert_map",
            not self.moe_mk.supports_expert_map(),
        )
        self.old_quant_method = old_quant_method
        logger.debug("Swapping out %s", self.old_quant_method.__class__.__name__)

    @staticmethod
    def make(
        moe_layer: torch.nn.Module,
        old_quant_method: FusedMoEMethodBase,
        prepare_finalize: FusedMoEPrepareAndFinalize,
        shared_experts: torch.nn.Module | None,
        inplace: bool = False,
    ) -> "FusedMoEModularMethod":
        """Build a modular method from an existing quantize method, a
        prepare/finalize object and (optionally) shared experts."""
        gemm_impl = old_quant_method.select_gemm_impl(prepare_finalize, moe_layer)
        kernel = FusedMoEModularKernel(
            prepare_finalize,
            gemm_impl,
            shared_experts,
            moe_parallel_config=moe_layer.moe_parallel_config,
            inplace=inplace,
        )
        return FusedMoEModularMethod(old_quant_method, kernel)

    @property
    def supports_eplb(self) -> bool:
        # Delegate to the wrapped quantize method.
        return self.old_quant_method.supports_eplb

    @property
    def method_name(self) -> str:
        # Report the wrapped method's name, not the adapter's.
        return self.old_quant_method.method_name

    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        # This adapter never allocates weights itself.
        raise NotImplementedError

    def get_fused_moe_quant_config(
        self, layer: torch.nn.Module
    ) -> FusedMoEQuantConfig | None:
        return self.moe_quant_config

    def apply(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """Execute the modular kernel on pre-routed activations."""
        assert self.moe_mk is not None
        expert_map = None if self.disable_expert_map else layer.expert_map
        return self.moe_mk(
            hidden_states=x,
            w1=layer.w13_weight,
            w2=layer.w2_weight,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            activation=layer.activation,
            global_num_experts=layer.global_num_experts,
            apply_router_weight_on_input=layer.apply_router_weight_on_input,
            expert_map=expert_map,
            shared_experts_input=shared_experts_input,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/fused_moe_modular_method.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
import torch.nn.functional as F
from torch.nn import Module
from torch.nn.parameter import Parameter
import vllm.envs as envs
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm._aiter_ops import rocm_aiter_ops
from vllm.logger import init_logger
from vllm.model_executor.custom_op import CustomOp
from vllm.model_executor.layers.fused_moe.config import (
FUSED_MOE_UNQUANTIZED_CONFIG,
FusedMoEConfig,
FusedMoEQuantConfig,
biased_moe_quant_config,
)
from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
FusedMoEMethodBase,
)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
FusedMoEActivationFormat,
FusedMoEPermuteExpertsUnpermute,
FusedMoEPrepareAndFinalize,
)
from vllm.model_executor.layers.fused_moe.oracle.unquantized import (
UnquantizedMoeBackend,
convert_to_unquantized_kernel_format,
make_unquantized_moe_kernel,
select_unquantized_moe_backend,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
convert_moe_weights_to_flashinfer_trtllm_block_layout,
)
from vllm.model_executor.utils import replace_parameter, set_weight_attrs
from vllm.platforms import current_platform
from vllm.platforms.interface import CpuArchEnum
# Triton-based expert kernels import cleanly only on CUDA-like or XPU
# platforms. Stub BOTH names elsewhere so module-level references never hit a
# NameError on unsupported platforms (previously only TritonExperts was
# stubbed, leaving BatchedTritonExperts undefined).
if current_platform.is_cuda_alike() or current_platform.is_xpu():
    from .fused_batched_moe import BatchedTritonExperts
    from .fused_moe import TritonExperts
else:
    BatchedTritonExperts = None  # type: ignore
    TritonExperts = None  # type: ignore
# Module-level logger, following vLLM's one-logger-per-module convention.
logger = init_logger(__name__)
# --8<-- [start:unquantized_fused_moe]
@CustomOp.register("unquantized_fused_moe")
class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp):
    """MoE method without quantization."""
    # --8<-- [end:unquantized_fused_moe]
    def __init__(self, moe: FusedMoEConfig):
        super().__init__(moe)
        # Select the unquantized kernel backend from the MoE config and the
        # parallelism mode (EP / DP) in use.
        self.unquantized_backend = select_unquantized_moe_backend(
            moe_config=self.moe,
            use_ep=self.moe.moe_parallel_config.use_ep,
            use_dp=self.moe.moe_parallel_config.dp_size > 1,
        )
        # AITER only supports gated activations (silu/gelu), so disable it
        # for non-gated MoE (is_act_and_mul=False)
        self.rocm_aiter_moe_enabled = (
            rocm_aiter_ops.is_fused_moe_enabled() and moe.is_act_and_mul
        )
        # Modular kernel; built in _setup_kernel() after weights are loaded.
        self.kernel: mk.FusedMoEModularKernel | None = None
        # "Monolithic" backends fuse routing + experts into a single call and
        # are invoked via apply_monolithic() instead of apply().
        self._is_monolithic = (
            current_platform.is_cpu()
            or self.unquantized_backend == UnquantizedMoeBackend.FLASHINFER_TRTLLM
        )
        if self.is_monolithic:
            self.apply_monolithic: Callable = self._select_monolithic()
    def _select_monolithic(self) -> Callable:
        """Select the monolithic implementation based on platform."""
        if current_platform.is_cpu():
            return self.forward_monolithic_cpu
        else:
            return self.forward_monolithic_cuda
    def forward_native(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        # CustomOp "native" fallback simply reuses the CUDA path.
        return self.forward_cuda(layer, x, topk_weights, topk_ids, shared_experts_input)
    @property
    def is_monolithic(self) -> bool:
        # Decided once in __init__ from platform/backend.
        return self._is_monolithic
    @property
    def supports_eplb(self) -> bool:
        # Unquantized MoE supports expert rebalancing (EPLB).
        return True
    def maybe_make_prepare_finalize(
        self,
        routing_tables: tuple[torch.Tensor, torch.Tensor, torch.Tensor] | None = None,
    ) -> FusedMoEPrepareAndFinalize | None:
        # AITER uses its own dispatch path; no prepare/finalize object needed.
        if self.unquantized_backend == UnquantizedMoeBackend.AITER:
            return None
        else:
            return super().maybe_make_prepare_finalize(routing_tables)
    def select_gemm_impl(
        self,
        prepare_finalize: FusedMoEPrepareAndFinalize,
        layer: torch.nn.Module,
    ) -> FusedMoEPermuteExpertsUnpermute:
        """Pick the experts GEMM impl matching the prepare/finalize format."""
        assert self.moe_quant_config is not None
        if (
            prepare_finalize.activation_format
            == FusedMoEActivationFormat.BatchedExperts
        ):
            logger.debug("BatchedTritonExperts %s", self.moe)
            return BatchedTritonExperts(
                moe_config=self.moe,
                quant_config=self.moe_quant_config,
                max_num_tokens=self.moe.max_num_tokens,
                num_dispatchers=prepare_finalize.num_dispatchers(),
            )
        else:
            logger.debug("TritonExperts %s", self.moe)
            return TritonExperts(
                moe_config=self.moe,
                quant_config=self.moe_quant_config,
            )
    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        """Allocate w13 (fused gate_up) and w2 (down) weights and biases."""
        # Gated activations store gate and up projections fused -> 2x rows.
        if self.moe.is_act_and_mul:
            w13_up_dim = 2 * intermediate_size_per_partition
        else:
            w13_up_dim = intermediate_size_per_partition
        # Fused gate_up_proj (column parallel)
        w13_weight = torch.nn.Parameter(
            torch.empty(
                num_experts,
                w13_up_dim,
                hidden_size,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_weight", w13_weight)
        set_weight_attrs(w13_weight, extra_weight_attrs)
        if self.moe.has_bias:
            w13_bias = torch.nn.Parameter(
                torch.zeros(num_experts, w13_up_dim, dtype=params_dtype),
                requires_grad=False,
            )
            layer.register_parameter("w13_bias", w13_bias)
            set_weight_attrs(w13_bias, extra_weight_attrs)
        # down_proj (row parallel)
        w2_weight = torch.nn.Parameter(
            torch.empty(
                num_experts,
                hidden_size,
                intermediate_size_per_partition,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_weight", w2_weight)
        set_weight_attrs(w2_weight, extra_weight_attrs)
        if self.moe.has_bias:
            w2_bias = torch.nn.Parameter(
                torch.zeros(num_experts, hidden_size, dtype=params_dtype),
                requires_grad=False,
            )
            layer.register_parameter("w2_bias", w2_bias)
            set_weight_attrs(w2_bias, extra_weight_attrs)
    def _maybe_pad_weight(self, weight: torch.Tensor) -> torch.Tensor:
        """Pad (then slice back) the last dim on ROCm to de-alias tensors."""
        # Pad the weight tensor. This is an optimization on ROCm platform, which
        # can benefit from tensors located far enough from one another in memory
        if (
            envs.VLLM_ROCM_MOE_PADDING
            and current_platform.is_rocm()
            and weight.stride(-1) == 1
            and (weight.stride(-2) * weight.element_size()) % 512 == 0
        ):
            num_pad = 256 // weight.element_size()
            # F.pad allocates padded storage; the [..., :-num_pad] view keeps
            # the logical shape while shifting physical placement.
            weight = F.pad(weight, (0, num_pad), "constant", 0)[..., :-num_pad]
            torch.cuda.empty_cache()
        return weight
    def _setup_kernel(
        self,
        layer: Module,
        w13: torch.Tensor,
        w2: torch.Tensor,
    ) -> None:
        """Convert weights to the backend's runtime layout and build the kernel."""
        # Shuffle weights to runtime format.
        w13, w2 = convert_to_unquantized_kernel_format(
            self.unquantized_backend,
            layer=layer,
            w13_weight=w13,
            w2_weight=w2,
        )
        replace_parameter(layer, "w13_weight", w13)
        replace_parameter(layer, "w2_weight", w2)
        # Setup Modular Kernel for TP Case
        self.moe_quant_config = self.get_fused_moe_quant_config(layer)
        assert self.moe_quant_config is not None
        self.kernel = make_unquantized_moe_kernel(
            backend=self.unquantized_backend,
            quant_config=self.moe_quant_config,
            moe_config=self.moe,
        )
    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        """Post-load weight transforms: padding, backend-specific layout/packing."""
        super().process_weights_after_loading(layer)
        # Padding the weight for better performance on ROCm
        layer.w13_weight.data = self._maybe_pad_weight(layer.w13_weight.data)
        layer.w2_weight.data = self._maybe_pad_weight(layer.w2_weight.data)
        if self.unquantized_backend == UnquantizedMoeBackend.FLASHINFER_TRTLLM:
            _cache_permute_indices: dict[torch.Size, torch.Tensor] = {}
            # Swap halves to arrange as [w3; w1] (kernel expectation)
            w1_w, w3_w = torch.chunk(layer.w13_weight.data, 2, dim=1)
            w13_weight_swapped = torch.cat([w3_w, w1_w], dim=1)
            layer.w13_weight.data = w13_weight_swapped.contiguous()
            w13_weights_shuffled, w2_weights_shuffled = (
                convert_moe_weights_to_flashinfer_trtllm_block_layout(
                    _cache_permute_indices,
                    layer.w13_weight.data,
                    layer.w2_weight.data,
                )
            )
            layer.w13_weight = Parameter(w13_weights_shuffled, requires_grad=False)
            layer.w2_weight = Parameter(w2_weights_shuffled, requires_grad=False)
        elif self.unquantized_backend == UnquantizedMoeBackend.CPU:
            from vllm.model_executor.layers.fused_moe import cpu_fused_moe
            if current_platform.get_cpu_architecture() == CpuArchEnum.X86:
                from vllm.model_executor.layers.utils import check_cpu_sgl_kernel
                dtype_w13 = layer.w13_weight.dtype
                _, n_w13, k_w13 = layer.w13_weight.size()
                dtype_w2 = layer.w2_weight.dtype
                _, n_w2, k_w2 = layer.w2_weight.size()
                # Use the SGL kernel only when enabled and both weight shapes
                # and dtypes pass its compatibility check.
                if (
                    envs.VLLM_CPU_SGL_KERNEL
                    and check_cpu_sgl_kernel(n_w13, k_w13, dtype_w13)
                    and check_cpu_sgl_kernel(n_w2, k_w2, dtype_w2)
                ):
                    # Pack weights in-place (same shape) for the SGL kernel.
                    packed_w13_weight = torch.ops._C.convert_weight_packed(
                        layer.w13_weight
                    )
                    assert packed_w13_weight.size() == layer.w13_weight.size()
                    layer.w13_weight.copy_(packed_w13_weight)
                    del packed_w13_weight
                    packed_w2_weight = torch.ops._C.convert_weight_packed(
                        layer.w2_weight
                    )
                    assert packed_w2_weight.size() == layer.w2_weight.size()
                    layer.w2_weight.copy_(packed_w2_weight)
                    self.cpu_fused_moe: Callable = cpu_fused_moe.SGLFusedMOE(layer)
                else:
                    self.cpu_fused_moe = cpu_fused_moe.CPUFusedMOE(layer)
            else:
                self.cpu_fused_moe = cpu_fused_moe.CPUFusedMOE(layer)
        elif current_platform.is_cuda_alike() or current_platform.is_xpu():
            self._setup_kernel(
                layer=layer,
                w13=layer.w13_weight,
                w2=layer.w2_weight,
            )
    def apply(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        # Dispatch through CustomOp.forward -> forward_{cuda,native,...}.
        return self.forward(
            layer=layer,
            x=x,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            shared_experts_input=shared_experts_input,
        )
    def get_fused_moe_quant_config(self, layer: torch.nn.Module) -> FusedMoEQuantConfig:
        """Return the unquantized config, with biases attached when present."""
        if self.moe.has_bias:
            return biased_moe_quant_config(
                layer.w13_bias,
                layer.w2_bias,
            )
        else:
            return FUSED_MOE_UNQUANTIZED_CONFIG
    def forward_cuda(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """Run the modular kernel built in _setup_kernel()."""
        assert self.kernel is not None
        return self.kernel(
            hidden_states=x,
            w1=layer.w13_weight,
            w2=layer.w2_weight,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            activation=layer.activation,
            apply_router_weight_on_input=layer.apply_router_weight_on_input,
            global_num_experts=layer.global_num_experts,
            expert_map=layer.expert_map,
            shared_experts_input=shared_experts_input,
        )
    def forward_monolithic_cuda(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """FlashInfer TRT-LLM path: routing + experts in one fused op."""
        # Import registers the custom op as a side effect.
        import vllm.model_executor.layers.fused_moe.flashinfer_trtllm_moe  # noqa: F401
        assert self.unquantized_backend == UnquantizedMoeBackend.FLASHINFER_TRTLLM
        return torch.ops.vllm.flashinfer_fused_moe_bf16(
            routing_logits=router_logits,
            routing_bias=layer.e_score_correction_bias,
            hidden_states=x,
            gemm1_weights=layer.w13_weight,
            gemm2_weights=layer.w2_weight,
            num_experts=layer.global_num_experts,
            top_k=layer.top_k,
            n_group=layer.num_expert_group,
            topk_group=layer.topk_group,
            intermediate_size=layer.intermediate_size_per_partition,
            local_expert_offset=layer.ep_rank * layer.local_num_experts,
            local_num_experts=layer.local_num_experts,
            routing_method_type=layer.routing_method_type,
        )
    def forward_monolithic_cpu(
        self,
        layer: "FusedMoE",  # type: ignore[name-defined] # noqa: F821
        x: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """CPU path: delegate to the callable chosen after weight loading."""
        return self.cpu_fused_moe(
            layer,
            x,
            layer.use_grouped_topk,
            layer.top_k,
            router_logits,
            layer.renormalize,
            layer.topk_group,
            layer.num_expert_group,
            layer.global_num_experts,
            layer.expert_map,
            layer.custom_routing_function,
            layer.scoring_func,
            layer.routed_scaling_factor,
            layer.e_score_correction_bias,
            layer.apply_router_weight_on_input,
            layer.activation,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/unquantized_fused_moe_method.py",
"license": "Apache License 2.0",
"lines": 353,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/sagemaker/test_sagemaker_handler_overrides.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Integration tests for handler override functionality.
Tests real customer usage scenarios:
- Using @custom_ping_handler and @custom_invocation_handler decorators
to override handlers
- Setting environment variables for handler specifications
- Writing customer scripts with custom_sagemaker_ping_handler() and
custom_sagemaker_invocation_handler() functions
- Priority: env vars > decorators > customer script files > framework
defaults
Note: These tests focus on validating server responses rather than directly calling
get_ping_handler() and get_invoke_handler() to ensure full integration testing.
"""
import os
import tempfile
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import (
MODEL_NAME_SMOLLM,
)
class TestHandlerOverrideIntegration:
"""Integration tests simulating real customer usage scenarios.
Each test simulates a fresh server startup where customers:
- Use @custom_ping_handler and @custom_invocation_handler decorators
- Set environment variables (CUSTOM_FASTAPI_PING_HANDLER, etc.)
- Write customer scripts with custom_sagemaker_ping_handler() and
custom_sagemaker_invocation_handler() functions
"""
    def setup_method(self):
        """Setup for each test - simulate fresh server startup.

        Clears the handler registry / function-loader caches and any SageMaker
        env vars so state from a previous test cannot leak into this one.
        """
        self._clear_caches()
        self._clear_env_vars()
    def teardown_method(self):
        """Cleanup after each test.

        Env vars are cleared again on the way out so later tests (including
        other test modules) start from a clean environment.
        """
        self._clear_env_vars()
def _clear_caches(self):
"""Clear handler registry and function loader cache."""
try:
from model_hosting_container_standards.common.handler import (
handler_registry,
)
from model_hosting_container_standards.sagemaker.sagemaker_loader import (
SageMakerFunctionLoader,
)
handler_registry.clear()
SageMakerFunctionLoader._default_function_loader = None
except ImportError:
pytest.skip("model-hosting-container-standards not available")
def _clear_env_vars(self):
"""Clear SageMaker environment variables."""
try:
from model_hosting_container_standards.common.fastapi.config import (
FastAPIEnvVars,
)
from model_hosting_container_standards.sagemaker.config import (
SageMakerEnvVars,
)
# Clear SageMaker env vars
for var in [
SageMakerEnvVars.SAGEMAKER_MODEL_PATH,
SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME,
]:
os.environ.pop(var, None)
# Clear FastAPI env vars
for var in [
FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER,
FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER,
]:
os.environ.pop(var, None)
except ImportError:
pass
    @pytest.mark.asyncio
    async def test_customer_script_functions_auto_loaded(self):
        """Test customer scenario: script functions automatically override
        framework defaults."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a script file with ping() and invoke() functions
        # (delete=False: the server process must be able to read it; we unlink
        # manually in the finally block below).
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "customer_override",
        "message": "Custom ping from customer script"
    }
async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Custom response from customer script"],
        "source": "customer_override"
    }
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Customer sets SageMaker environment variables to point to their script
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Customer tests their server and sees their overrides work
                # automatically
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()
                # Customer sees their functions are used
                assert ping_data["source"] == "customer_override"
                assert ping_data["message"] == "Custom ping from customer script"
                assert invoke_data["source"] == "customer_override"
                assert invoke_data["predictions"] == [
                    "Custom response from customer script"
                ]
        finally:
            os.unlink(script_path)
    @pytest.mark.asyncio
    async def test_customer_decorator_usage(self):
        """Test customer scenario: using @custom_ping_handler and
        @custom_invocation_handler decorators."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a script file with decorators
        # (delete=False so the server process can read it; unlinked in finally).
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request
@sagemaker_standards.custom_ping_handler
async def my_ping():
    return {
        "type": "ping",
        "source": "customer_decorator"
    }
@sagemaker_standards.custom_invocation_handler
async def my_invoke(request: Request):
    return {
        "type": "invoke",
        "source": "customer_decorator"
    }
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()
                # Customer sees their handlers are used by the server
                assert ping_data["source"] == "customer_decorator"
                assert invoke_data["source"] == "customer_decorator"
        finally:
            os.unlink(script_path)
    @pytest.mark.asyncio
    async def test_handler_priority_order(self):
        """Test priority: @custom_ping_handler/@custom_invocation_handler
        decorators vs script functions."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a script with both decorator and regular functions.
        # The ping path has BOTH a decorated and a plain handler, so this test
        # observes which one wins; the invoke path only has the plain one.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request
# Customer uses @custom_ping_handler decorator (higher priority than script functions)
@sagemaker_standards.custom_ping_handler
async def decorated_ping():
    return {
        "status": "healthy",
        "source": "ping_decorator_in_script",
        "priority": "decorator"
    }
# Customer also has a regular function (lower priority than
# @custom_ping_handler decorator)
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script_function",
        "priority": "function"
    }
# Customer has a regular invoke function
async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script_invoke_function",
        "priority": "function"
    }
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()
                # @custom_ping_handler decorator has higher priority than
                # script function
                assert ping_data["source"] == "ping_decorator_in_script"
                assert ping_data["priority"] == "decorator"
                # Script function is used for invoke
                assert invoke_data["source"] == "script_invoke_function"
                assert invoke_data["priority"] == "function"
        finally:
            os.unlink(script_path)
    @pytest.mark.asyncio
    async def test_environment_variable_script_loading(self):
        """Test that environment variables correctly specify script location
        and loading."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a script in a specific directory
        # (delete=False so the server process can read it; unlinked in finally).
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "env_loaded_script",
        "method": "environment_variable_loading"
    }
async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Loaded via environment variables"],
        "source": "env_loaded_script",
        "method": "environment_variable_loading"
    }
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Test environment variable script loading
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                ping_response = requests.get(server.url_for("ping"))
                assert ping_response.status_code == 200
                ping_data = ping_response.json()
                invoke_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                    },
                )
                assert invoke_response.status_code == 200
                invoke_data = invoke_response.json()
                # Verify that the script was loaded via environment variables
                assert ping_data["source"] == "env_loaded_script"
                assert ping_data["method"] == "environment_variable_loading"
                assert invoke_data["source"] == "env_loaded_script"
                assert invoke_data["method"] == "environment_variable_loading"
        finally:
            os.unlink(script_path)
    @pytest.mark.asyncio
    async def test_framework_default_handlers(self):
        """Test that framework default handlers work when no customer
        overrides exist."""
        args = [
            "--dtype",
            "bfloat16",
            "--max-model-len",
            "2048",
            "--enforce-eager",
            "--max-num-seqs",
            "32",
        ]
        # Explicitly pass empty env_dict to ensure no SageMaker env vars are set
        # This prevents pollution from previous tests
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
            # Empty strings (rather than unset) force-override any inherited
            # values in the spawned server's environment.
            env_dict = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: "",
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: "",
                FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: "",
                FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: "",
            }
        except ImportError:
            env_dict = {}
        with RemoteOpenAIServer(MODEL_NAME_SMOLLM, args, env_dict=env_dict) as server:
            # Test that default ping works
            ping_response = requests.get(server.url_for("ping"))
            assert ping_response.status_code == 200
            # Test that default invocations work
            invoke_response = requests.post(
                server.url_for("invocations"),
                json={
                    "model": MODEL_NAME_SMOLLM,
                    "messages": [{"role": "user", "content": "Hello"}],
                    "max_tokens": 5,
                },
            )
            assert invoke_response.status_code == 200
@pytest.mark.asyncio
async def test_handler_env_var_override(self):
    """Test CUSTOM_FASTAPI_PING_HANDLER and CUSTOM_FASTAPI_INVOCATION_HANDLER
    environment variable overrides.

    The temp script defines both the env-var-addressed handlers and the
    conventionally named ``custom_sagemaker_*`` script functions; the
    responses must come from the env-var handlers.
    """
    try:
        from model_hosting_container_standards.common.fastapi.config import (
            FastAPIEnvVars,
        )
        from model_hosting_container_standards.sagemaker.config import (
            SageMakerEnvVars,
        )
    except ImportError:
        pytest.skip("model-hosting-container-standards not available")
    # Create a script with both env var handlers and script functions
    with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
        f.write(
            """
from fastapi import Request, Response
import json


async def env_var_ping_handler(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "env_var_ping",
            "method": "environment_variable"
        }),
        media_type="application/json"
    )


async def env_var_invoke_handler(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Environment variable response"],
            "source": "env_var_invoke",
            "method": "environment_variable"
        }),
        media_type="application/json"
    )


async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script_ping",
        "method": "script_function"
    }


async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script_invoke",
        "method": "script_function"
    }
"""
        )
        script_path = f.name
    try:
        script_dir = os.path.dirname(script_path)
        script_name = os.path.basename(script_path)
        # Set environment variables to override both handlers
        env_vars = {
            SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
            SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: (
                f"{script_name}:env_var_ping_handler"
            ),
            FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: (
                f"{script_name}:env_var_invoke_handler"
            ),
        }
        args = [
            "--dtype",
            "bfloat16",
            "--max-model-len",
            "2048",
            "--enforce-eager",
            "--max-num-seqs",
            "32",
        ]
        with RemoteOpenAIServer(
            MODEL_NAME_SMOLLM, args, env_dict=env_vars
        ) as server:
            # Test ping handler override
            ping_response = requests.get(server.url_for("ping"))
            assert ping_response.status_code == 200
            ping_data = ping_response.json()
            # Environment variable should override script function
            assert ping_data["method"] == "environment_variable"
            assert ping_data["source"] == "env_var_ping"
            # Test invocation handler override
            invoke_response = requests.post(
                server.url_for("invocations"),
                json={
                    "model": MODEL_NAME_SMOLLM,
                    "messages": [{"role": "user", "content": "Hello"}],
                    "max_tokens": 5,
                },
            )
            assert invoke_response.status_code == 200
            invoke_data = invoke_response.json()
            # Environment variable should override script function
            assert invoke_data["method"] == "environment_variable"
            assert invoke_data["source"] == "env_var_invoke"
    finally:
        # Remove the temp script even if the server failed to start.
        os.unlink(script_path)
@pytest.mark.asyncio
async def test_env_var_priority_over_decorator_and_script(self):
    """Test that environment variables have highest priority over decorators
    and script functions for both ping and invocation handlers.

    The temp script defines all three handler flavors (env-var, decorator,
    script function); the env-var pair must win on both endpoints.
    """
    try:
        from model_hosting_container_standards.common.fastapi.config import (
            FastAPIEnvVars,
        )
        from model_hosting_container_standards.sagemaker.config import (
            SageMakerEnvVars,
        )
    except ImportError:
        pytest.skip("model-hosting-container-standards not available")
    # Create a script with all three handler types for both ping and invocation
    with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
        f.write(
            """
import model_hosting_container_standards.sagemaker as sagemaker_standards
from fastapi import Request, Response
import json

# Environment variable handlers (highest priority)
async def env_priority_ping(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "env_var",
            "priority": "environment_variable"
        }),
        media_type="application/json"
    )

async def env_priority_invoke(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Environment variable response"],
            "source": "env_var",
            "priority": "environment_variable"
        }),
        media_type="application/json"
    )

# Decorator handlers (medium priority)
@sagemaker_standards.custom_ping_handler
async def decorator_ping(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "status": "healthy",
            "source": "decorator",
            "priority": "decorator"
        }),
        media_type="application/json"
    )

@sagemaker_standards.custom_invocation_handler
async def decorator_invoke(raw_request: Request) -> Response:
    return Response(
        content=json.dumps({
            "predictions": ["Decorator response"],
            "source": "decorator",
            "priority": "decorator"
        }),
        media_type="application/json"
    )

# Script functions (lowest priority)
async def custom_sagemaker_ping_handler():
    return {
        "status": "healthy",
        "source": "script",
        "priority": "script_function"
    }

async def custom_sagemaker_invocation_handler(request: Request):
    return {
        "predictions": ["Script function response"],
        "source": "script",
        "priority": "script_function"
    }
"""
        )
        script_path = f.name
    try:
        script_dir = os.path.dirname(script_path)
        script_name = os.path.basename(script_path)
        # Set environment variables to specify highest priority handlers
        env_vars = {
            SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
            SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            FastAPIEnvVars.CUSTOM_FASTAPI_PING_HANDLER: (
                f"{script_name}:env_priority_ping"
            ),
            FastAPIEnvVars.CUSTOM_FASTAPI_INVOCATION_HANDLER: (
                f"{script_name}:env_priority_invoke"
            ),
        }
        args = [
            "--dtype",
            "bfloat16",
            "--max-model-len",
            "2048",
            "--enforce-eager",
            "--max-num-seqs",
            "32",
        ]
        with RemoteOpenAIServer(
            MODEL_NAME_SMOLLM, args, env_dict=env_vars
        ) as server:
            # Test ping handler priority
            ping_response = requests.get(server.url_for("ping"))
            assert ping_response.status_code == 200
            ping_data = ping_response.json()
            # Environment variable has highest priority and should be used
            assert ping_data["priority"] == "environment_variable"
            assert ping_data["source"] == "env_var"
            # Test invocation handler priority
            invoke_response = requests.post(
                server.url_for("invocations"),
                json={
                    "model": MODEL_NAME_SMOLLM,
                    "messages": [{"role": "user", "content": "Hello"}],
                    "max_tokens": 5,
                },
            )
            assert invoke_response.status_code == 200
            invoke_data = invoke_response.json()
            # Environment variable has highest priority and should be used
            assert invoke_data["priority"] == "environment_variable"
            assert invoke_data["source"] == "env_var"
    finally:
        # Clean up the temp script regardless of server outcome.
        os.unlink(script_path)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/sagemaker/test_sagemaker_handler_overrides.py",
"license": "Apache License 2.0",
"lines": 637,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/sagemaker/test_sagemaker_lora_adapters.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import openai # use the official async_client for correctness check
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import MODEL_NAME_SMOLLM
@pytest.mark.asyncio
async def test_sagemaker_load_adapter_happy_path(
    async_client: openai.AsyncOpenAI,
    basic_server_with_lora: RemoteOpenAIServer,
    smollm2_lora_files,
):
    """Loading a LoRA adapter via POST /adapters registers it as a served model.

    The SageMaker standards library maps POST /adapters to the
    load_lora_adapter handler with {"lora_name": "body.name",
    "lora_path": "body.src"}.
    """
    adapter_name = "smollm2-lora-sagemaker"
    load_response = requests.post(
        basic_server_with_lora.url_for("adapters"),
        json={"name": adapter_name, "src": smollm2_lora_files},
    )
    load_response.raise_for_status()

    listing = await async_client.models.list()
    newest = listing.data[-1]
    assert newest.id == adapter_name
    assert newest.parent == MODEL_NAME_SMOLLM
    assert newest.root == smollm2_lora_files
@pytest.mark.asyncio
async def test_sagemaker_unload_adapter_happy_path(
    async_client: openai.AsyncOpenAI,
    basic_server_with_lora: RemoteOpenAIServer,
    smollm2_lora_files,
):
    """An adapter loaded via POST /adapters disappears after DELETE /adapters/{name}."""
    adapter_name = "smollm2-lora-sagemaker-unload"

    # Load the adapter first.
    requests.post(
        basic_server_with_lora.url_for("adapters"),
        json={"name": adapter_name, "src": smollm2_lora_files},
    ).raise_for_status()

    served = {m.id for m in (await async_client.models.list()).data}
    assert adapter_name in served

    # DELETE /adapters/{adapter_name} maps to unload_lora_adapter with
    # {"lora_name": "path_params.adapter_name"}.
    requests.delete(
        basic_server_with_lora.url_for("adapters", adapter_name),
    ).raise_for_status()

    served = {m.id for m in (await async_client.models.list()).data}
    assert adapter_name not in served
@pytest.mark.asyncio
async def test_sagemaker_load_adapter_not_found(
    basic_server_with_lora: RemoteOpenAIServer,
):
    """Pointing the load endpoint at a missing path must yield HTTP 404."""
    response = requests.post(
        basic_server_with_lora.url_for("adapters"),
        json={"name": "nonexistent-adapter", "src": "/path/does/not/exist"},
    )
    assert response.status_code == 404
@pytest.mark.asyncio
async def test_sagemaker_load_adapter_invalid_files(
    basic_server_with_lora: RemoteOpenAIServer,
    tmp_path,
):
    """A directory containing a corrupt adapter_config.json is rejected with 400."""
    bogus_dir = tmp_path / "invalid_adapter"
    bogus_dir.mkdir()
    (bogus_dir / "adapter_config.json").write_text("not valid json")

    response = requests.post(
        basic_server_with_lora.url_for("adapters"),
        json={"name": "invalid-adapter", "src": str(bogus_dir)},
    )
    assert response.status_code == 400
@pytest.mark.asyncio
async def test_sagemaker_unload_nonexistent_adapter(
    basic_server_with_lora: RemoteOpenAIServer,
):
    """Unloading an adapter that was never loaded fails with a client error."""
    response = requests.delete(
        basic_server_with_lora.url_for("adapters", "nonexistent-adapter-name"),
    )
    # Either 400 or 404 is acceptable for an unknown adapter.
    assert response.status_code in {400, 404}
@pytest.mark.asyncio
async def test_sagemaker_invocations_with_adapter(
    basic_server_with_lora: RemoteOpenAIServer,
    smollm2_lora_files,
):
    """/invocations routes to the adapter named in X-Amzn-SageMaker-Adapter-Identifier."""
    adapter_name = "smollm2-lora-invoke-test"

    # Register the adapter through the SageMaker endpoint first.
    requests.post(
        basic_server_with_lora.url_for("adapters"),
        json={"name": adapter_name, "src": smollm2_lora_files},
    ).raise_for_status()

    # Invoke with the adapter-identifier header set.
    invocation_response = requests.post(
        basic_server_with_lora.url_for("invocations"),
        headers={
            "X-Amzn-SageMaker-Adapter-Identifier": adapter_name,
        },
        json={
            "prompt": "Hello, how are you?",
            "max_tokens": 10,
        },
    )
    invocation_response.raise_for_status()

    # The response must be a well-formed completion payload.
    payload = invocation_response.json()
    assert "choices" in payload
    assert len(payload["choices"]) > 0
    assert "text" in payload["choices"][0]
@pytest.mark.asyncio
async def test_sagemaker_multiple_adapters_load_unload(
    async_client: openai.AsyncOpenAI,
    basic_server_with_lora: RemoteOpenAIServer,
    smollm2_lora_files,
):
    """Several adapters can be loaded and unloaded independently."""
    names = [f"sagemaker-adapter-{i}" for i in range(5)]
    adapters_url = basic_server_with_lora.url_for("adapters")

    # Register every adapter.
    for name in names:
        requests.post(
            adapters_url,
            json={"name": name, "src": smollm2_lora_files},
        ).raise_for_status()

    # All of them must show up in the model listing.
    served = {m.id for m in (await async_client.models.list()).data}
    assert served.issuperset(names)

    # Remove every adapter again.
    for name in names:
        requests.delete(
            basic_server_with_lora.url_for("adapters", name),
        ).raise_for_status()

    # None of them may remain in the listing.
    served = {m.id for m in (await async_client.models.list()).data}
    assert served.isdisjoint(names)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/sagemaker/test_sagemaker_lora_adapters.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/sagemaker/test_sagemaker_middleware_integration.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Integration test for middleware loader functionality.
Tests that customer middlewares get called correctly with a vLLM server.
"""
import os
import tempfile
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import (
MODEL_NAME_SMOLLM,
)
class TestMiddlewareIntegration:
    """Integration test for middleware with vLLM server.

    Each test writes a customer script to a temp file, points the
    SageMaker standards loader at it via environment variables, launches a
    real server, and checks the middleware effects on response headers.
    """

    def setup_method(self):
        """Setup for each test - simulate fresh server startup."""
        self._clear_caches()

    def _clear_caches(self):
        """Clear middleware registry and function loader cache.

        Skips the test when the standards library is not installed.
        """
        try:
            from model_hosting_container_standards.common.fastapi.middleware import (
                middleware_registry,
            )
            from model_hosting_container_standards.common.fastapi.middleware.source.decorator_loader import (  # noqa: E501
                decorator_loader,
            )
            from model_hosting_container_standards.sagemaker.sagemaker_loader import (
                SageMakerFunctionLoader,
            )

            middleware_registry.clear_middlewares()
            decorator_loader.clear()
            # Reset the cached loader so the next server start re-reads env vars.
            SageMakerFunctionLoader._default_function_loader = None
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")

    @pytest.mark.asyncio
    async def test_customer_middleware_with_vllm_server(self):
        """Test that customer middlewares work with actual vLLM server.

        Tests decorator-based middlewares (@custom_middleware, @input_formatter,
        @output_formatter)
        on multiple endpoints (chat/completions, invocations).
        """
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a middleware script with multiple decorators
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from model_hosting_container_standards.common.fastapi.middleware import (
    custom_middleware, input_formatter, output_formatter
)

# Global flag to track if input formatter was called
_input_formatter_called = False


@input_formatter
async def customer_input_formatter(request):
    # Process input - mark that input formatter was called
    global _input_formatter_called
    _input_formatter_called = True
    return request


@custom_middleware("throttle")
async def customer_throttle_middleware(request, call_next):
    response = await call_next(request)
    response.headers["X-Customer-Throttle"] = "applied"
    order = response.headers.get("X-Middleware-Order", "")
    response.headers["X-Middleware-Order"] = order + "throttle,"
    return response


@output_formatter
async def customer_output_formatter(response):
    global _input_formatter_called
    response.headers["X-Customer-Processed"] = "true"
    # Since input_formatter and output_formatter are combined into
    # pre_post_process middleware,
    # if output_formatter is called, input_formatter should have been called too
    if _input_formatter_called:
        response.headers["X-Input-Formatter-Called"] = "true"
    order = response.headers.get("X-Middleware-Order", "")
    response.headers["X-Middleware-Order"] = order + "output_formatter,"
    return response
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Set environment variables to point to customer script
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Test 1: Middlewares applied to chat/completions endpoint
                chat_response = requests.post(
                    server.url_for("v1/chat/completions"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                        "temperature": 0.0,
                    },
                )
                assert chat_response.status_code == 200
                # Verify all middlewares were executed
                assert "X-Customer-Throttle" in chat_response.headers
                assert chat_response.headers["X-Customer-Throttle"] == "applied"
                assert "X-Customer-Processed" in chat_response.headers
                assert chat_response.headers["X-Customer-Processed"] == "true"
                # Verify input formatter was called
                assert "X-Input-Formatter-Called" in chat_response.headers
                assert chat_response.headers["X-Input-Formatter-Called"] == "true"
                # Verify middleware execution order
                execution_order = chat_response.headers.get(
                    "X-Middleware-Order", ""
                ).rstrip(",")
                order_parts = execution_order.split(",") if execution_order else []
                assert "throttle" in order_parts
                assert "output_formatter" in order_parts
                # Test 2: Middlewares applied to invocations endpoint
                invocations_response = requests.post(
                    server.url_for("invocations"),
                    json={
                        "model": MODEL_NAME_SMOLLM,
                        "messages": [{"role": "user", "content": "Hello"}],
                        "max_tokens": 5,
                        "temperature": 0.0,
                    },
                )
                assert invocations_response.status_code == 200
                # Verify all middlewares were executed
                assert "X-Customer-Throttle" in invocations_response.headers
                assert invocations_response.headers["X-Customer-Throttle"] == "applied"
                assert "X-Customer-Processed" in invocations_response.headers
                assert invocations_response.headers["X-Customer-Processed"] == "true"
                # Verify input formatter was called
                assert "X-Input-Formatter-Called" in invocations_response.headers
                assert (
                    invocations_response.headers["X-Input-Formatter-Called"] == "true"
                )
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_middleware_with_ping_endpoint(self):
        """Test that middlewares work with SageMaker ping endpoint."""
        try:
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Customer writes a middleware script
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from model_hosting_container_standards.common.fastapi.middleware import (
    custom_middleware
)


@custom_middleware("pre_post_process")
async def ping_tracking_middleware(request, call_next):
    response = await call_next(request)
    if request.url.path == "/ping":
        response.headers["X-Ping-Tracked"] = "true"
    return response
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                # Test ping endpoint with middleware
                response = requests.get(server.url_for("ping"))
                assert response.status_code == 200
                assert "X-Ping-Tracked" in response.headers
                assert response.headers["X-Ping-Tracked"] == "true"
        finally:
            os.unlink(script_path)

    @pytest.mark.asyncio
    async def test_middleware_env_var_override(self):
        """Test middleware environment variable overrides.

        Middleware/pre/post functions are addressed via env vars of the form
        "<script>:<function>" rather than decorators.
        """
        try:
            from model_hosting_container_standards.common.fastapi.config import (
                FastAPIEnvVars,
            )
            from model_hosting_container_standards.sagemaker.config import (
                SageMakerEnvVars,
            )
        except ImportError:
            pytest.skip("model-hosting-container-standards not available")
        # Create a script with middleware functions specified via env vars
        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            f.write(
                """
from fastapi import Request

# Global flag to track if pre_process was called
_pre_process_called = False


async def env_throttle_middleware(request, call_next):
    response = await call_next(request)
    response.headers["X-Env-Throttle"] = "applied"
    return response


async def env_pre_process(request: Request) -> Request:
    # Mark that pre_process was called
    global _pre_process_called
    _pre_process_called = True
    return request


async def env_post_process(response):
    global _pre_process_called
    if hasattr(response, 'headers'):
        response.headers["X-Env-Post-Process"] = "applied"
        # Since pre_process and post_process are combined into
        # pre_post_process middleware,
        # if post_process is called, pre_process should have been called too
        if _pre_process_called:
            response.headers["X-Pre-Process-Called"] = "true"
    return response
"""
            )
            script_path = f.name
        try:
            script_dir = os.path.dirname(script_path)
            script_name = os.path.basename(script_path)
            # Set environment variables for middleware
            # Use script_name with .py extension as per plugin example
            env_vars = {
                SageMakerEnvVars.SAGEMAKER_MODEL_PATH: script_dir,
                SageMakerEnvVars.CUSTOM_SCRIPT_FILENAME: script_name,
                FastAPIEnvVars.CUSTOM_FASTAPI_MIDDLEWARE_THROTTLE: (
                    f"{script_name}:env_throttle_middleware"
                ),
                FastAPIEnvVars.CUSTOM_PRE_PROCESS: f"{script_name}:env_pre_process",
                FastAPIEnvVars.CUSTOM_POST_PROCESS: f"{script_name}:env_post_process",
            }
            args = [
                "--dtype",
                "bfloat16",
                "--max-model-len",
                "2048",
                "--enforce-eager",
                "--max-num-seqs",
                "32",
            ]
            with RemoteOpenAIServer(
                MODEL_NAME_SMOLLM, args, env_dict=env_vars
            ) as server:
                response = requests.get(server.url_for("ping"))
                assert response.status_code == 200
                # Check if environment variable middleware was applied
                headers = response.headers
                # Verify that env var middlewares were applied
                assert "X-Env-Throttle" in headers, (
                    "Throttle middleware should be applied via env var"
                )
                assert headers["X-Env-Throttle"] == "applied"
                assert "X-Env-Post-Process" in headers, (
                    "Post-process middleware should be applied via env var"
                )
                assert headers["X-Env-Post-Process"] == "applied"
                # Verify that pre_process was called
                assert "X-Pre-Process-Called" in headers, (
                    "Pre-process should be called via env var"
                )
                assert headers["X-Pre-Process-Called"] == "true"
        finally:
            os.unlink(script_path)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/sagemaker/test_sagemaker_middleware_integration.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/sagemaker/test_sagemaker_stateful_sessions.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import openai # use the official client for correctness check
import pytest
import requests
from ...utils import RemoteOpenAIServer
from .conftest import (
HEADER_SAGEMAKER_CLOSED_SESSION_ID,
HEADER_SAGEMAKER_NEW_SESSION_ID,
HEADER_SAGEMAKER_SESSION_ID,
MODEL_NAME_SMOLLM,
)
# Parametrized CLOSE-failure cases, each a tuple of
# (test_name, session_id_change, request_body_change, expected_error).
# expected_error of None means only the 400 status code is checked.
CLOSE_BADREQUEST_CASES = [
    (
        "nonexistent_session_id",
        {"session_id": "nonexistent-session-id"},
        {},
        "session not found",
    ),
    ("malformed_close_request", {}, {"extra-field": "extra-field-data"}, None),
]
@pytest.mark.asyncio
async def test_create_session_badrequest(basic_server_with_lora: RemoteOpenAIServer):
    """A NEW_SESSION request carrying an unknown field must be rejected with 400."""
    response = requests.post(
        basic_server_with_lora.url_for("invocations"),
        json={"requestType": "NEW_SESSION", "extra-field": "extra-field-data"},
    )
    assert response.status_code == 400
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "test_name,session_id_change,request_body_change,expected_error",
    CLOSE_BADREQUEST_CASES,
)
async def test_close_session_badrequest(
    basic_server_with_lora: RemoteOpenAIServer,
    test_name: str,
    session_id_change: dict[str, str],
    request_body_change: dict[str, str],
    expected_error: str | None,
):
    """Malformed CLOSE requests fail with 400 while the real session stays closable."""
    url = basic_server_with_lora.url_for("invocations")

    # Open a real session so the bad CLOSE has something to run against.
    created = requests.post(url, json={"requestType": "NEW_SESSION"})
    created.raise_for_status()
    session_id, _expiration = created.headers.get(
        HEADER_SAGEMAKER_NEW_SESSION_ID, ""
    ).split(";")
    assert session_id

    # Build the (possibly malformed) CLOSE payload and session header.
    payload = {"requestType": "CLOSE", **request_body_change}
    override_id = session_id_change.get("session_id")
    bad_close = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: override_id or session_id},
        json=payload,
    )

    # Always tear down the real session; this close must succeed.
    cleanup = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: session_id},
        json={"requestType": "CLOSE"},
    )
    cleanup.raise_for_status()

    assert bad_close.status_code == 400
    if expected_error:
        assert expected_error in bad_close.json()["error"]["message"]
@pytest.mark.asyncio
async def test_close_session_invalidrequest(
    basic_server_with_lora: RemoteOpenAIServer, async_client: openai.AsyncOpenAI
):
    """A CLOSE without the session-id header fails with 424 / "invalid session_id"."""
    url = basic_server_with_lora.url_for("invocations")

    # Open a real session first.
    created = requests.post(url, json={"requestType": "NEW_SESSION"})
    created.raise_for_status()
    session_id, _expiration = created.headers.get(
        HEADER_SAGEMAKER_NEW_SESSION_ID, ""
    ).split(";")
    assert session_id

    # Deliberately omit the session-id header.
    invalid_close = requests.post(url, json={"requestType": "CLOSE"})

    # Tear down the session we created; this close must succeed.
    requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: session_id},
        json={"requestType": "CLOSE"},
    ).raise_for_status()

    assert invalid_close.status_code == 424
    assert "invalid session_id" in invalid_close.json()["error"]["message"]
@pytest.mark.asyncio
async def test_session(basic_server_with_lora: RemoteOpenAIServer):
    """Happy path: create a session, invoke with it, then close it."""
    url = basic_server_with_lora.url_for("invocations")

    # Create a session and capture its id from the response header.
    created = requests.post(url, json={"requestType": "NEW_SESSION"})
    created.raise_for_status()
    session_id, _expiration = created.headers.get(
        HEADER_SAGEMAKER_NEW_SESSION_ID, ""
    ).split(";")
    assert session_id

    # A normal completion request carrying the session header should succeed.
    completion_body = {
        "model": MODEL_NAME_SMOLLM,
        "prompt": "what is 1+1?",
        "max_completion_tokens": 5,
        "temperature": 0.0,
        "logprobs": False,
    }
    requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: session_id},
        json=completion_body,
    ).raise_for_status()

    # Closing must succeed and echo the closed session id back.
    closed = requests.post(
        url,
        headers={HEADER_SAGEMAKER_SESSION_ID: session_id},
        json={"requestType": "CLOSE"},
    )
    closed.raise_for_status()
    assert closed.headers.get(HEADER_SAGEMAKER_CLOSED_SESSION_ID) == session_id
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/sagemaker/test_sagemaker_stateful_sessions.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/structured_output/test_backend_guidance.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from concurrent.futures import Future
import pytest
from transformers import AutoTokenizer
from vllm.config import StructuredOutputsConfig, VllmConfig
from vllm.config.model import ModelConfig
from vllm.config.parallel import ParallelConfig
from vllm.config.speculative import SpeculativeConfig
from vllm.sampling_params import SamplingParams, StructuredOutputsParams
from vllm.v1.request import Request
from vllm.v1.structured_output import StructuredOutputManager
from vllm.v1.structured_output.backend_guidance import GuidanceBackend
from vllm.v1.structured_output.backend_types import StructuredOutputOptions
# Hugging Face tokenizer id used by every test in this module.
TOKENIZER = "gpt2"
def test_backend_guidance_rollback_terminated():
    """The guidance backend must be able to roll back out of a terminated state.

    With speculative decoding the draft model may propose EOS and the
    guidance backend verifies it, leaving the matcher terminated; if the
    target model then rejects EOS, rollback has to restore a live state.
    """
    cfg = VllmConfig(
        structured_outputs_config=StructuredOutputsConfig(backend="guidance")
    )
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER)
    backend = GuidanceBackend(
        cfg,
        tokenizer=tokenizer,
        vocab_size=50257,
    )
    grammar = backend.compile_grammar(
        StructuredOutputOptions.JSON, '{"type": "object"}'
    )

    json_tokens = tokenizer.encode('{"a": "b"}')
    assert len(json_tokens) > 1
    bad_tokens = tokenizer.encode('{"a"}')

    # Feed the valid JSON object token by token.
    for tok in json_tokens:
        assert grammar.accept_tokens("", [tok])
    assert not grammar.is_terminated()

    # EOS terminates the grammar.
    assert grammar.accept_tokens("", [tokenizer.eos_token_id])
    assert grammar.is_terminated()
    # Once terminated, any further tokens are also accepted.
    assert grammar.accept_tokens("", bad_tokens)

    # Rollback counts from where the state terminated — from '}', not EOS.
    grammar.rollback(len(json_tokens) - 1)
    assert not grammar.is_terminated()
    assert grammar.validate_tokens([tokenizer.eos_token_id]) == []
    assert grammar.validate_tokens(bad_tokens) != bad_tokens

    # Re-accept the remainder and terminate again via EOS.
    assert grammar.accept_tokens("", json_tokens[1:])
    assert not grammar.is_terminated()
    assert grammar.accept_tokens("", [tokenizer.eos_token_id])
    assert grammar.is_terminated()

    # Rollbacks of zero or negative length leave the terminated state alone.
    grammar.rollback(0)
    assert grammar.is_terminated()
    grammar.rollback(-1)
    assert grammar.is_terminated()
def test_grammar_bitmask_with_specdec():
    """Drive StructuredOutputManager.grammar_bitmask() with speculative tokens.

    Feeds several speculative batches (EOS last, EOS mid-sequence, EOS
    absent) and checks the grammar is rolled back — i.e. never left in a
    terminated state — after each bitmask computation.
    """
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER)
    prompt = tokenizer.encode('{"a": "b"}')
    vllm_config = VllmConfig(
        model_config=ModelConfig(tokenizer=TOKENIZER),
        structured_outputs_config=StructuredOutputsConfig(backend="guidance"),
        speculative_config=SpeculativeConfig(model="[ngram]", num_speculative_tokens=3),
    )
    structured_output_manager = StructuredOutputManager(vllm_config)
    # NOTE(review): range(1, 2) executes exactly once (i == 1); presumably
    # intended as a sweep over prompt split points — confirm whether a wider
    # range was meant.
    for i in range(1, 2):
        sampling_params = SamplingParams(
            structured_outputs=StructuredOutputsParams(
                json='{"type": "object"}',
            ),
        )
        sampling_params.structured_outputs._backend = "guidance"
        sampling_params.update_from_generation_config({}, tokenizer.eos_token_id)
        my_req_id = f"my_req_id_{i}"
        request = Request(
            my_req_id,
            prompt_token_ids=prompt[:i],
            sampling_params=sampling_params,
            pooling_params=None,
        )
        structured_output_manager.grammar_init(request)

        def grammar_bitmask(req: Request, tokens: list[int]) -> None:
            # Compute the bitmask for a single request with the given
            # speculative tokens, then check the manager rolled back.
            structured_output_manager.grammar_bitmask(
                requests={req.request_id: req},
                structured_output_request_ids={req.request_id: 0},
                scheduled_spec_decode_tokens={req.request_id: tokens},
            )
            # At this point, we rolled-back, so should not be terminated
            assert not req.structured_output_request.grammar.is_terminated()

        # The grammar might not yet be compiled, so we wait for it
        # (busy-wait until the background compilation finishes).
        while not request.structured_output_request._check_grammar_completion():
            continue
        assert request.structured_output_request.grammar.accept_tokens(
            request.request_id, prompt[:i]
        )
        grammar_bitmask(request, prompt[i:] + [tokenizer.eos_token_id])
        grammar_bitmask(
            request, prompt[i:] + [tokenizer.eos_token_id] + prompt
        )  # EOS not the final token
        grammar_bitmask(request, prompt[i:])  # EOS not present
        grammar_bitmask(request, prompt[i:] + [tokenizer.eos_token_id])
@pytest.mark.parametrize("async_grammar", [True, False])
def test_grammar_init_async_and_sync(async_grammar):
    """Test grammar initialization works correctly in both async and sync modes.

    This test validates that the distributed_executor_backend config option
    correctly controls whether grammar compilation happens asynchronously
    (via executor.submit) or synchronously. When set to "external_launcher",
    grammar compilation is synchronous to avoid deadlocks.
    """
    tokenizer = AutoTokenizer.from_pretrained(TOKENIZER)
    prompt = tokenizer.encode('{"a": "b"}')

    # Use "external_launcher" for sync mode, None for async mode
    executor_backend = None if async_grammar else "external_launcher"
    vllm_config = VllmConfig(
        model_config=ModelConfig(tokenizer=TOKENIZER),
        structured_outputs_config=StructuredOutputsConfig(backend="guidance"),
        parallel_config=ParallelConfig(distributed_executor_backend=executor_backend),
    )
    structured_output_manager = StructuredOutputManager(vllm_config)

    sampling_params = SamplingParams(
        structured_outputs=StructuredOutputsParams(
            json='{"type": "object"}',
        ),
    )
    sampling_params.structured_outputs._backend = "guidance"
    sampling_params.update_from_generation_config({}, tokenizer.eos_token_id)

    request = Request(
        "test_request",
        prompt_token_ids=prompt,
        sampling_params=sampling_params,
        pooling_params=None,
    )
    structured_output_manager.grammar_init(request)

    # Check the internal _grammar type immediately after init.
    # Before _check_grammar_completion is called, async mode should have a Future
    raw_grammar = request.structured_output_request._grammar
    if async_grammar:
        assert isinstance(raw_grammar, Future), (
            "Async mode should store a Future before completion"
        )
    else:
        assert not isinstance(raw_grammar, Future), (
            "Sync mode should store the grammar directly, not a Future"
        )

    # Wait for grammar to be ready (handles both async and sync cases)
    start_time = time.time()
    while not request.structured_output_request._check_grammar_completion():
        if time.time() - start_time > 5:  # 5-second timeout
            pytest.fail("Grammar compilation timed out")
        time.sleep(0.01)

    # After completion, _grammar should no longer be a Future
    assert not isinstance(request.structured_output_request._grammar, Future)

    # Verify grammar is properly initialized and functional
    grammar = request.structured_output_request.grammar
    assert grammar is not None
    assert not grammar.is_terminated()

    # Verify the grammar can accept valid tokens
    assert grammar.accept_tokens(request.request_id, prompt)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/structured_output/test_backend_guidance.py",
"license": "Apache License 2.0",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/_aiter_ops.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
from collections.abc import Callable
import torch
from torch._ops import OpOverload
import vllm.envs as envs
from vllm.platforms import current_platform
from vllm.utils.torch_utils import direct_register_custom_op
from vllm.v1.attention.ops.rocm_aiter_mla_sparse import (
rocm_aiter_sparse_attn_indexer,
rocm_aiter_sparse_attn_indexer_fake,
)
# fp8_dtype is not cached.
# on ROCm the fp8_dtype always calls is_fp8_fnuz
# which is a host op, so we cache it once here.
FP8_DTYPE = current_platform.fp8_dtype()
def is_aiter_found() -> bool:
    """Return True when the ``aiter`` package can be located on this system."""
    import importlib.util

    return importlib.util.find_spec("aiter") is not None
# `find_spec` is not torch.compile compatible.
# In cases where aiter availability might have
# been checked in forward passes that are torch compiled.
# we keep this global outside to not cause torch compile breaks.
IS_AITER_FOUND = is_aiter_found()
def is_aiter_found_and_supported() -> bool:
    """Check if AITER library is available and platform supports it.

    Checks: platform (ROCm), device arch (gfx9), and library existence.
    Does NOT check environment variables - that's handled by
    rocm_aiter_ops.is_enabled(). This function determines if aiter CAN be
    used, not if it SHOULD be used.

    Separation of concerns:
    - This function: Can aiter work on this system? (platform + library)
    - rocm_aiter_ops.is_enabled(): Should aiter be used by default?
      (adds env var check)
    - Backend selection: Can explicitly request aiter regardless of env var

    This allows explicit backend selection via attention_config to work even
    when VLLM_ROCM_USE_AITER=0, while preventing unwanted JIT warnings for
    auto-discovery.
    """
    # Guard clauses: must be ROCm and the library must have been found.
    if not current_platform.is_rocm():
        return False
    if not IS_AITER_FOUND:
        return False

    from vllm.platforms.rocm import on_gfx9

    return on_gfx9()
def if_aiter_supported(func: Callable) -> Callable:
    """Decorator that short-circuits ``func`` unless AITER is usable.

    The wrapped function only executes when the ROCm AITER package is
    present and supported on a gfx9 arch; otherwise the call returns None.
    """

    @functools.wraps(func)
    def guarded(*args, **kwargs):
        if not is_aiter_found_and_supported():
            return None
        return func(*args, **kwargs)

    return guarded
def _rocm_aiter_fused_moe_impl(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weight: torch.Tensor,
    topk_ids: torch.Tensor,
    expert_mask: torch.Tensor | None = None,
    activation_method: int = 0,
    quant_method: int = 0,
    doweight_stage1: bool = False,
    w1_scale: torch.Tensor | None = None,
    w2_scale: torch.Tensor | None = None,
    a1_scale: torch.Tensor | None = None,
    a2_scale: torch.Tensor | None = None,
    num_local_tokens: torch.Tensor | None = None,
    output_dtype: torch.dtype | None = None,
    hidden_pad: int = 0,
    intermediate_pad: int = 0,
    bias1: torch.Tensor | None = None,
    bias2: torch.Tensor | None = None,
) -> torch.Tensor:
    """Dispatch to aiter's fused MoE kernel.

    ``activation_method`` / ``quant_method`` carry the integer values of
    aiter's ActivationType / QuantType enums so the registered custom op
    only deals in plain ints; they are re-wrapped here.
    """
    from aiter import ActivationType, QuantType
    from aiter.fused_moe import fused_moe

    # Re-wrap plain ints into aiter enums.
    activation = ActivationType(activation_method)
    quant_type = QuantType(quant_method)

    return fused_moe(
        hidden_states,
        w1,
        w2,
        topk_weight,
        topk_ids,
        expert_mask,
        activation,
        quant_type,
        doweight_stage1,
        w1_scale,
        w2_scale,
        a1_scale,
        a2_scale,
        num_local_tokens=num_local_tokens,
        dtype=output_dtype,
        hidden_pad=hidden_pad,
        intermediate_pad=intermediate_pad,
        bias1=bias1,
        bias2=bias2,
    )
def _rocm_aiter_fused_moe_fake(
hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
topk_weight: torch.Tensor,
topk_ids: torch.Tensor,
expert_mask: torch.Tensor | None = None,
activation_method: int = 0,
quant_method: int = 0,
doweight_stage1: bool = False,
w1_scale: torch.Tensor | None = None,
w2_scale: torch.Tensor | None = None,
a1_scale: torch.Tensor | None = None,
a2_scale: torch.Tensor | None = None,
num_local_tokens: torch.Tensor | None = None,
output_dtype: torch.dtype | None = None,
) -> torch.Tensor:
if output_dtype is not None:
return torch.empty_like(hidden_states, dtype=output_dtype)
return torch.empty_like(hidden_states)
def _rocm_aiter_asm_moe_tkw1_impl(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    fc1_scale: torch.Tensor | None = None,
    fc2_scale: torch.Tensor | None = None,
    fc1_smooth_scale: torch.Tensor | None = None,
    fc2_smooth_scale: torch.Tensor | None = None,
    a16: bool = False,
    per_tensor_quant_scale: torch.Tensor | None = None,
    expert_mask: torch.Tensor | None = None,
    activation_method: int = 0,
) -> torch.Tensor:
    """Dispatch to aiter's assembly MoE kernel (tkw1 variant).

    ``activation_method`` is the integer value of aiter's ActivationType
    enum, re-wrapped here so the registered custom op takes a plain int.
    """
    from aiter import ActivationType
    from aiter.fused_moe_bf16_asm import asm_moe_tkw1

    activation = ActivationType(activation_method)

    return asm_moe_tkw1(
        hidden_states,
        w1,
        w2,
        topk_weights,
        topk_ids,
        fc1_scale=fc1_scale,
        fc2_scale=fc2_scale,
        fc1_smooth_scale=fc1_smooth_scale,
        fc2_smooth_scale=fc2_smooth_scale,
        a16=a16,
        per_tensor_quant_scale=per_tensor_quant_scale,
        expert_mask=expert_mask,
        activation=activation,
    )
def _rocm_aiter_asm_moe_tkw1_fake(
hidden_states: torch.Tensor,
w1: torch.Tensor,
w2: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
fc1_scale: torch.Tensor | None = None,
fc2_scale: torch.Tensor | None = None,
fc1_smooth_scale: torch.Tensor | None = None,
fc2_smooth_scale: torch.Tensor | None = None,
a16: bool = False,
per_tensor_quant_scale: torch.Tensor | None = None,
expert_mask: torch.Tensor | None = None,
activation_method: int = 0,
) -> torch.Tensor:
return torch.empty_like(hidden_states)
def _rocm_aiter_topk_softmax_impl(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool,
) -> None:
    """Top-k softmax routing; results are written in place into
    ``topk_weights`` / ``topk_indices`` / ``token_expert_indices``
    (matching the op's mutates_args registration)."""
    from aiter import topk_softmax

    topk_softmax(
        topk_weights, topk_indices, token_expert_indices, gating_output, renormalize
    )
def _rocm_aiter_topk_softmax_fake(
topk_weights: torch.Tensor,
topk_indices: torch.Tensor,
token_expert_indices: torch.Tensor,
gating_output: torch.Tensor,
renormalize: bool,
) -> None:
pass
def _rocm_aiter_topk_sigmoid_impl(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    gating_output: torch.Tensor,
) -> None:
    """Top-k sigmoid routing; writes into ``topk_weights`` / ``topk_indices``
    in place."""
    from aiter import topk_sigmoid

    topk_sigmoid(topk_weights, topk_indices, gating_output)
def _rocm_aiter_topk_sigmoid_fake(
topk_weights: torch.Tensor,
topk_indices: torch.Tensor,
gating_output: torch.Tensor,
) -> None:
pass
def _rocm_aiter_biased_grouped_topk_impl(
    gating_output: torch.Tensor,
    correction_bias: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    num_expert_group: int,
    topk_group: int,
    need_renorm: bool,
    routed_scaling_factor: float = 1.0,  # mul to topk_weights
) -> None:
    """Grouped top-k routing with an expert-score correction bias; writes
    results into ``topk_weights`` / ``topk_ids`` in place."""
    from aiter import biased_grouped_topk

    biased_grouped_topk(
        gating_output,
        correction_bias,
        topk_weights,
        topk_ids,
        num_expert_group,
        topk_group,
        need_renorm,
        routed_scaling_factor,
    )
def _rocm_aiter_biased_grouped_topk_fake(
gating_output: torch.Tensor,
correction_bias: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
num_expert_group: int,
topk_group: int,
need_renorm: bool,
routed_scaling_factor: float = 1.0, # mul to topk_weights
) -> None:
pass
def _rocm_aiter_grouped_topk_impl(
    gating_output: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    num_expert_group: int,
    topk_group: int,
    need_renorm: bool,
    scoring_func: str = "softmax",
    routed_scaling_factor: float = 1.0,  # mul to topk_weights
) -> None:
    """Grouped top-k routing; writes into ``topk_weights`` / ``topk_ids``
    in place. ``scoring_func`` selects softmax vs. non-softmax scoring."""
    # aiter takes the scoring function as a boolean flag.
    is_softmax = scoring_func == "softmax"
    from aiter import grouped_topk

    grouped_topk(
        gating_output,
        topk_weights,
        topk_ids,
        num_expert_group,
        topk_group,
        need_renorm,
        is_softmax,
        routed_scaling_factor,
    )
def _rocm_aiter_grouped_topk_fake(
gating_output: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
num_expert_group: int,
topk_group: int,
need_renorm: bool,
scoring_func: str = "softmax",
routed_scaling_factor: float = 1.0, # mul to topk_weights
) -> None:
pass
def _rocm_aiter_fused_topk_impl(
    x: torch.Tensor,
    router_logits: torch.Tensor,
    top_k: int,
    gate_up: bool,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused top-k routing via aiter."""
    from aiter.fused_moe import fused_topk

    # fused_topk returns (topk_weights, topk_indices)
    return fused_topk(x, router_logits, top_k, gate_up)
def _rocm_aiter_fused_topk_fake(
    x: torch.Tensor,
    router_logits: torch.Tensor,
    top_k: int,
    gate_up: bool,
) -> None:
    # tuple[torch.Tensor, torch.Tensor]:
    # NOTE(review): unlike the real impl this fake returns None instead of a
    # (topk_weights, topk_indices) tuple — confirm the registered op's schema
    # tolerates this during fake-tensor tracing.
    pass
# Cache whether aiter supports FP8 MLA parameters
_AITER_MLA_SUPPORTS_FP8: bool | None = None
def _check_aiter_mla_fp8_support() -> bool:
"""Check if aiter.mla.mla_decode_fwd supports q_scale and kv_scale parameters."""
global _AITER_MLA_SUPPORTS_FP8
if _AITER_MLA_SUPPORTS_FP8 is None:
try:
import inspect
from aiter.mla import mla_decode_fwd
sig = inspect.signature(mla_decode_fwd)
_AITER_MLA_SUPPORTS_FP8 = (
"q_scale" in sig.parameters and "kv_scale" in sig.parameters
)
except (
ImportError,
ModuleNotFoundError,
AttributeError,
ValueError,
TypeError,
):
# ImportError/ModuleNotFoundError: aiter.mla module not available
# AttributeError: mla_decode_fwd doesn't exist
# ValueError: mla_decode_fwd has no signature (e.g., built-in)
# TypeError: mla_decode_fwd is not a callable
_AITER_MLA_SUPPORTS_FP8 = False
return _AITER_MLA_SUPPORTS_FP8
def _rocm_aiter_mla_decode_fwd_impl(
    q: torch.Tensor,
    kv_buffer: torch.Tensor,
    o: torch.Tensor,
    qo_indptr: torch.Tensor,
    max_seqlen_qo: int,
    kv_indptr: torch.Tensor | None = None,
    kv_indices: torch.Tensor | None = None,
    kv_last_page_lens: torch.Tensor | None = None,
    sm_scale: float = 1.0,
    logit_cap: float = 0.0,
    q_scale: torch.Tensor | None = None,
    kv_scale: torch.Tensor | None = None,
) -> None:
    """MLA decode forward; writes the attention output into ``o`` in place."""
    from aiter.mla import mla_decode_fwd

    kwargs: dict[str, float | torch.Tensor | None] = {
        "sm_scale": sm_scale,
        "logit_cap": logit_cap,
    }
    # Only pass q_scale and kv_scale if the aiter library supports them
    if _check_aiter_mla_fp8_support():
        kwargs["q_scale"] = q_scale
        kwargs["kv_scale"] = kv_scale

    mla_decode_fwd(
        q,
        # assumes kv_buffer's trailing dim matches q's head dim — TODO confirm
        kv_buffer.view(-1, 1, 1, q.shape[-1]),
        o,
        qo_indptr,
        kv_indptr,
        kv_indices,
        kv_last_page_lens,
        max_seqlen_qo,
        **kwargs,
    )
def _rocm_aiter_mla_decode_fwd_fake(
q: torch.Tensor,
kv_buffer: torch.Tensor,
o: torch.Tensor,
qo_indptr: torch.Tensor,
max_seqlen_qo: int,
kv_indptr: torch.Tensor | None = None,
kv_indices: torch.Tensor | None = None,
kv_last_page_lens: torch.Tensor | None = None,
sm_scale: float = 1.0,
logit_cap: float = 0.0,
q_scale: torch.Tensor | None = None,
kv_scale: torch.Tensor | None = None,
) -> None:
pass
def _rocm_aiter_gemm_a8w8_impl(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    bias: torch.Tensor | None = None,
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Scaled A8W8 GEMM via aiter's CK kernel; returns an (M, N) tensor."""
    from aiter import gemm_a8w8_CK

    # gemm_a8w8_CK(a, b, scale_a, scale_b, bias) expects
    # a to be [M, K]
    # b to be [N, K]
    # CutlassInt8ScaledMMLinearKernel prepare weight `w_q` in [K, N] format
    return gemm_a8w8_CK(A, B, As, Bs, bias, output_dtype)
def _rocm_aiter_gemm_a8w8_fake(
A: torch.Tensor,
B: torch.Tensor,
As: torch.Tensor,
Bs: torch.Tensor,
bias: torch.Tensor | None = None,
output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
m = A.shape[0]
n = B.shape[0]
Y = torch.empty(m, n, dtype=output_dtype, device=A.device)
return Y
def _rocm_aiter_triton_gemm_a8w8_blockscale_impl(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Block-scaled A8W8 GEMM via aiter's Triton kernel."""
    from aiter.ops.triton.gemm_a8w8_blockscale import gemm_a8w8_blockscale

    return gemm_a8w8_blockscale(A, B, As, Bs, dtype=output_dtype)
def _rocm_aiter_triton_gemm_a8w8_blockscale_fake(
A: torch.Tensor,
B: torch.Tensor,
As: torch.Tensor,
Bs: torch.Tensor,
output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
m = A.shape[0]
n = B.shape[0]
Y = torch.empty(m, n, dtype=output_dtype, device=A.device)
return Y
def _rocm_aiter_gemm_a8w8_blockscale_impl(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Block-scaled A8W8 GEMM via aiter's native (non-Triton) kernel."""
    from aiter import gemm_a8w8_blockscale

    return gemm_a8w8_blockscale(A, B, As, Bs, dtype=output_dtype)
def _rocm_aiter_gemm_a8w8_blockscale_fake(
A: torch.Tensor,
B: torch.Tensor,
As: torch.Tensor,
Bs: torch.Tensor,
output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
m = A.shape[0]
n = B.shape[0]
Y = torch.empty(m, n, dtype=output_dtype, device=A.device)
return Y
def _rocm_aiter_rms_norm_impl(
    x: torch.Tensor, weight: torch.Tensor, variance_epsilon: float
) -> torch.Tensor:
    """RMSNorm over the last dimension via aiter.

    Inputs with more than two dims are flattened to (tokens, hidden) for the
    2-D kernel and the result is reshaped back.
    """
    from aiter import rms_norm

    if x.dim() > 2:
        x_original_shape = x.shape
        x = x.reshape(-1, x_original_shape[-1])
        x = rms_norm(x, weight, variance_epsilon)
        return x.reshape(x_original_shape)

    return rms_norm(x, weight, variance_epsilon)
def _rocm_aiter_rms_norm_fake(
x: torch.Tensor, weight: torch.Tensor, variance_epsilon: float
) -> torch.Tensor:
return torch.empty_like(x)
def _rocm_aiter_rmsnorm2d_fwd_with_add_impl(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused residual-add + RMSNorm; returns (normed output, new residual)."""
    from aiter import rmsnorm2d_fwd_with_add

    # The aiter kernel writes into preallocated output buffers.
    residual_out = torch.empty_like(residual)
    out = torch.empty_like(x)
    rmsnorm2d_fwd_with_add(
        out,  # output
        x,  # input
        residual,  # residual input
        residual_out,  # residual output
        weight,
        variance_epsilon,
    )
    return out, residual_out
def _rocm_aiter_rmsnorm2d_fwd_with_add_fake(
x: torch.Tensor,
residual: torch.Tensor,
weight: torch.Tensor,
variance_epsilon: float,
) -> tuple[torch.Tensor, torch.Tensor]:
residual_out = torch.empty_like(residual)
out = torch.empty_like(x)
return out, residual_out
def _rocm_aiter_rmsnorm_fused_add_dynamic_quant_impl(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    epsilon: float,
    quant_dtype: torch.dtype,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Fused residual-add + RMSNorm + dynamic per-row quantization.

    Returns (quantized output, new residual, per-row float32 scales).
    """
    import aiter as rocm_aiter

    assert quant_dtype in [torch.int8, FP8_DTYPE]

    # Kernel writes into preallocated buffers; scales are one per row.
    y_scale = torch.empty(x.shape[0], 1, dtype=torch.float32, device=x.device)
    out = torch.empty(x.shape, dtype=quant_dtype, device=x.device)
    residual_out = torch.empty_like(x)

    rocm_aiter.rmsnorm2d_fwd_with_add_dynamicquant(
        out,
        x,
        residual,
        residual_out,
        y_scale,
        weight,
        epsilon,
        use_model_sensitive_rmsnorm=0,
    )
    return out, residual_out, y_scale
def _rocm_aiter_rmsnorm_fused_add_dynamic_quant_fake(
x: torch.Tensor,
residual: torch.Tensor,
weight: torch.Tensor,
epsilon: float,
quant_dtype: torch.dtype,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
y_scale = torch.empty(x.shape[0], 1, dtype=torch.float32, device=x.device)
out = torch.empty(x.shape, dtype=quant_dtype, device=x.device)
residual_out = torch.empty_like(x)
return out, residual_out, y_scale
def _rocm_aiter_rmsnorm_fused_dynamic_quant_impl(
    x: torch.Tensor,
    weight: torch.Tensor,
    epsilon: float,
    quant_dtype: torch.dtype,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused RMSNorm + dynamic per-row quantization.

    Returns (quantized output, per-row float32 scales).
    """
    import aiter as rocm_aiter

    assert quant_dtype in [torch.int8, FP8_DTYPE]

    y_scale = torch.empty(x.shape[0], 1, dtype=torch.float32, device=x.device)
    out = torch.empty(x.shape, dtype=quant_dtype, device=x.device)

    rocm_aiter.rmsnorm2d_fwd_with_dynamicquant(
        out, x, y_scale, weight, epsilon, use_model_sensitive_rmsnorm=0
    )
    return out, y_scale
def _rocm_aiter_rmsnorm_fused_dynamic_quant_fake(
x: torch.Tensor,
weight: torch.Tensor,
epsilon: float,
quant_dtype: torch.dtype,
) -> tuple[torch.Tensor, torch.Tensor]:
y_scale = torch.empty(x.shape[0], 1, dtype=torch.float32, device=x.device)
out = torch.empty(x.shape, dtype=quant_dtype, device=x.device)
return out, y_scale
def _rocm_aiter_per_tensor_quant_impl(
    x: torch.Tensor,
    quant_dtype: torch.dtype,
    scale: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Per-tensor quantization via aiter; returns (quantized, scale).

    When ``scale`` is None the scale is presumably computed dynamically by
    the kernel — verify against aiter's per_tensor_quant_hip docs.
    """
    from aiter.ops.quant import per_tensor_quant_hip

    return per_tensor_quant_hip(x, scale, quant_dtype)
def _rocm_aiter_per_tensor_quant_fake(
x: torch.Tensor,
quant_dtype: torch.dtype,
scale: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
return torch.empty_like(x, dtype=quant_dtype), torch.empty(
1, dtype=torch.float32, device=x.device
)
def _rocm_aiter_per_token_quant_impl(
    x: torch.Tensor, quant_dtype: torch.dtype, scale: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    """Dynamic per-token quantization; returns (quantized, per-row scales).

    NOTE(review): the output buffer is always allocated as FP8_DTYPE even
    though the assert also admits torch.int8 — confirm whether int8 callers
    exist and, if so, whether ``quant_dtype`` should be used here instead.
    """
    from aiter.ops.quant import dynamic_per_token_scaled_quant

    assert quant_dtype in [torch.int8, FP8_DTYPE]
    out_shape = x.shape
    out = torch.empty(x.shape, dtype=FP8_DTYPE, device=x.device)
    if scale is None:
        # One scale per row (token); only allocated when not supplied.
        scale = torch.empty((*out_shape[:-1], 1), dtype=torch.float32, device=x.device)

    dynamic_per_token_scaled_quant(
        out,
        x,
        scale,
        scale_ub=None,
        shuffle_scale=False,
        num_rows=None,
        num_rows_factor=1,
    )
    return out, scale
def _rocm_aiter_per_token_quant_fake(
    x: torch.Tensor, quant_dtype: torch.dtype, scale: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fake impl: FP8 output buffer plus per-row float32 scales."""
    quantized = torch.empty(x.shape, dtype=FP8_DTYPE, device=x.device)
    row_scales = torch.empty((*x.shape[:-1], 1), dtype=torch.float32, device=x.device)
    return quantized, row_scales
def _rocm_aiter_rmsnorm_with_add_fp8_group_quant_impl(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Fused residual-add + RMSNorm + group-wise FP8 quantization.

    Returns (quantized x, new residual, group scales).
    """
    from aiter.ops.triton.fused_fp8_quant import fused_rms_fp8_group_quant

    # Middle return slots of the aiter kernel are unused here.
    (x_quant, x_quant_scales), _, _, res = fused_rms_fp8_group_quant(
        x,
        weight,
        variance_epsilon,
        None,
        None,
        None,
        group_size=group_size,
        dtype_quant=FP8_DTYPE,
        res1=residual,
    )
    return (
        x_quant,
        res,
        x_quant_scales,
    )
def _rocm_aiter_rmsnorm_with_add_fp8_group_quant_fake(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Fake impl: (quantized x, residual_out, per-group float32 scales)."""
    num_rows, num_cols = x.shape
    num_groups = (num_cols + group_size - 1) // group_size
    quantized = torch.empty_like(x, dtype=FP8_DTYPE, device=x.device)
    residual_out = torch.empty_like(residual, device=residual.device)
    group_scales = torch.empty(
        (num_rows, num_groups), dtype=torch.float32, device=x.device
    )
    return quantized, residual_out, group_scales
def _rocm_aiter_rmsnorm_fp8_group_quant_impl(
    x: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused RMSNorm + group-wise FP8 quantization (no residual).

    Returns (quantized x, group scales).
    """
    from aiter.ops.triton.fused_fp8_quant import fused_rms_fp8_group_quant

    # Same kernel as the with-add variant, with res1=None; the middle
    # return slots and residual output are unused.
    (x_quant, x_quant_scales), _, _, res = fused_rms_fp8_group_quant(
        x,
        weight,
        variance_epsilon,
        None,
        None,
        None,
        group_size=group_size,
        dtype_quant=FP8_DTYPE,
        res1=None,
    )
    return (x_quant, x_quant_scales)
def _rocm_aiter_rmsnorm_fp8_group_quant_fake(
    x: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fake impl: (quantized x, per-group float32 scales)."""
    num_rows, num_cols = x.shape
    num_groups = (num_cols + group_size - 1) // group_size
    quantized = torch.empty_like(x, dtype=FP8_DTYPE, device=x.device)
    group_scales = torch.empty(
        (num_rows, num_groups), dtype=torch.float32, device=x.device
    )
    return quantized, group_scales
def _rocm_aiter_group_fp8_quant_impl(
    x: torch.Tensor,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Group-wise FP8 quantization of ``x``.

    NOTE(review): ``group_size`` only gates the divisibility assert; the
    kernel is fixed to QuantType.per_1x128 — confirm callers always pass 128.
    """
    assert x.shape[-1] % group_size == 0, "Input shape must be divisible by group size"
    from aiter import QuantType, get_hip_quant

    aiter_per1x128_quant = get_hip_quant(QuantType.per_1x128)
    return aiter_per1x128_quant(x.contiguous(), quant_dtype=FP8_DTYPE)
def _rocm_aiter_group_fp8_quant_fake(
    x: torch.Tensor,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fake impl: FP8 tensor of x's shape plus per-group float32 scales."""
    num_rows, num_cols = x.shape
    quantized = torch.empty((num_rows, num_cols), dtype=FP8_DTYPE, device=x.device)
    group_scales = torch.empty(
        (num_rows, (num_cols + group_size - 1) // group_size),
        dtype=torch.float32,
        device=x.device,
    )
    return quantized, group_scales
def _rocm_aiter_act_mul_and_fp8_group_quant_impl(
    x: torch.Tensor,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused silu-and-mul activation + group-wise FP8 quantization."""
    from aiter.ops.triton.activation import act_mul_and_fp8_group_quant

    return act_mul_and_fp8_group_quant(
        x,
        activation="silu",
        group_size=group_size,
        dtype_quant=FP8_DTYPE,
    )
def _rocm_aiter_act_mul_and_fp8_group_quant_fake(
    x: torch.Tensor,
    group_size: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fake impl: gate/up multiply halves the last dim of the output."""
    num_rows, num_cols = x.shape
    assert num_cols % 2 == 0
    half_cols = num_cols // 2
    quantized = torch.empty((num_rows, half_cols), dtype=FP8_DTYPE, device=x.device)
    group_scales = torch.empty(
        (num_rows, (half_cols + group_size - 1) // group_size),
        dtype=torch.float32,
        device=x.device,
    )
    return quantized, group_scales
def _rocm_aiter_triton_add_rmsnorm_pad_impl(
    x: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
    residual: torch.Tensor,
    x_pad_to_multiple: int,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused residual-add + RMSNorm with the output's last dim padded up to
    a multiple of ``x_pad_to_multiple``."""
    from aiter.ops.triton.fused_add_rmsnorm_pad import fused_add_rmsnorm_pad

    return fused_add_rmsnorm_pad(
        x,
        weight,
        variance_epsilon,
        residual,
        x_pad_to_multiple=x_pad_to_multiple,
    )
def _rocm_aiter_triton_add_rmsnorm_pad_fake(
x: torch.Tensor,
weight: torch.Tensor,
variance_epsilon: float,
residual: torch.Tensor,
x_pad_to_multiple: int,
) -> tuple[torch.Tensor, torch.Tensor]:
M, N = x.shape
if x_pad_to_multiple > 0:
N_out = (N + x_pad_to_multiple - 1) // x_pad_to_multiple * x_pad_to_multiple
else:
N_out = N
out = torch.empty((M, N_out), dtype=x.dtype, device=x.device)
residual_out = torch.empty_like(residual)
return out, residual_out
def _triton_rotary_embedding_impl(
    positions: torch.Tensor,
    query: torch.Tensor,
    key: torch.Tensor,
    head_size: int,
    cos_sin_cache: torch.Tensor,
    is_neox: bool,
    offsets: torch.Tensor | None = None,
) -> None:
    # Modifies query and key in-place
    from aiter.ops.triton.rope.rope import (
        rope_cached_thd_positions_offsets_2c_fwd_inplace,
    )

    num_tokens = positions.numel()
    # Cache stores the cos and sin halves concatenated along the last dim.
    cos, sin = cos_sin_cache.chunk(2, dim=-1)
    query_shape = query.shape
    key_shape = key.shape
    # 0 when is_neox, 1 otherwise — presumably NeoX vs GPT-J rotation;
    # verify against aiter's rope kernel docs.
    rotate_style = 0 if is_neox else 1
    # Full-head rotation: rotary_dim spans the entire head.
    rotary_dim = head_size

    query = query.view(num_tokens, -1, head_size)
    key = key.view(num_tokens, -1, head_size)
    query_ = query[..., :rotary_dim]
    key_ = key[..., :rotary_dim]
    positions = positions.view(*query.shape[:1])
    rope_cached_thd_positions_offsets_2c_fwd_inplace(
        query_,
        key_,
        cos,
        sin,
        positions,
        offsets,
        rotate_style,
        reuse_freqs_front_part=True,
        nope_first=False,
    )
    # The in-place kernel already wrote through the views; these
    # reassignments only restore the local names to the original shapes.
    query = query.view(query_shape)
    key = key.view(key_shape)
def _triton_rotary_embedding_fake(
positions: torch.Tensor,
query: torch.Tensor,
key: torch.Tensor,
head_size: int,
cos_sin_cache: torch.Tensor,
is_neox_style: bool,
offsets: torch.Tensor | None = None,
) -> None:
return
# Global flag to ensure ops are registered only once
_OPS_REGISTERED = False
class rocm_aiter_ops:
"""ROCm AITER operations wrapper for AMD GPU acceleration in vLLM.
This class centralizes the import and registration of AITER ops,
and provides a unified interface for checking if AITER is enabled.
Operations are only available on supported gfx9
architectures when aiter is installed.
The class uses environment variables to control which features are enabled,
allowing fine-grained control over which AITER optimizations are used.
Environment Variables:
VLLM_ROCM_USE_AITER: Main toggle for all AITER operations.
VLLM_ROCM_USE_AITER_LINEAR: Controls GEMM and quantization ops.
VLLM_ROCM_USE_AITER_RMSNORM: Controls RMSNorm operations.
VLLM_ROCM_USE_AITER_MOE: Controls MoE (Mixture of Experts) ops.
VLLM_ROCM_USE_AITER_MLA: Controls MLA (Multi-head Latent Attention) ops.
VLLM_ROCM_USE_AITER_MHA: Controls MHA ops including flash_attn_varlen.
VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION: Controls Triton unified attention.
VLLM_ROCM_USE_AITER_FP8BMM: Controls FP8 batched matrix multiply.
VLLM_ROCM_USE_AITER_FP4_ASM_GEMM: Controls FP4 assembly GEMM.
VLLM_ROCM_USE_AITER_TRITON_ROPE: Controls Triton rotary embeddings.
VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS: Controls shared expert fusion.
VLLM_ROCM_USE_AITER_TRITON_GEMM: Controls Triton unquantized GEMM.
Note:
The environment variables are assigned when the module is imported,
so you can't change the environment variables after the module is imported.
This is done out of performance consideration. Accessing environment variables
is expensive as described in issue https://github.com/vllm-project/vllm/issues/17067
so we don't want to do it repeatedly, especially in the hot path (the forward pass).
You can call the refresh_env_variables() function to reload the env variables
after monkey patching the env variables in the unit test.
Check Functions:
All check functions (is_*_enabled) are decorated with @if_aiter_supported,
which verifies: (1) platform is ROCm, (2) device arch is gfx9, and
(3) aiter library is installed. The check function then also verifies
the corresponding environment variable is enabled.
i.e. ___
is_enabled() == current_platform.is_rocm() and | checked by
current_platform.is_on_gfx9() and | @if_aiter_supported
IS_AITER_FOUND and _______________|
cls._AITER_ENABLED -----> Check by the logic in `is_enabled()`
Example:
from vllm._aiter_ops import rocm_aiter_ops
# Check if aiter is enabled before using operations
if rocm_aiter_ops.is_enabled():
result = rocm_aiter_ops.rms_norm(x, weight, epsilon)
Operations:
- RMS normalization: rms_norm, rms_norm2d_with_add
- GEMM operations: gemm_a8w8, gemm_a8w8_blockscale
- Fused MoE: fused_moe, asm_moe_tkw1
- Routing: topk_softmax, biased_grouped_topk, grouped_topk
- MLA decode: mla_decode_fwd
- Quantization: per_tensor_quant, per_token_quant, group_fp8_quant
- Triton ops: triton_rotary_embed, triton_fp8_bmm, triton_gemm_a8w8_blockscale
"""
# Check if the env variable is set
_AITER_ENABLED = envs.VLLM_ROCM_USE_AITER
_LINEAR_ENABLED = envs.VLLM_ROCM_USE_AITER_LINEAR
_RMSNORM_ENABLED = envs.VLLM_ROCM_USE_AITER_RMSNORM
_FMOE_ENABLED = envs.VLLM_ROCM_USE_AITER_MOE
_MLA_ENABLED = envs.VLLM_ROCM_USE_AITER_MLA
_MHA_ENABLED = envs.VLLM_ROCM_USE_AITER_MHA
_SHUFFLE_KV_CACHE_ENABLED = envs.VLLM_ROCM_SHUFFLE_KV_CACHE_LAYOUT
_TRITON_UNIFIED_ATTN_ENABLED = envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION
# TODO: Consolidate under _LINEAR_ENABLED
_FP8BMM_ENABLED = envs.VLLM_ROCM_USE_AITER_FP8BMM
_FP4BMM_ENABLED = envs.VLLM_ROCM_USE_AITER_FP4BMM
# TODO: Consolidate under _LINEAR_ENABLED
_FP4_GEMM_DYNAMIC_QUANT_ASM = envs.VLLM_ROCM_USE_AITER_FP4_ASM_GEMM
# TODO: Consolidate under VLLM_ROCM_USE_AITER_ROPE
_TRITON_ROTARY_EMBED = envs.VLLM_ROCM_USE_AITER_TRITON_ROPE
_MOE_SHARED_EXPERTS_ENABLED = envs.VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS
# TODO: Consolidate under _LINEAR_ENABLED
_TRITON_UNQUANT_GEMM = envs.VLLM_ROCM_USE_AITER_TRITON_GEMM
    @classmethod
    def refresh_env_variables(cls):
        """Re-read every cached env-var toggle from ``vllm.envs``.

        The class attributes are snapshotted when the module is imported;
        after monkey patching environment variables (e.g. in a unit test),
        call this method to refresh the cached values.
        """
        cls._AITER_ENABLED = envs.VLLM_ROCM_USE_AITER
        cls._LINEAR_ENABLED = envs.VLLM_ROCM_USE_AITER_LINEAR
        cls._RMSNORM_ENABLED = envs.VLLM_ROCM_USE_AITER_RMSNORM
        cls._FMOE_ENABLED = envs.VLLM_ROCM_USE_AITER_MOE
        cls._MLA_ENABLED = envs.VLLM_ROCM_USE_AITER_MLA
        cls._MHA_ENABLED = envs.VLLM_ROCM_USE_AITER_MHA
        cls._SHUFFLE_KV_CACHE_ENABLED = envs.VLLM_ROCM_SHUFFLE_KV_CACHE_LAYOUT
        cls._TRITON_UNIFIED_ATTN_ENABLED = envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION
        cls._FP8BMM_ENABLED = envs.VLLM_ROCM_USE_AITER_FP8BMM
        cls._FP4BMM_ENABLED = envs.VLLM_ROCM_USE_AITER_FP4BMM
        cls._FP4_GEMM_DYNAMIC_QUANT_ASM = envs.VLLM_ROCM_USE_AITER_FP4_ASM_GEMM
        cls._TRITON_ROTARY_EMBED = envs.VLLM_ROCM_USE_AITER_TRITON_ROPE
        cls._MOE_SHARED_EXPERTS_ENABLED = envs.VLLM_ROCM_USE_AITER_FUSION_SHARED_EXPERTS
        cls._TRITON_UNQUANT_GEMM = envs.VLLM_ROCM_USE_AITER_TRITON_GEMM
@staticmethod
def get_aiter_activation_type(activation_str: str):
"""
Given an activation type as a string, returns the corresponding aiter ActivationType enum.
Supported activation types: "no", "none", "silu", "gelu", "swiglu".
Returns None if the mapping fails.
Args:
activation_str (str): Activation type as string.
Returns:
Aiter ActivationType enum value, or None if not found.
"""
# Import only locally, since aiter may not always be available.
try:
from aiter import ActivationType
except ImportError:
return None
if not isinstance(activation_str, str):
return None
name = activation_str.strip().lower()
mapping = {
"none": ActivationType.No,
"no": ActivationType.No,
"silu": ActivationType.Silu,
"gelu": ActivationType.Gelu,
"swiglu": ActivationType.Swiglu,
}
return mapping.get(name)
@staticmethod
def get_aiter_quant_type(quant_type_str: str):
"""
Given a quantization type as a string, returns the corresponding aiter QuantType enum.
Supported quantization types: "no", "per_tensor", "per_token", "per_1x32", "per_1x128", "per_128x128".
Returns None if the mapping fails.
Args:
quant_type_str (str): Quantization type as string.
Returns:
Aiter QuantType enum value, or None if not found.
"""
try:
from aiter import QuantType
except ImportError:
return None
if not isinstance(quant_type_str, str):
return None
name = quant_type_str.strip().lower()
mapping = {
"no": QuantType.No,
"per_tensor": QuantType.per_Tensor,
"per_token": QuantType.per_Token,
"per_1x32": QuantType.per_1x32,
"per_1x128": QuantType.per_1x128,
"per_128x128": QuantType.per_128x128,
}
return mapping.get(name)
    @classmethod
    @if_aiter_supported
    def is_enabled(cls) -> bool:
        # Master switch: VLLM_ROCM_USE_AITER (platform/arch/library checks
        # come from the @if_aiter_supported decorator).
        return cls._AITER_ENABLED
    @classmethod
    @if_aiter_supported
    def is_linear_enabled(cls) -> bool:
        # Linear (GEMM/quant) ops: master switch AND VLLM_ROCM_USE_AITER_LINEAR.
        return cls._AITER_ENABLED and cls._LINEAR_ENABLED
    @classmethod
    @if_aiter_supported
    def is_linear_fp8_enabled(cls) -> bool:
        # FP8 linear currently shares the generic linear toggle.
        return cls.is_linear_enabled()
    @classmethod
    @if_aiter_supported
    def is_rmsnorm_enabled(cls) -> bool:
        # RMSNorm ops: master switch AND VLLM_ROCM_USE_AITER_RMSNORM.
        return cls._AITER_ENABLED and cls._RMSNORM_ENABLED
    @classmethod
    @if_aiter_supported
    def is_fused_moe_enabled(cls) -> bool:
        # Fused MoE ops: master switch AND VLLM_ROCM_USE_AITER_MOE.
        return cls._AITER_ENABLED and cls._FMOE_ENABLED
    @classmethod
    @if_aiter_supported
    def is_fusion_moe_shared_experts_enabled(cls) -> bool:
        # Shared-expert fusion additionally requires fused MoE to be enabled.
        return cls.is_fused_moe_enabled() and cls._MOE_SHARED_EXPERTS_ENABLED
    @classmethod
    @if_aiter_supported
    def is_mla_enabled(cls) -> bool:
        # MLA ops: master switch AND VLLM_ROCM_USE_AITER_MLA.
        return cls._AITER_ENABLED and cls._MLA_ENABLED
@classmethod
@if_aiter_supported
def is_mha_enabled(cls) -> bool:
    # MHA attention kernels: master switch AND the MHA flag.
    return cls._AITER_ENABLED and cls._MHA_ENABLED
@classmethod
@if_aiter_supported
def is_shuffle_kv_cache_enabled(cls) -> bool:
    # NOTE(review): unlike the sibling is_* checks, this does NOT AND with
    # cls._AITER_ENABLED — confirm whether that asymmetry is intentional.
    return cls._SHUFFLE_KV_CACHE_ENABLED
@classmethod
@if_aiter_supported
def is_triton_unified_attn_enabled(cls) -> bool:
    # Triton unified attention: master switch AND its dedicated flag.
    return cls._AITER_ENABLED and cls._TRITON_UNIFIED_ATTN_ENABLED
@classmethod
@if_aiter_supported
def is_fp8bmm_enabled(cls) -> bool:
    # FP8 batched matmul: master switch AND the FP8-BMM flag.
    return cls._AITER_ENABLED and cls._FP8BMM_ENABLED
@classmethod
@if_aiter_supported
def is_fp4bmm_enabled(cls) -> bool:
    # Lazy import avoids a module cycle with vllm.platforms.rocm.
    from vllm.platforms.rocm import on_gfx950

    # FP4 BMM additionally requires gfx950-class hardware.
    return cls._AITER_ENABLED and cls._FP4BMM_ENABLED and on_gfx950()
@classmethod
@if_aiter_supported
def is_asm_fp4_gemm_dynamic_quant_enabled(cls) -> bool:
    # Lazy import avoids a module cycle with vllm.platforms.rocm.
    from vllm.platforms.rocm import on_gfx950

    # ASM FP4 GEMM with dynamic quant is gfx950-only.
    return cls._AITER_ENABLED and cls._FP4_GEMM_DYNAMIC_QUANT_ASM and on_gfx950()
@classmethod
@if_aiter_supported
def is_triton_rotary_embed_enabled(cls) -> bool:
    # Triton rotary embedding: master switch AND its dedicated flag.
    return cls._AITER_ENABLED and cls._TRITON_ROTARY_EMBED
@classmethod
@if_aiter_supported
def is_triton_gemm_enabled(cls) -> bool:
    # Triton unquantized GEMM: master switch AND its dedicated flag.
    return cls._AITER_ENABLED and cls._TRITON_UNQUANT_GEMM
@staticmethod
@if_aiter_supported
def register_ops_once() -> None:
    """Register every rocm-aiter custom op with torch, exactly once.

    Guarded by the module-level ``_OPS_REGISTERED`` flag so repeated
    calls (e.g. re-imports) are no-ops.  Each registration pairs the
    real implementation with a fake (meta) implementation so the ops
    remain traceable by torch.compile.  Ops whose outputs are written
    in place declare those arguments via ``mutates_args``.
    """
    global _OPS_REGISTERED
    if not _OPS_REGISTERED:
        # register all the custom ops here
        direct_register_custom_op(
            op_name="rocm_aiter_asm_moe_tkw1",
            op_func=_rocm_aiter_asm_moe_tkw1_impl,
            mutates_args=[],
            fake_impl=_rocm_aiter_asm_moe_tkw1_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_fused_moe",
            op_func=_rocm_aiter_fused_moe_impl,
            mutates_args=[],
            fake_impl=_rocm_aiter_fused_moe_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_topk_softmax",
            op_func=_rocm_aiter_topk_softmax_impl,
            mutates_args=["topk_weights", "topk_indices", "token_expert_indices"],
            fake_impl=_rocm_aiter_topk_softmax_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_topk_sigmoid",
            op_func=_rocm_aiter_topk_sigmoid_impl,
            mutates_args=["topk_weights", "topk_indices"],
            fake_impl=_rocm_aiter_topk_sigmoid_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_biased_grouped_topk",
            op_func=_rocm_aiter_biased_grouped_topk_impl,
            mutates_args=["topk_weights", "topk_ids"],
            fake_impl=_rocm_aiter_biased_grouped_topk_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_grouped_topk",
            op_func=_rocm_aiter_grouped_topk_impl,
            mutates_args=["topk_weights", "topk_ids"],
            fake_impl=_rocm_aiter_grouped_topk_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_fused_topk",
            op_func=_rocm_aiter_fused_topk_impl,
            mutates_args=[],
            fake_impl=_rocm_aiter_fused_topk_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_mla_decode_fwd",
            op_func=_rocm_aiter_mla_decode_fwd_impl,
            mutates_args=["o"],
            fake_impl=_rocm_aiter_mla_decode_fwd_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_gemm_a8w8",
            op_func=_rocm_aiter_gemm_a8w8_impl,
            mutates_args=[],
            fake_impl=_rocm_aiter_gemm_a8w8_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_triton_gemm_a8w8_blockscale",
            op_func=_rocm_aiter_triton_gemm_a8w8_blockscale_impl,
            fake_impl=_rocm_aiter_triton_gemm_a8w8_blockscale_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_gemm_a8w8_blockscale",
            op_func=_rocm_aiter_gemm_a8w8_blockscale_impl,
            fake_impl=_rocm_aiter_gemm_a8w8_blockscale_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rms_norm",
            op_func=_rocm_aiter_rms_norm_impl,
            fake_impl=_rocm_aiter_rms_norm_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rmsnorm2d_fwd_with_add",
            op_func=_rocm_aiter_rmsnorm2d_fwd_with_add_impl,
            fake_impl=_rocm_aiter_rmsnorm2d_fwd_with_add_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rmsnorm_fused_dynamic_quant",
            op_func=_rocm_aiter_rmsnorm_fused_dynamic_quant_impl,
            fake_impl=_rocm_aiter_rmsnorm_fused_dynamic_quant_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rmsnorm_fused_add_dynamic_quant",
            op_func=_rocm_aiter_rmsnorm_fused_add_dynamic_quant_impl,
            fake_impl=_rocm_aiter_rmsnorm_fused_add_dynamic_quant_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rmsnorm_fp8_group_quant",
            op_func=_rocm_aiter_rmsnorm_fp8_group_quant_impl,
            fake_impl=_rocm_aiter_rmsnorm_fp8_group_quant_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_rmsnorm_with_add_fp8_group_quant",
            op_func=_rocm_aiter_rmsnorm_with_add_fp8_group_quant_impl,
            fake_impl=_rocm_aiter_rmsnorm_with_add_fp8_group_quant_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_act_mul_and_fp8_group_quant",
            op_func=_rocm_aiter_act_mul_and_fp8_group_quant_impl,
            fake_impl=_rocm_aiter_act_mul_and_fp8_group_quant_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_triton_add_rmsnorm_pad",
            op_func=_rocm_aiter_triton_add_rmsnorm_pad_impl,
            fake_impl=_rocm_aiter_triton_add_rmsnorm_pad_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_group_fp8_quant",
            op_func=_rocm_aiter_group_fp8_quant_impl,
            fake_impl=_rocm_aiter_group_fp8_quant_fake,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_per_tensor_quant",
            op_func=_rocm_aiter_per_tensor_quant_impl,
            mutates_args=[],
            fake_impl=_rocm_aiter_per_tensor_quant_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_per_token_quant",
            op_func=_rocm_aiter_per_token_quant_impl,
            fake_impl=_rocm_aiter_per_token_quant_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        direct_register_custom_op(
            op_name="rocm_aiter_sparse_attn_indexer",
            op_func=rocm_aiter_sparse_attn_indexer,
            mutates_args=["topk_indices_buffer"],
            fake_impl=rocm_aiter_sparse_attn_indexer_fake,
            dispatch_key=current_platform.dispatch_key,
        )
        # Register rocm aiter rotary embedding custom op
        direct_register_custom_op(
            op_name="rocm_aiter_triton_rotary_embedding",
            op_func=_triton_rotary_embedding_impl,
            mutates_args=["query", "key"],  # These tensors are modified in-place
            fake_impl=_triton_rotary_embedding_fake,
        )
        _OPS_REGISTERED = True
@staticmethod
def get_rmsnorm_fused_add_op() -> OpOverload:
    # Registered custom op: RMSNorm fused with residual add.
    return torch.ops.vllm.rocm_aiter_rmsnorm2d_fwd_with_add.default
@staticmethod
def get_rmsnorm_op() -> OpOverload:
    # Registered custom op: plain RMSNorm.
    return torch.ops.vllm.rocm_aiter_rms_norm.default
@staticmethod
def get_rmsnorm_fused_add_dynamic_quant_op() -> OpOverload:
    # Registered custom op: RMSNorm + residual add + dynamic quant.
    return torch.ops.vllm.rocm_aiter_rmsnorm_fused_add_dynamic_quant.default
@staticmethod
def get_rmsnorm_fused_dynamic_quant_op() -> OpOverload:
    # Registered custom op: RMSNorm + dynamic quant (no residual add).
    return torch.ops.vllm.rocm_aiter_rmsnorm_fused_dynamic_quant.default
@staticmethod
def get_rmsnorm_group_fused_quant_op() -> OpOverload:
    # Registered custom op: RMSNorm + FP8 group quant.
    return torch.ops.vllm.rocm_aiter_rmsnorm_fp8_group_quant.default
@staticmethod
def get_rmsnorm_group_add_fused_quant_op() -> OpOverload:
    # Registered custom op: RMSNorm + residual add + FP8 group quant.
    return torch.ops.vllm.rocm_aiter_rmsnorm_with_add_fp8_group_quant.default
@staticmethod
def get_per_token_quant_op() -> OpOverload:
    # Registered custom op: per-token quantization.
    return torch.ops.vllm.rocm_aiter_per_token_quant.default
@staticmethod
def get_group_quant_op() -> OpOverload:
    # Registered custom op: FP8 group quantization.
    return torch.ops.vllm.rocm_aiter_group_fp8_quant.default
@staticmethod
def get_act_mul_fused_fp8_group_quant_op() -> OpOverload:
    # Registered custom op: activation-mul fused with FP8 group quant.
    return torch.ops.vllm.rocm_aiter_act_mul_and_fp8_group_quant.default
@staticmethod
def get_triton_add_rmsnorm_pad_op() -> OpOverload:
    # Registered custom op: triton add + RMSNorm + pad.
    return torch.ops.vllm.rocm_aiter_triton_add_rmsnorm_pad.default
@staticmethod
def get_triton_rotary_embedding_op() -> OpOverload:
    # Registered custom op: triton rotary embedding (in-place on q/k).
    return torch.ops.vllm.rocm_aiter_triton_rotary_embedding.default
@staticmethod
def rms_norm(
    x: torch.Tensor, weight: torch.Tensor, variance_epsilon: float
) -> torch.Tensor:
    """Dispatch to the registered rocm_aiter_rms_norm custom op."""
    return torch.ops.vllm.rocm_aiter_rms_norm(x, weight, variance_epsilon)
@staticmethod
def rms_norm2d_with_add(
    x: torch.Tensor,
    residual: torch.Tensor,
    weight: torch.Tensor,
    variance_epsilon: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    """RMSNorm fused with residual add; returns (normed, new_residual)."""
    return torch.ops.vllm.rocm_aiter_rmsnorm2d_fwd_with_add(
        x, residual, weight, variance_epsilon
    )
@staticmethod
def gemm_a8w8(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    bias: torch.Tensor | None = None,
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """a8w8 GEMM via the registered custom op; As/Bs are operand scales."""
    return torch.ops.vllm.rocm_aiter_gemm_a8w8(A, B, As, Bs, bias, output_dtype)
@staticmethod
def triton_gemm_a8w8_blockscale(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    block_size: list[int],
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Block-scaled a8w8 GEMM (triton path) via the registered custom op.

    NOTE(review): ``block_size`` is accepted for signature parity but not
    forwarded to the op — confirm the kernel infers it from the scales.
    """
    return torch.ops.vllm.rocm_aiter_triton_gemm_a8w8_blockscale(
        A, B, As, Bs, output_dtype
    )
@staticmethod
def gemm_a8w8_blockscale(
    A: torch.Tensor,
    B: torch.Tensor,
    As: torch.Tensor,
    Bs: torch.Tensor,
    block_size: list[int],
    output_dtype: torch.dtype = torch.float16,
) -> torch.Tensor:
    """Block-scaled a8w8 GEMM via the registered custom op.

    NOTE(review): ``block_size`` is accepted for signature parity but not
    forwarded to the op — confirm the kernel infers it from the scales.
    """
    return torch.ops.vllm.rocm_aiter_gemm_a8w8_blockscale(
        A, B, As, Bs, output_dtype
    )
@staticmethod
def fused_moe(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weight: torch.Tensor,
    topk_ids: torch.Tensor,
    expert_mask: torch.Tensor | None = None,
    activation_method: int = 0,
    quant_method: int = 0,
    doweight_stage1: bool = False,
    w1_scale: torch.Tensor | None = None,
    w2_scale: torch.Tensor | None = None,
    a1_scale: torch.Tensor | None = None,
    a2_scale: torch.Tensor | None = None,
    num_local_tokens: torch.Tensor | None = None,
    output_dtype: torch.dtype | None = None,
    hidden_pad: int = 0,
    intermediate_pad: int = 0,
    bias1: torch.Tensor | None = None,
    bias2: torch.Tensor | None = None,
) -> torch.Tensor:
    """Fused MoE forward via the registered rocm_aiter_fused_moe op.

    All arguments are forwarded positionally and unchanged; the op's
    implementation owns the semantics of the quant/activation codes.
    """
    return torch.ops.vllm.rocm_aiter_fused_moe(
        hidden_states,
        w1,
        w2,
        topk_weight,
        topk_ids,
        expert_mask,
        activation_method,
        quant_method,
        doweight_stage1,
        w1_scale,
        w2_scale,
        a1_scale,
        a2_scale,
        num_local_tokens,
        output_dtype,
        hidden_pad,
        intermediate_pad,
        bias1,
        bias2,
    )
@staticmethod
def asm_moe_tkw1(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    fc1_scale: torch.Tensor | None = None,
    fc2_scale: torch.Tensor | None = None,
    fc1_smooth_scale: torch.Tensor | None = None,
    fc2_smooth_scale: torch.Tensor | None = None,
    a16: bool = False,
    per_tensor_quant_scale: torch.Tensor | None = None,
    expert_mask: torch.Tensor | None = None,
    activation_method: int = 0,
) -> torch.Tensor:
    """ASM MoE (tkw1 variant) via the registered custom op; args forwarded as-is."""
    return torch.ops.vllm.rocm_aiter_asm_moe_tkw1(
        hidden_states,
        w1,
        w2,
        topk_weights,
        topk_ids,
        fc1_scale,
        fc2_scale,
        fc1_smooth_scale,
        fc2_smooth_scale,
        a16,
        per_tensor_quant_scale,
        expert_mask,
        activation_method,
    )
@staticmethod
def topk_softmax(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool,
) -> tuple[torch.Tensor, ...]:
    """Top-k softmax routing; fills the three output tensors in place
    and returns (topk_weights, topk_indices) for convenience."""
    torch.ops.vllm.rocm_aiter_topk_softmax(
        topk_weights, topk_indices, token_expert_indices, gating_output, renormalize
    )
    return topk_weights, topk_indices
@staticmethod
def topk_sigmoid(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool,
) -> tuple[torch.Tensor, ...]:
    """Top-k sigmoid routing; fills topk_weights/topk_indices in place.

    NOTE(review): ``token_expert_indices`` and ``renormalize`` are accepted
    (mirroring topk_softmax's signature) but not forwarded — confirm the
    sigmoid op intentionally takes neither.
    """
    torch.ops.vllm.rocm_aiter_topk_sigmoid(
        topk_weights, topk_indices, gating_output
    )
    return topk_weights, topk_indices
@staticmethod
def biased_grouped_topk(
    gating_output: torch.Tensor,
    correction_bias: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    num_expert_group: int,
    topk_group: int,
    need_renorm: bool,
    routed_scaling_factor: float = 1.0,
) -> None:
    """Bias-corrected grouped top-k; writes topk_weights/topk_ids in place."""
    torch.ops.vllm.rocm_aiter_biased_grouped_topk(
        gating_output,
        correction_bias,
        topk_weights,
        topk_ids,
        num_expert_group,
        topk_group,
        need_renorm,
        routed_scaling_factor,
    )
@staticmethod
def grouped_topk(
    gating_output: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    num_expert_group: int,
    topk_group: int,
    need_renorm: bool,
    scoring_func: str = "softmax",
    routed_scaling_factor: float = 1.0,
) -> None:
    """Grouped top-k routing; writes topk_weights/topk_ids in place."""
    torch.ops.vllm.rocm_aiter_grouped_topk(
        gating_output,
        topk_weights,
        topk_ids,
        num_expert_group,
        topk_group,
        need_renorm,
        scoring_func,
        routed_scaling_factor,
    )
@staticmethod
def fused_topk(
    x: torch.Tensor,
    router_logits: torch.Tensor,
    top_k: int,
    gate_up: bool,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Fused top-k routing via the registered custom op."""
    return torch.ops.vllm.rocm_aiter_fused_topk(x, router_logits, top_k, gate_up)
@staticmethod
def mla_decode_fwd(
    q: torch.Tensor,
    kv_buffer: torch.Tensor,
    o: torch.Tensor,
    sm_scale: float,
    qo_indptr: torch.Tensor,
    max_seqlen_qo: int,
    kv_indptr: torch.Tensor | None = None,
    kv_indices: torch.Tensor | None = None,
    kv_last_page_lens: torch.Tensor | None = None,
    logit_cap: float = 0.0,
    q_scale: torch.Tensor | None = None,
    kv_scale: torch.Tensor | None = None,
):
    """MLA decode forward; the attention output is written into ``o``.

    ``kv_buffer`` is viewed as (-1, 1, 1, q.shape[-1]) before dispatch —
    assumes its numel is divisible by q's head dim; confirm with callers.
    """
    torch.ops.vllm.rocm_aiter_mla_decode_fwd(
        q,
        kv_buffer.view(-1, 1, 1, q.shape[-1]),
        o,
        qo_indptr,
        max_seqlen_qo,
        kv_indptr,
        kv_indices,
        kv_last_page_lens,
        sm_scale=sm_scale,
        logit_cap=logit_cap,
        q_scale=q_scale,
        kv_scale=kv_scale,
    )
@staticmethod
def per_tensor_quant(
    x: torch.Tensor,
    quant_dtype: torch.dtype,
    scale: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Per-tensor quantization; returns (quantized_x, scale)."""
    return torch.ops.vllm.rocm_aiter_per_tensor_quant(x, quant_dtype, scale)
@staticmethod
def per_token_quant(
    x: torch.Tensor,
    quant_dtype: torch.dtype,
    scale: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Per-token quantization; returns (quantized_x, scales)."""
    return torch.ops.vllm.rocm_aiter_per_token_quant(x, quant_dtype, scale)
@staticmethod
def triton_fp4_gemm_dynamic_qaunt(
    x: torch.Tensor,
    weight: torch.Tensor,
    weight_scale: torch.Tensor,
    out_dtype: torch.dtype | None = torch.bfloat16,
    x_scales: torch.Tensor | None = None,
) -> torch.Tensor:
    """mxfp4 GEMM; quantizes ``x`` dynamically when no scales are given.

    NOTE(review): the "qaunt" typo in the method name is kept for caller
    compatibility — renaming would break the public interface.
    """
    # Lazy imports: aiter's triton kernels may not be installed.
    from aiter.ops.triton.gemm_afp4wfp4 import gemm_afp4wfp4
    from aiter.ops.triton.quant import dynamic_mxfp4_quant

    if x_scales is None:
        # No activation scales supplied: quantize on the fly.
        x_q, x_s = dynamic_mxfp4_quant(x)
    else:
        x_q = x
        x_s = x_scales
    # Kernel writes its result into a preallocated output buffer.
    y = torch.empty(
        x_q.shape[0], weight.shape[0], device=x_q.device, dtype=out_dtype
    )
    gemm_afp4wfp4(x_q, weight, x_s, weight_scale.T, out_dtype, y)
    return y
@staticmethod
def triton_rope_and_cache(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    positions: torch.Tensor,
    cos_sin_cache: torch.Tensor,
    is_neox: bool,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    layer_slot_mapping: torch.Tensor,
    k_scale: torch.Tensor,
    v_scale: torch.Tensor,
    flash_layout: bool,
    apply_scale: bool,
):
    """Fused RoPE + KV-cache write; query/key are rotated in place
    (q_out/k_out alias the inputs)."""
    from aiter.ops.triton.fused_kv_cache import fused_qk_rope_reshape_and_cache

    # cos_sin_cache stores cos and sin halves concatenated on the last dim.
    cos, sin = cos_sin_cache.chunk(2, dim=-1)
    fused_qk_rope_reshape_and_cache(
        query,
        key,
        value,
        key_cache,
        value_cache,
        layer_slot_mapping,
        positions,
        cos,
        sin,
        k_scale,
        v_scale,
        is_neox,
        flash_layout=flash_layout,
        apply_scale=apply_scale,
        q_out=query,
        k_out=key,
        output_zeros=False,
    )
@staticmethod
def batched_gemm_a16wfp4(
    X: torch.Tensor,
    W: torch.Tensor,
    w_scale: torch.Tensor,
    Y: torch.Tensor,
    transpose_bm: bool | None = False,
    prequant: bool | None = False,
    y_scale: torch.Tensor | None = None,
) -> torch.Tensor:
    """Batched a16wfp4 GEMM (lazy aiter triton import); result lands in ``Y``."""
    # ruff: noqa: E501 # isort: skip
    from aiter.ops.triton.batched_gemm_a16wfp4 import batched_gemm_a16wfp4

    return batched_gemm_a16wfp4(
        X,
        W,
        w_scale,
        y=Y,
        transpose_bm=transpose_bm,
        prequant=prequant,
        y_scale=y_scale,
    )
@staticmethod
def triton_fp8_bmm(
    X: torch.Tensor,
    WQ: torch.Tensor,
    w_scale: torch.Tensor,
    group_size: int = 128,
    bias: torch.Tensor | None = None,
    dtype: torch.dtype | None = torch.bfloat16,
    splitK: int | None = None,
    YQ: torch.Tensor | None = None,
    transpose_bm: bool | None = False,
    config: dict | None = None,
) -> torch.Tensor:
    """FP8 batched matmul: per-token-group prequant of X against a
    per-batched-tensor-quantized weight (lazy aiter triton import)."""
    # ruff: noqa: E501 # isort: skip
    from aiter.ops.triton.batched_gemm_a8w8_a_per_token_group_prequant_w_per_batched_tensor_quant import (
        batched_gemm_a8w8_a_per_token_group_prequant_w_per_batched_tensor_quant as aiter_triton_fp8_bmm,
    )

    return aiter_triton_fp8_bmm(
        X,
        WQ,
        w_scale,
        group_size=group_size,
        bias=bias,
        dtype=dtype,
        splitK=splitK,
        YQ=YQ,
        transpose_bm=transpose_bm,
        config=config,
    )
@staticmethod
def group_fp8_quant(
    input_2d: torch.Tensor,
    group_size: int = 128,
) -> tuple[torch.Tensor, torch.Tensor]:
    """FP8 group quantization; only group_size 128 is supported."""
    assert group_size == 128, "Group size must be 128"
    return torch.ops.vllm.rocm_aiter_group_fp8_quant(input_2d, group_size)
@staticmethod
def is_triton_gemm_w8a8_tuned(n: int, k: int) -> bool:
return (n, k) in [
(1024, 8192),
(2112, 7168),
(3072, 1536),
(32768, 8192),
(4096, 7168),
(4608, 7168),
(512, 7168),
(7168, 2048),
(7168, 256),
(8192, 1024),
(8192, 32768),
]
@staticmethod
def is_triton_gemm_afp4wfp4_presh_ws_tuned(n: int, k: int) -> bool:
return (n, k) in [
(8192, 4096),
(1280, 8192),
(16384, 53248),
(106496, 16384),
(57344, 8192),
(8192, 2048),
(2560, 8192),
(10240, 8192),
(16384, 16384),
(8192, 28672),
(28672, 8192),
(18432, 16384),
(8192, 1024),
(7168, 8192),
(5120, 8192),
(8192, 8192),
(8192, 7168),
(14336, 8192),
(8192, 14336),
(8192, 3584),
]
@staticmethod
def shuffle_weight(
    tensor: torch.Tensor, layout: tuple[int, int] = (16, 16)
) -> torch.Tensor:
    """Shuffle a weight tensor into aiter's blocked layout.

    Fix: the original declared ``self`` as the first parameter of a
    ``@staticmethod``, so ``rocm_aiter_ops.shuffle_weight(t)`` bound the
    tensor to ``self`` and raised TypeError. ``self`` is removed; the
    signature now matches the sibling ``shuffle_weights`` helper.
    """
    from aiter.ops.shuffle import shuffle_weight

    return shuffle_weight(tensor, layout=layout)
@staticmethod
def shuffle_weight_a16w4(
    tensor: "torch.Tensor",
    nLane: int,
    gate_up: bool,
) -> "torch.Tensor":
    """
    Shuffles the weight tensor into (A16W4) layout for AITER kernels.
    Args:
        tensor: The input weight tensor to be shuffled.
        nLane: Lane count forwarded to the aiter shuffle kernel.
        gate_up: Whether the weight is for w13 (True) or w2 (False).
    Returns:
        torch.Tensor: The shuffled tensor.
    """
    from aiter.ops.shuffle import shuffle_weight_a16w4

    return shuffle_weight_a16w4(tensor, nLane, gate_up)
@staticmethod
def shuffle_scale_a16w4(
    tensor: "torch.Tensor",
    num_experts: int,
    gate_up: bool,
) -> "torch.Tensor":
    """
    Shuffles the scale tensor into (A16W4) layout for AITER kernels.
    Args:
        tensor: The input scale tensor to be shuffled.
        num_experts: Number of experts, needed for reshaping logic.
        gate_up: Whether the scale is for w13 (True) or w2 (False).
    Returns:
        torch.Tensor: The shuffled scale tensor.
    """
    # Lazy import: aiter may not be installed on non-ROCm builds.
    from aiter.ops.shuffle import shuffle_scale_a16w4

    return shuffle_scale_a16w4(tensor, num_experts, gate_up)
@staticmethod
def shuffle_weights(
    *tensors: torch.Tensor, layout: tuple[int, int] = (16, 16)
) -> tuple[torch.Tensor, ...]:
    """Shuffle each input tensor into aiter's blocked layout.

    Applies aiter's ``shuffle_weight`` to every tensor in turn, using
    ``layout`` as the block sizes for the rearrangement.

    Args:
        *tensors: Tensors to shuffle, in order.
        layout: Block sizes used during shuffling; defaults to (16, 16).

    Returns:
        A tuple with one shuffled tensor per input, in the same order.
    """
    from aiter.ops.shuffle import shuffle_weight

    shuffled = []
    for t in tensors:
        shuffled.append(shuffle_weight(t, layout=layout))
    return tuple(shuffled)
@staticmethod
def flash_attn_varlen_func(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    cu_seqlens_q: torch.Tensor,
    cu_seqlens_k: torch.Tensor,
    max_seqlen_q: int,
    max_seqlen_k: int,
    min_seqlen_q: int | None = None,
    dropout_p: float = 0.0,
    softmax_scale: float | None = None,
    causal: bool = False,
    window_size: tuple[int, int] | None = None,
    alibi_slopes: torch.Tensor | None = None,
    return_lse: bool = False,
    out: torch.Tensor | None = None,
):
    """
    Flash attention with variable length sequences.
    This function is NOT wrapped with the if_aiter_supported decorator
    to allow explicit backend selection via attention_config to work
    even when VLLM_ROCM_USE_AITER=0.
    Note: This performs lazy import of aiter.flash_attn_varlen_func
    """
    from aiter import flash_attn_varlen_func

    return flash_attn_varlen_func(
        q=q,
        k=k,
        v=v,
        cu_seqlens_q=cu_seqlens_q,
        cu_seqlens_k=cu_seqlens_k,
        max_seqlen_q=max_seqlen_q,
        max_seqlen_k=max_seqlen_k,
        min_seqlen_q=min_seqlen_q,
        dropout_p=dropout_p,
        softmax_scale=softmax_scale,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        return_lse=return_lse,
        out=out,
    )
@staticmethod
def pa_fwd_asm(
    Q: torch.Tensor,
    K: torch.Tensor,
    V: torch.Tensor,
    block_tables: torch.Tensor,
    context_lens: torch.Tensor,
    block_tables_stride0: int,
    K_QScale: torch.Tensor,
    V_QScale: torch.Tensor,
    out_: torch.Tensor,
):
    """
    Paged attention forward pass using assembly kernel.
    This function is NOT wrapped with the if_aiter_supported decorator
    to allow explicit backend selection via attention_config to work
    even when VLLM_ROCM_USE_AITER=0.
    Note: This performs lazy import of aiter.pa_fwd_asm
    """
    from aiter import pa_fwd_asm

    return pa_fwd_asm(
        Q=Q,
        K=K,
        V=V,
        block_tables=block_tables,
        context_lens=context_lens,
        block_tables_stride0=block_tables_stride0,
        K_QScale=K_QScale,
        V_QScale=V_QScale,
        out_=out_,
    )
# Eagerly register all aiter custom ops at import time (idempotent; a no-op
# when aiter is unsupported thanks to the if_aiter_supported guard).
rocm_aiter_ops.register_ops_once()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/_aiter_ops.py",
"license": "Apache License 2.0",
"lines": 1684,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/test_outputs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest import TestCase
from vllm.v1.outputs import LogprobsLists
class TestLogprobsLists(TestCase):
    """Unit tests for LogprobsLists.slice_request.

    The fixture models three requests with 2, 3 and 4 generated tokens;
    cu_num_generated_tokens holds the cumulative token boundaries
    [0, 2, 5, 9] used to locate each request's rows.
    """

    def setUp(self):
        self.logprobsLists = LogprobsLists(
            logprob_token_ids=[
                [1, 2],  # Request 0 token 0
                [3, 4],  # Request 0 token 1
                [5, 6],  # Request 1 token 0
                [7, 8],  # Request 1 token 1
                [9, 10],  # Request 1 token 2
                [11, 12],  # Request 2 token 0
                [13, 14],  # Request 2 token 1
                [15, 16],  # Request 2 token 2
                [17, 18],  # Request 2 token 3
            ],
            logprobs=[
                [0.1, 0.2],
                [0.3, 0.4],
                [0.5, 0.6],
                [0.7, 0.8],
                [0.9, 1.0],
                [1.1, 1.2],
                [1.3, 1.4],
                [1.5, 1.6],
                [1.7, 1.8],
            ],
            sampled_token_ranks=[1, 3, 5, 7, 9, 11, 13, 15, 17],
            cu_num_generated_tokens=[0, 2, 5, 9],
        )

    def test_slice_without_cu_num_generated_tokens(self):
        """Test slicing without cu_num_generated_tokens"""
        logprobsLists = LogprobsLists(
            logprob_token_ids=[[1], [2], [3]],
            logprobs=[[0.1], [0.2], [0.3]],
            sampled_token_ranks=[1, 2, 3],
            cu_num_generated_tokens=None,
        )
        sliced = logprobsLists.slice_request(1, num_positions=2)
        assert sliced.logprob_token_ids == [[2], [3]]
        assert sliced.logprobs == [[0.2], [0.3]]
        assert sliced.sampled_token_ranks == [2, 3]
        assert sliced.cu_num_generated_tokens is None

    def test_slice_from_start(self):
        """Test slicing from the start position"""
        sliced = self.logprobsLists.slice_request(0, num_positions=5)
        assert len(sliced.logprob_token_ids) == 5
        assert sliced.logprob_token_ids == [
            [1, 2],
            [3, 4],
            [5, 6],
            [7, 8],
            [9, 10],
        ]
        # slice_request drops the cumulative-token bookkeeping.
        assert sliced.cu_num_generated_tokens is None

    def test_slice_from_middle(self):
        """Test slicing from the middle position"""
        sliced = self.logprobsLists.slice_request(1, num_positions=7)
        assert len(sliced.logprob_token_ids) == 7
        assert sliced.logprob_token_ids == [
            [5, 6],
            [7, 8],
            [9, 10],
            [11, 12],
            [13, 14],
            [15, 16],
            [17, 18],
        ]
        assert sliced.cu_num_generated_tokens is None

    def test_slice_single_request(self):
        """Test slicing a single request"""
        sliced = self.logprobsLists.slice_request(1, num_positions=3)
        assert len(sliced.logprob_token_ids) == 3
        assert sliced.logprob_token_ids == [[5, 6], [7, 8], [9, 10]]
        assert sliced.cu_num_generated_tokens is None

    def test_slice_last_request(self):
        """Test slicing the last request"""
        sliced = self.logprobsLists.slice_request(2, num_positions=4)
        assert len(sliced.logprob_token_ids) == 4
        assert sliced.logprob_token_ids == [[11, 12], [13, 14], [15, 16], [17, 18]]
        assert sliced.cu_num_generated_tokens is None

    def test_slice_all_requests(self):
        """Test slicing all requests (full slice)"""
        sliced = self.logprobsLists.slice_request(0, num_positions=9)
        assert len(sliced.logprob_token_ids) == 9  # All tokens
        assert sliced.logprob_token_ids == self.logprobsLists.logprob_token_ids
        assert sliced.cu_num_generated_tokens is None
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/test_outputs.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/e2e/test_lora_with_spec_decode.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script contains:
1. test lora with speculative decoding for batch inference
"""
import random
import numpy as np
import pytest
import torch
from vllm import LLM, SamplingParams
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.lora.request import LoRARequest
from vllm.platforms import current_platform
# Maps a LoRA adapter repo id to a prompt that exercises that adapter.
LORA_TEST_PROMPT_MAP: dict[str, str] = {}
LORA_TEST_PROMPT_MAP["premjatin/qwen-linear-algebra-coder"] = """
### INSTRUCTION:
You are an AI assistant that generates Python code to solve linear
algebra problems.
### PROBLEM:
Find the eigenvalues and eigenvectors of the following 3x3 matrix:
[[3, 2, 0],
[2, 3, 0],
[0, 0, 2]]
### OUTPUT FORMAT (STRICT):
Numbers should be represented as integers only.
### PYTHON SOLUTION:
"""
# Fixed RNG seed so the reference and spec-decode runs are comparable.
SEED = 42
@pytest.mark.skipif(not current_platform.is_cuda(), reason="CUDA not available")
@pytest.mark.parametrize(
    "model_setup",
    [
        (
            "eagle3",
            "Qwen/Qwen3-1.7B",
            "AngelSlim/Qwen3-1.7B_eagle3",
            "premjatin/qwen-linear-algebra-coder",
            1,
        )
    ],
)
def test_batch_inference_correctness(
    monkeypatch: pytest.MonkeyPatch,
    model_setup: tuple[str, str, str, str, int],
):
    """
    Compare the outputs of a LLM with only Lora and a LLM with both SD and Lora.
    Should be the same and no failure when doing batch inference.
    model_setup: (method, model_name, spec_model_name, lora_path, tp_size)
    """
    with monkeypatch.context() as m:
        # Disable randomness
        m.setenv("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
        torch.manual_seed(SEED)
        np.random.seed(SEED)
        random.seed(SEED)
        torch.cuda.manual_seed_all(SEED)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        method, model_name, spec_model_name, lora_path, tp_size = model_setup
        # Reference run: LoRA only, without speculative decoding.
        ref_llm = LLM(
            model=model_name,
            trust_remote_code=True,
            tensor_parallel_size=tp_size,
            max_model_len=2048,
            max_num_seqs=4,
            enable_lora=True,
            max_loras=1,
            max_cpu_loras=1,
            max_lora_rank=16,
        )
        prompts = [LORA_TEST_PROMPT_MAP[lora_path]] * 100
        lora_request = LoRARequest("adapter", 1, lora_path)
        # Greedy decoding (temperature=0) so outputs are deterministic.
        sampling_params = SamplingParams(
            temperature=0.0, top_p=1.0, top_k=-1, seed=SEED, max_tokens=128
        )
        ref_outputs = ref_llm.generate(
            prompts, sampling_params, lora_request=lora_request
        )
        # Tear down the reference engine to free GPU memory before the
        # second engine is built.
        del ref_llm
        torch.cuda.empty_cache()
        cleanup_dist_env_and_memory()
        # Second run: same LoRA plus speculative decoding.
        lora_spec_llm = LLM(
            model=model_name,
            trust_remote_code=True,
            tensor_parallel_size=tp_size,
            speculative_config={
                "method": method,
                "model": spec_model_name,
                "num_speculative_tokens": 3,
                "max_model_len": 2048,
            },
            max_model_len=2048,
            max_num_seqs=4,
            enable_lora=True,
            max_loras=1,
            max_cpu_loras=1,
            max_lora_rank=16,
        )
        lora_spec_outputs = lora_spec_llm.generate(
            prompts, sampling_params, lora_request=lora_request
        )
        matches = 0
        misses = 0
        for ref_output, spec_output in zip(ref_outputs, lora_spec_outputs):
            if ref_output.outputs[0].text == spec_output.outputs[0].text:
                matches += 1
            else:
                misses += 1
                print(f"ref_output: {ref_output.outputs[0].text}")
                print(f"spec_output: {spec_output.outputs[0].text}")
        # Heuristic: expect at least 90% of the prompts to match exactly
        # Upon failure, inspect the outputs to check for inaccuracy.
        print(f"match ratio: {matches}/{len(ref_outputs)}")
        assert matches > int(0.90 * len(ref_outputs))
        del lora_spec_llm
        torch.cuda.empty_cache()
        cleanup_dist_env_and_memory()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/e2e/test_lora_with_spec_decode.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/reasoning/test_gptoss_reasoning_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from transformers import AutoTokenizer
from vllm.reasoning import ReasoningParser
from vllm.reasoning.gptoss_reasoning_parser import GptOssReasoningParser
# Model whose tokenizer defines the gpt-oss channel/special tokens.
REASONING_MODEL_NAME = "openai/gpt-oss-120b"


@pytest.fixture(scope="module")
def gpt_oss_tokenizer():
    # Module-scoped so the tokenizer is loaded once per test module.
    return AutoTokenizer.from_pretrained(REASONING_MODEL_NAME)
# Building blocks of the gpt-oss "harmony" chat markup used below.
USER_MESSAGE_START = "<|start|>user<|message|>"
REASONING_SECTION_START = "<|end|><|start|>assistant<|channel|>analysis<|message|>"
END = "<|end|>"
ASSISTANT_START = "<|start|>assistant"
ASSISTANT_CONTENT_START_PREFIX = END + ASSISTANT_START + "<|channel|>final"
ASSISTANT_CONTENT_START_SUFFIX = "<|message|>"
ASSISTANT_CONTENT_START = (
    ASSISTANT_CONTENT_START_PREFIX + ASSISTANT_CONTENT_START_SUFFIX
)
# Each case pairs a rendered model output with the expected
# is_reasoning_end verdict for that token stream.
BASIC_CONTENT = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START
    + "This is the rest",
    "is_reasoning_end": True,
}
BASIC_REASONING_ONLY = {
    "output": REASONING_SECTION_START + "This is reasoning" + "<|end|>",
    "is_reasoning_end": False,
}
BASIC_NO_REASONING_NO_ASSISTANT = {
    "output": USER_MESSAGE_START + "This is a user message",
    "is_reasoning_end": False,
}
# Edge-case where the model omits the assistant tag entirely.
BASIC_NO_REASONING_ASSISTANT = {
    "output": USER_MESSAGE_START + "This is a user message<|end|><|channel|>final",
    "is_reasoning_end": True,
}
COMPLEX_CONTENT_INCOMPLETE_PREFIX_ONLY = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX,
    "is_reasoning_end": False,
}
COMPLEX_CONTENT_SUFFIX_ONLY = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_SUFFIX,
    "is_reasoning_end": False,
}
COMPLEX_CONTENT_1_NO_SUFFIX = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON ",
    "is_reasoning_end": False,
}
COMPLEX_CONTENT_1 = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON "
    + ASSISTANT_CONTENT_START_SUFFIX,
    "is_reasoning_end": True,
}
COMPLEX_CONTENT_1_WITH_CONTENT = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|> JSON "
    + ASSISTANT_CONTENT_START_SUFFIX
    + "This is the rest",
    "is_reasoning_end": True,
}
COMPLEX_CONTENT_2 = {
    "output": REASONING_SECTION_START
    + "This is reasoning"
    + ASSISTANT_CONTENT_START_PREFIX
    + "<|constrain|>ReplyAction "
    + ASSISTANT_CONTENT_START_SUFFIX
    + "This is the rest",
    "is_reasoning_end": True,
}
MULTI_TURN_CONTENT = {
    "output": USER_MESSAGE_START
    + "1st turn user message"
    + REASONING_SECTION_START
    + "1st turn reasoning"
    + ASSISTANT_CONTENT_START
    + "1st turn response"
    + END
    + USER_MESSAGE_START
    + "2nd turn user message"
    + END
    + ASSISTANT_START,
    "is_reasoning_end": False,
}
# NOTE(review): BASIC_NO_REASONING_NO_ASSISTANT and
# BASIC_NO_REASONING_ASSISTANT are defined but absent from TEST_CASES —
# confirm whether that omission is intentional.
TEST_CASES = [
    BASIC_CONTENT,
    BASIC_REASONING_ONLY,
    COMPLEX_CONTENT_INCOMPLETE_PREFIX_ONLY,
    COMPLEX_CONTENT_SUFFIX_ONLY,
    COMPLEX_CONTENT_1_NO_SUFFIX,
    COMPLEX_CONTENT_1,
    COMPLEX_CONTENT_1_WITH_CONTENT,
    COMPLEX_CONTENT_2,
    MULTI_TURN_CONTENT,
]
@pytest.mark.parametrize(
    "output, is_reasoning_end",
    [(t["output"], t["is_reasoning_end"]) for t in TEST_CASES],
)
def test_gptoss_is_reasoning_end(
    output,
    is_reasoning_end,
    gpt_oss_tokenizer,
):
    """is_reasoning_end must match the expected verdict for each rendered output."""
    output = gpt_oss_tokenizer.tokenize(output)
    parser: ReasoningParser = GptOssReasoningParser(gpt_oss_tokenizer)
    # Test is_reasoning_end
    output_ids = gpt_oss_tokenizer.convert_tokens_to_ids(output)
    actual_is_reasoning_end = parser.is_reasoning_end(output_ids)
    assert is_reasoning_end == actual_is_reasoning_end
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/reasoning/test_gptoss_reasoning_parser.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/test_logprobs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.logprobs import (
FlatLogprobs,
Logprob,
LogprobsOnePosition,
append_logprobs_for_next_position,
create_prompt_logprobs,
create_sample_logprobs,
)
def test_create_logprobs_non_flat() -> None:
    """Non-flat factories return plain lists."""
    prompt = create_prompt_logprobs(flat_logprobs=False)
    assert isinstance(prompt, list)
    # The first prompt position never carries logprobs.
    assert len(prompt) == 1
    assert prompt[0] is None
    samples = create_sample_logprobs(flat_logprobs=False)
    assert isinstance(samples, list)
    assert len(samples) == 0
def test_create_logprobs_flat() -> None:
    """Flat factories return FlatLogprobs with empty data columns."""
    prompt = create_prompt_logprobs(flat_logprobs=True)
    assert isinstance(prompt, FlatLogprobs)
    assert prompt.start_indices == [0]
    assert prompt.end_indices == [0]
    for column in ("token_ids", "logprobs", "ranks", "decoded_tokens"):
        assert len(getattr(prompt, column)) == 0
    # The first prompt position exists but holds no logprobs.
    assert len(prompt) == 1
    assert prompt[0] == {}
    samples = create_sample_logprobs(flat_logprobs=True)
    assert isinstance(samples, FlatLogprobs)
    for column in (
        "start_indices",
        "end_indices",
        "token_ids",
        "logprobs",
        "ranks",
        "decoded_tokens",
    ):
        assert len(getattr(samples, column)) == 0
    assert len(samples) == 0
def test_append_logprobs_for_next_position_none_flat() -> None:
    """Appending positions to a non-flat container grows a list of dicts."""
    logprobs = create_sample_logprobs(flat_logprobs=False)
    for token_ids, position_logprobs, decoded, rank in (
        ([1], [0.1], ["1"], 10),
        ([2, 3], [0.2, 0.3], ["2", "3"], 11),
    ):
        append_logprobs_for_next_position(
            logprobs,
            token_ids=token_ids,
            logprobs=position_logprobs,
            decoded_tokens=decoded,
            rank=rank,
            num_logprobs=-1,
        )
    assert isinstance(logprobs, list)
    # First token of each position keeps the passed-in rank (10, 11); the
    # extra token of the second position comes out with rank 1.
    assert logprobs == [
        {1: Logprob(logprob=0.1, rank=10, decoded_token="1")},
        {
            2: Logprob(logprob=0.2, rank=11, decoded_token="2"),
            3: Logprob(logprob=0.3, rank=1, decoded_token="3"),
        },
    ]
def test_append_logprobs_for_next_position_flat() -> None:
    """Appending positions to FlatLogprobs extends its parallel arrays."""
    logprobs = create_sample_logprobs(flat_logprobs=True)
    for token_ids, position_logprobs, decoded, rank in (
        ([1], [0.1], ["1"], 10),
        ([2, 3], [0.2, 0.3], ["2", "3"], 11),
    ):
        append_logprobs_for_next_position(
            logprobs,
            token_ids=token_ids,
            logprobs=position_logprobs,
            decoded_tokens=decoded,
            rank=rank,
            num_logprobs=-1,
        )
    assert isinstance(logprobs, FlatLogprobs)
    # Two positions: [0, 1) and [1, 3) in the flattened columns.
    assert logprobs.start_indices == [0, 1]
    assert logprobs.end_indices == [1, 3]
    assert logprobs.token_ids == [1, 2, 3]
    assert logprobs.logprobs == [0.1, 0.2, 0.3]
    assert logprobs.ranks == [10, 11, 1]
    assert logprobs.decoded_tokens == ["1", "2", "3"]
# Shared per-position fixtures with one, two and three entries respectively.
# Token ids, logprobs, ranks and decoded strings are all distinct so that a
# column mix-up in FlatLogprobs shows up immediately in test failures.
LOGPROBS_ONE_POSITION_0: LogprobsOnePosition = {
    1: Logprob(logprob=0.1, rank=10, decoded_token="10")
}
LOGPROBS_ONE_POSITION_1: LogprobsOnePosition = {
    2: Logprob(logprob=0.2, rank=20, decoded_token="20"),
    3: Logprob(logprob=0.3, rank=30, decoded_token="30"),
}
LOGPROBS_ONE_POSITION_2: LogprobsOnePosition = {
    4: Logprob(logprob=0.4, rank=40, decoded_token="40"),
    5: Logprob(logprob=0.5, rank=50, decoded_token="50"),
    6: Logprob(logprob=0.6, rank=60, decoded_token="60"),
}
def test_flat_logprobs_append() -> None:
    """FlatLogprobs.append concatenates each position's entries in order."""

    def check(fl, starts, ends, tokens, lps, ranks, decoded):
        # One-stop comparison of all six parallel columns.
        assert fl.start_indices == starts
        assert fl.end_indices == ends
        assert fl.token_ids == tokens
        assert fl.logprobs == lps
        assert fl.ranks == ranks
        assert fl.decoded_tokens == decoded

    logprobs = FlatLogprobs()
    logprobs.append(LOGPROBS_ONE_POSITION_0)
    logprobs.append(LOGPROBS_ONE_POSITION_1)
    check(
        logprobs,
        [0, 1],
        [1, 3],
        [1, 2, 3],
        [0.1, 0.2, 0.3],
        [10, 20, 30],
        ["10", "20", "30"],
    )
    logprobs.append(LOGPROBS_ONE_POSITION_2)
    check(
        logprobs,
        [0, 1, 3],
        [1, 3, 6],
        [1, 2, 3, 4, 5, 6],
        [0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
        [10, 20, 30, 40, 50, 60],
        ["10", "20", "30", "40", "50", "60"],
    )
def test_flat_logprobs_extend() -> None:
    """FlatLogprobs.extend accepts both plain lists and other FlatLogprobs."""

    def check(fl, starts, ends, tokens, lps, ranks, decoded):
        # One-stop comparison of all six parallel columns.
        assert fl.start_indices == starts
        assert fl.end_indices == ends
        assert fl.token_ids == tokens
        assert fl.logprobs == lps
        assert fl.ranks == ranks
        assert fl.decoded_tokens == decoded

    logprobs = FlatLogprobs()
    # Extend with list[LogprobsOnePosition]
    logprobs.extend([LOGPROBS_ONE_POSITION_2, LOGPROBS_ONE_POSITION_0])
    check(
        logprobs,
        [0, 3],
        [3, 4],
        [4, 5, 6, 1],
        [0.4, 0.5, 0.6, 0.1],
        [40, 50, 60, 10],
        ["40", "50", "60", "10"],
    )
    other_logprobs = FlatLogprobs()
    other_logprobs.extend([LOGPROBS_ONE_POSITION_1, LOGPROBS_ONE_POSITION_0])
    # Extend with another FlatLogprobs
    logprobs.extend(other_logprobs)
    check(
        logprobs,
        [0, 3, 4, 6],
        [3, 4, 6, 7],
        [4, 5, 6, 1, 2, 3, 1],
        [0.4, 0.5, 0.6, 0.1, 0.2, 0.3, 0.1],
        [40, 50, 60, 10, 20, 30, 10],
        ["40", "50", "60", "10", "20", "30", "10"],
    )
def test_flat_logprobs_access() -> None:
    """Exercise FlatLogprobs' sequence protocol: __len__, __iter__ and
    __getitem__ with both single indices and slices."""
    logprobs = FlatLogprobs()
    logprobs.extend(
        [LOGPROBS_ONE_POSITION_1, LOGPROBS_ONE_POSITION_2, LOGPROBS_ONE_POSITION_0]
    )
    # Sanity-check the flattened columns before probing the accessors.
    assert logprobs.start_indices == [0, 2, 5]
    assert logprobs.end_indices == [2, 5, 6]
    assert logprobs.token_ids == [2, 3, 4, 5, 6, 1]
    assert logprobs.logprobs == [0.2, 0.3, 0.4, 0.5, 0.6, 0.1]
    assert logprobs.ranks == [20, 30, 40, 50, 60, 10]
    assert logprobs.decoded_tokens == ["20", "30", "40", "50", "60", "10"]
    # Test __len__
    assert len(logprobs) == 3
    # Test __iter__ — each yielded position equals its source dict.
    for actual_logprobs, expected_logprobs in zip(
        logprobs,
        [LOGPROBS_ONE_POSITION_1, LOGPROBS_ONE_POSITION_2, LOGPROBS_ONE_POSITION_0],
    ):
        assert actual_logprobs == expected_logprobs
    # Test __getitem__ : single item
    assert logprobs[0] == LOGPROBS_ONE_POSITION_1
    assert logprobs[1] == LOGPROBS_ONE_POSITION_2
    assert logprobs[2] == LOGPROBS_ONE_POSITION_0
    # Test __getitem__ : slice
    logprobs02 = logprobs[:2]
    assert len(logprobs02) == 2
    assert logprobs02[0] == LOGPROBS_ONE_POSITION_1
    assert logprobs02[1] == LOGPROBS_ONE_POSITION_2
    assert logprobs02.start_indices == [0, 2]
    assert logprobs02.end_indices == [2, 5]
    assert logprobs02.token_ids == [2, 3, 4, 5, 6]
    assert logprobs02.logprobs == [0.2, 0.3, 0.4, 0.5, 0.6]
    assert logprobs02.ranks == [20, 30, 40, 50, 60]
    assert logprobs02.decoded_tokens == ["20", "30", "40", "50", "60"]
    # Negative slice: indices are re-based to 0 in the sliced copy
    # (original starts [2, 5] become [0, 3]).
    logprobs_last2 = logprobs[-2:]
    assert len(logprobs_last2) == 2
    assert logprobs_last2[0] == LOGPROBS_ONE_POSITION_2
    assert logprobs_last2[1] == LOGPROBS_ONE_POSITION_0
    assert logprobs_last2.start_indices == [0, 3]
    assert logprobs_last2.end_indices == [3, 4]
    assert logprobs_last2.token_ids == [4, 5, 6, 1]
    assert logprobs_last2.logprobs == [0.4, 0.5, 0.6, 0.1]
    assert logprobs_last2.ranks == [40, 50, 60, 10]
    assert logprobs_last2.decoded_tokens == ["40", "50", "60", "10"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_logprobs.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/online_serving/openai_responses_client_with_tools.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Set up this example by starting a vLLM OpenAI-compatible server with tool call
options enabled.
Reasoning models can be used through the Responses API as seen here
https://platform.openai.com/docs/api-reference/responses
For example:
vllm serve Qwen/Qwen3-1.7B --reasoning-parser qwen3 \
--structured-outputs-config.backend xgrammar \
--enable-auto-tool-choice --tool-call-parser hermes
"""
import json
from openai import OpenAI
from utils import get_first_model
def get_weather(latitude: float, longitude: float) -> str:
    """
    Mock function to simulate getting weather data.
    In a real application, this would call an external weather API.
    """
    return "Current temperature at ({}, {}) is 20°C.".format(latitude, longitude)
# Single function tool exposed to the model; the JSON schema constrains the
# arguments the model may generate for get_weather.
tools = [
    {
        "type": "function",
        "name": "get_weather",
        "description": "Get current temperature for provided coordinates in celsius.",
        "parameters": {
            "type": "object",
            "properties": {
                "latitude": {"type": "number"},
                "longitude": {"type": "number"},
            },
            "required": ["latitude", "longitude"],
            "additionalProperties": False,
        },
        "strict": True,
    }
]
# Conversation state; main() appends the tool call and its result in place.
input_messages = [
    {"role": "user", "content": "What's the weather like in Paris today?"}
]
def main():
    """Run a two-step tool-calling conversation against a local vLLM server.

    Step 1 forces a tool call, executes the mocked get_weather locally, and
    step 2 feeds the tool result back to obtain a final answer.
    """
    base_url = "http://0.0.0.0:8000/v1"
    client = OpenAI(base_url=base_url, api_key="empty")
    model = get_first_model(client)
    response = client.responses.create(
        model=model, input=input_messages, tools=tools, tool_choice="required"
    )
    # Find the model's function call. tool_choice="required" should force one,
    # but fail with a clear error instead of a NameError if none came back.
    tool_call = None
    for out in response.output:
        if out.type == "function_call":
            print("Function call:", out.name, out.arguments)
            tool_call = out
    if tool_call is None:
        raise RuntimeError("model returned no function_call output")
    args = json.loads(tool_call.arguments)
    result = get_weather(args["latitude"], args["longitude"])
    input_messages.append(tool_call)  # append model's function call message
    input_messages.append(
        {  # append result message
            "type": "function_call_output",
            "call_id": tool_call.call_id,
            "output": str(result),
        }
    )
    response_2 = client.responses.create(
        model=model,
        input=input_messages,
        tools=tools,
    )
    print(response_2.output_text)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/openai_responses_client_with_tools.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/entrypoints/openai/serving_responses/test_function_call.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import openai # use the official client for correctness check
import pytest
MODEL_NAME = "Qwen/Qwen3-1.7B"
# Two function tools shared by the tests below. The first one embeds a
# "$defs"/"$ref" sub-schema to exercise JSON-schema reference handling in
# the structured-outputs backend.
tools = [
    {
        "type": "function",
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The city to find the weather for, e.g. 'Vienna'",
                    "default": "Vienna",
                },
                "country": {
                    "type": "string",
                    "description": "The country that the city is in, e.g. 'Austria'",
                },
                "unit": {
                    "type": "string",
                    "description": "The unit to fetch the temperature in",
                    "enum": ["celsius", "fahrenheit"],
                },
                "options": {
                    "$ref": "#/$defs/WeatherOptions",
                    "description": "Optional parameters for weather query",
                },
            },
            "required": ["country", "unit"],
            "$defs": {
                "WeatherOptions": {
                    "title": "WeatherOptions",
                    "type": "object",
                    "additionalProperties": False,
                    "properties": {
                        "unit": {
                            "type": "string",
                            "enum": ["celsius", "fahrenheit"],
                            "default": "celsius",
                            "description": "Temperature unit",
                            "title": "Temperature Unit",
                        },
                        "include_forecast": {
                            "type": "boolean",
                            "default": False,
                            "description": "Whether to include a 24-hour forecast",
                            "title": "Include Forecast",
                        },
                        "language": {
                            "type": "string",
                            "default": "zh-CN",
                            "description": "Language of the response",
                            "title": "Language",
                            "enum": ["zh-CN", "en-US", "ja-JP"],
                        },
                    },
                },
            },
        },
    },
    {
        "type": "function",
        "name": "get_forecast",
        "description": "Get the weather forecast for a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "The city to get the forecast for, e.g. 'Vienna'",
                    "default": "Vienna",
                },
                "country": {
                    "type": "string",
                    "description": "The country that the city is in, e.g. 'Austria'",
                },
                "days": {
                    "type": "integer",
                    "description": "Number of days to get the forecast for (1-7)",
                },
                "unit": {
                    "type": "string",
                    "description": "The unit to fetch the temperature in",
                    "enum": ["celsius", "fahrenheit"],
                },
            },
            "required": ["country", "days", "unit"],
        },
    },
]
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("tool_choice", ["auto", "required"])
async def test_function_tool_use(
    client: openai.AsyncOpenAI, model_name: str, tool_choice: str
):
    """The model should emit a reasoning item and a JSON-parseable tool call."""
    prompt = [
        {
            "role": "user",
            "content": "Can you tell me what the current weather is in Berlin and the "
            "forecast for the next 5 days, in fahrenheit?",
        },
    ]
    response = await client.responses.create(
        model=model_name,
        input=prompt,
        tools=tools,
        tool_choice=tool_choice,
        temperature=0.0,
    )
    assert len(response.output) >= 1
    # Scan the outputs, remembering the last item of each kind.
    tool_call = None
    reasoning = None
    for item in response.output:
        if item.type == "function_call":
            tool_call = item
        elif item.type == "reasoning":
            reasoning = item
    assert tool_call is not None
    assert tool_call.type == "function_call"
    assert json.loads(tool_call.arguments) is not None
    assert reasoning is not None
    assert reasoning.type == "reasoning"
@pytest.mark.asyncio
async def test_named_tool_use(client: openai.AsyncOpenAI):
    """Force a specific tool via tool_choice and complete the round trip
    (call the tool locally, feed the result back, expect a final answer)."""

    def get_weather(latitude: float, longitude: float) -> str:
        """
        Mock function to simulate getting weather data.
        In a real application, this would call an external weather API.
        """
        return f"Current temperature at ({latitude}, {longitude}) is 20°C."

    tools = [
        {
            "type": "function",
            "name": "get_weather",
            "description": (
                "Get current temperature for provided coordinates in celsius."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "latitude": {"type": "number"},
                    "longitude": {"type": "number"},
                },
                "required": ["latitude", "longitude"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    ]
    input_messages = [
        {"role": "user", "content": "What's the weather like in Paris today?"}
    ]
    response = await client.responses.create(
        model=MODEL_NAME,
        input=input_messages,
        tools=tools,
        tool_choice={"type": "function", "name": "get_weather"},
    )
    assert len(response.output) >= 1
    # Initialize before scanning so a response without a function_call fails
    # the assertion below instead of raising NameError (and matches
    # test_function_tool_use above).
    tool_call = None
    for out in response.output:
        if out.type == "function_call":
            tool_call = out
    assert tool_call is not None
    assert tool_call.type == "function_call"
    assert tool_call.name == "get_weather"
    args = json.loads(tool_call.arguments)
    assert args["latitude"] is not None
    assert args["longitude"] is not None
    # call the tool
    result = get_weather(args["latitude"], args["longitude"])
    input_messages.append(tool_call)  # append model's function call message
    input_messages.append(
        {  # append result message
            "type": "function_call_output",
            "call_id": tool_call.call_id,
            "output": str(result),
        }
    )
    # create a new response with the tool call result
    response_2 = await client.responses.create(model=MODEL_NAME, input=input_messages)
    # check the output
    assert len(response_2.output_text) > 0
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/entrypoints/openai/serving_responses/test_function_call.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/test_lmcache_integration.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# NOTE: if your PR has broken one of the tests here (sorry),
# kindly patch the corresponding integration in
# /vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py
# or reach out to @aposataC for assistance
# Assumption vs. Correctness Tests:
# these unit tests do *not* test correctness of LMCache-side or vLLM-side logic
# it is to ensure that assumptions LMCache makes about vLLM's interface are stable
import pytest
from vllm.platforms import current_platform
def assumes(obj, attr, is_callable=False, is_instance_of=None):
    """Assert that ``obj`` exposes ``attr``, as LMCache assumes it does.

    ``attr`` is resolved either as a real attribute or, for dataclasses whose
    fields are not class attributes, via the declared dataclass field (whose
    annotated type then stands in for the value). Optionally also checks that
    the resolved value is callable and/or matches ``is_instance_of``.
    """
    import inspect
    from dataclasses import is_dataclass

    assumption_msg = (
        f"LMCache connector currently assumes that {obj} has a(n) {attr} attribute"
    )
    if hasattr(obj, attr):
        attr_value = getattr(obj, attr)
    elif is_dataclass(obj) and attr in getattr(obj, "__dataclass_fields__", {}):
        # Fall back to the declared field type, unwrapping parameterized
        # generics to their origin (e.g. list[int] -> list).
        field = obj.__dataclass_fields__[attr]
        field_type = field.type
        origin = getattr(field_type, "__origin__", None)
        if origin is not None:
            field_type = origin
        attr_value = field_type
    else:
        raise AssertionError(assumption_msg)
    if is_callable:
        assumption_msg += f" and that {obj}.{attr} is a callable"
        assert callable(attr_value), assumption_msg
    if is_instance_of:
        assumption_msg += f" and that {obj}.{attr} is an instance of {is_instance_of}"
        if isinstance(attr_value, property):
            # Properties are validated through their getter's return
            # annotation rather than an instance value.
            fget = attr_value.fget
            assert fget is not None, f"Property {obj}.{attr} has no fget"
            sig = inspect.signature(fget)
            ret_anno = sig.return_annotation
            assert ret_anno is not inspect._empty, (
                f"Property {obj}.{attr} has no return annotation"
            )
            assert ret_anno == is_instance_of, assumption_msg
        else:
            if isinstance(attr_value, type):
                # When resolution yielded a type (e.g. a dataclass field
                # type), require an exact match instead of isinstance.
                assert attr_value is is_instance_of, assumption_msg
            else:
                assert isinstance(attr_value, is_instance_of), assumption_msg
@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_multimodal_interface():
    """Guard the PlaceholderRange fields that LMCache reads."""
    from vllm.multimodal.inputs import PlaceholderRange

    for field in ("offset", "length"):
        assumes(PlaceholderRange, field)
@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_config_interface():
    """Guard the config attributes and accessors that LMCache reads,
    then build a minimal KV-metadata tuple the way LMCache does."""
    # protect against interface changes
    from vllm.config import VllmConfig
    from vllm.config.cache import CacheConfig
    from vllm.config.kv_transfer import KVTransferConfig
    from vllm.config.model import ModelConfig
    from vllm.config.parallel import ParallelConfig

    assumes(VllmConfig, "model_config")
    assumes(VllmConfig, "cache_config")
    assumes(VllmConfig, "parallel_config")
    assumes(VllmConfig, "kv_transfer_config")
    assumes(KVTransferConfig, "kv_role")
    assumes(KVTransferConfig, "kv_connector_extra_config")
    assumes(ModelConfig, "use_mla", is_instance_of=bool)
    assumes(ModelConfig, "dtype")
    assumes(ModelConfig, "max_model_len")
    assumes(ModelConfig, "get_vocab_size", is_callable=True)
    assumes(ModelConfig, "get_num_attention_heads", is_callable=True)
    assumes(ModelConfig, "get_num_kv_heads", is_callable=True)
    assumes(ModelConfig, "get_head_size", is_callable=True)
    # NOTE: a duplicate get_num_kv_heads check was removed here.
    assumes(ModelConfig, "get_num_layers", is_callable=True)
    assumes(ModelConfig, "model")
    assumes(ParallelConfig, "world_size")
    assumes(ParallelConfig, "rank")
    assumes(ParallelConfig, "tensor_parallel_size")
    assumes(ParallelConfig, "pipeline_parallel_size")
    assumes(ParallelConfig, "data_parallel_size_local")
    assumes(ParallelConfig, "data_parallel_rank_local")
    assumes(CacheConfig, "cache_dtype")
    assumes(CacheConfig, "block_size")
    assumes(CacheConfig, "gpu_memory_utilization")
    # kv metadata minimal case
    from vllm.utils.torch_utils import get_kv_cache_torch_dtype

    model_config = ModelConfig(dtype="bfloat16")
    parallel_config = ParallelConfig()
    cache_config = CacheConfig(cache_dtype="bfloat16")
    kv_dtype = get_kv_cache_torch_dtype(cache_config.cache_dtype, model_config.dtype)
    use_mla = False
    chunk_size = 256
    num_layer = model_config.get_num_layers(parallel_config)
    num_kv_head = model_config.get_num_kv_heads(parallel_config)
    head_size = model_config.get_head_size()
    # MLA caches a single latent tensor per layer instead of separate K/V.
    kv_shape = (num_layer, 1 if use_mla else 2, chunk_size, num_kv_head, head_size)
    # dummy lmcache metadata creation example
    _ = (
        model_config.model,
        parallel_config.world_size,
        parallel_config.rank,
        "vllm",
        kv_dtype,
        kv_shape,
        use_mla,
    )
@pytest.mark.skipif(
    current_platform.is_rocm(), reason="Requires libcudart.so, not available on ROCm"
)
def test_request_interface():
    """Guard the Request and MultiModalFeatureSpec attributes LMCache reads."""
    from types import NoneType

    from vllm.sampling_params import SamplingParams
    from vllm.v1.request import Request

    sampling_params = SamplingParams(max_tokens=10)
    sampling_params.update_from_generation_config({}, eos_token_id=100)
    req = Request(
        request_id="test_request",
        prompt_token_ids=[1, 2, 3],
        sampling_params=sampling_params,
        pooling_params=None,
        lora_request=None,
    )
    assumes(req, "mm_features", is_instance_of=(list, NoneType))
    for attr in (
        "request_id",
        "priority",
        "prompt_token_ids",
        "sampling_params",
        "num_tokens",
    ):
        assumes(req, attr)
    assumes(req, "kv_transfer_params", is_instance_of=(dict, NoneType))
    from vllm.multimodal.inputs import MultiModalFeatureSpec

    assumes(MultiModalFeatureSpec, "identifier")
    assumes(MultiModalFeatureSpec, "mm_position")
def test_new_request_interface():
    """Guard the NewRequestData attributes LMCache reads."""
    from vllm.v1.core.sched.output import NewRequestData

    for attr in ("req_id", "block_ids", "prompt_token_ids", "sampling_params"):
        assumes(NewRequestData, attr)
def test_sampling_params_interface():
    """Guard SamplingParams.extra_args and its pass-through behavior."""
    from vllm.sampling_params import SamplingParams

    assumes(SamplingParams, "extra_args")
    # dumb example use case in LMCache
    kv_transfer_params = {
        "lmcache.tag.user": "example_user_1",
        "lmcache.ttl": 60,
    }
    params = SamplingParams(extra_args={"kv_transfer_params": kv_transfer_params})
    assert params.extra_args["kv_transfer_params"] == kv_transfer_params
def test_tp_interface():
    """Guard the GroupCoordinator methods LMCache calls for TP broadcast."""
    import inspect

    from vllm.distributed.parallel_state import get_tp_group

    # Recover the GroupCoordinator type from get_tp_group's return annotation
    # instead of importing it directly.
    group_coordinator = inspect.signature(get_tp_group).return_annotation
    assumes(group_coordinator, "broadcast", is_callable=True)
    assumes(group_coordinator, "broadcast_object", is_callable=True)
def test_forward_context_interface():
    """Guard the ForwardContext attributes LMCache reads."""
    from vllm.forward_context import ForwardContext

    assumes(ForwardContext, "no_compile_layers", is_instance_of=dict)
    for attr in ("virtual_engine", "attn_metadata"):
        assumes(ForwardContext, attr)
def test_scheduler_output_interface():
    """Guard the SchedulerOutput and CachedRequestData attributes LMCache reads."""
    from vllm.v1.core.sched.output import CachedRequestData, SchedulerOutput

    assumes(SchedulerOutput, "finished_req_ids")
    assumes(SchedulerOutput, "scheduled_new_reqs", is_instance_of=list)
    assumes(SchedulerOutput, "num_scheduled_tokens", is_instance_of=dict)
    assumes(SchedulerOutput, "scheduled_cached_reqs")
    assumes(CachedRequestData, "req_ids", is_instance_of=list)
    assumes(CachedRequestData, "new_block_ids", is_instance_of=list)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_lmcache_integration.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/distributed/test_eplb_spec_decode.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
import lm_eval
import pytest
from tests.utils import large_gpu_mark
from vllm.platforms import current_platform
def get_model_args(
    model_name: str,
    spec_model_name: str | None,
    spec_method: str,
    tp_size: int,
    model_max_len: int,
    use_async: bool = False,
) -> dict:
    """Build the lm_eval "vllm" model_args dict enabling EPLB together with
    speculative decoding (mtp/eagle)."""
    eplb_config: dict = {
        "num_redundant_experts": tp_size,
        "window_size": 128,
        "step_interval": 1024,
        "log_balancedness": False,
    }
    # The async flag is only present when explicitly requested.
    if use_async:
        eplb_config["use_async"] = True
    return {
        "pretrained": model_name,
        "dtype": "auto",
        "add_bos_token": True,
        "tensor_parallel_size": tp_size,
        "gpu_memory_utilization": 0.7,
        "speculative_config": {
            "method": spec_method,
            "model": spec_model_name,
            "num_speculative_tokens": 1,
            "max_model_len": model_max_len,
        },
        "enable_expert_parallel": True,
        "eplb_config": eplb_config,
        "enable_eplb": True,
        "max_model_len": model_max_len,
    }
# Module-wide skip: EPLB + spec decode is not yet supported on ROCm.
pytestmark = pytest.mark.skipif(
    current_platform.is_rocm(),
    reason="EPLB with Spec Decode is a work in progress on ROCm.",
)
@pytest.mark.parametrize(
    "model_setup",
    [
        pytest.param(
            ("mtp", "Qwen/Qwen3-Next-80B-A3B-Instruct", None, 4, 0.86),
            marks=large_gpu_mark(min_gb=80),
        ),
        pytest.param(
            (
                "eagle",
                "meta-llama/Llama-4-Scout-17B-16E-Instruct",
                "morgendave/EAGLE-Llama-4-Scout-17B-16E-Instruct",
                4,
                0.92,
            ),
            marks=pytest.mark.skip(reason="Skipping due to CI OOM issues"),
        ),
    ],
    ids=["qwen3_next_mtp", "llama4_eagle"],
)
def test_eplb_spec_decode(
    monkeypatch: pytest.MonkeyPatch,
    model_setup: tuple[str, str, str, int, float],
):
    """
    Test the correctness of EPLB speculative decoding with GSM8K dataset.
    Applicable to MoE models with mtp or eagle spec decode.
    """
    method, model_name, spec_model_name, tp_size, expected_gsm8k_value = model_setup
    task = "gsm8k"
    metric = "exact_match,strict-match"
    tolerance = 0.03
    results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=get_model_args(
            model_name=model_name,
            spec_model_name=spec_model_name,
            spec_method=method,
            tp_size=tp_size,
            model_max_len=4096,
        ),
        tasks=task,
        batch_size=64,
        num_fewshot=8,
    )
    measured_value = results["results"][task][metric]
    # Accuracy must land within an absolute tolerance of the expected score.
    assert abs(measured_value - expected_gsm8k_value) < tolerance, (
        f"Expected: {expected_gsm8k_value} | Measured: {measured_value}"
    )
@large_gpu_mark(min_gb=80)
def test_eplb_spec_decode_qwen3_next_mtp_async() -> None:
    """
    Ensure async EPLB works with MTP speculative decoding for Qwen3-Next.
    """
    task = "gsm8k"
    metric = "exact_match,strict-match"
    tolerance = 0.03
    expected_gsm8k_value = 0.86
    results = lm_eval.simple_evaluate(
        model="vllm",
        model_args=get_model_args(
            model_name="Qwen/Qwen3-Next-80B-A3B-Instruct",
            spec_model_name=None,
            spec_method="mtp",
            tp_size=4,
            model_max_len=4096,
            use_async=True,
        ),
        tasks=task,
        batch_size=64,
        num_fewshot=8,
    )
    measured_value = results["results"][task][metric]
    # Accuracy must land within an absolute tolerance of the expected score.
    assert abs(measured_value - expected_gsm8k_value) < tolerance, (
        f"Expected: {expected_gsm8k_value} | Measured: {measured_value}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_eplb_spec_decode.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/utils/registry.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any, TypeVar
_T = TypeVar("_T", bound=type)
class ExtensionManager:
    """
    A registry for managing pluggable extension classes.
    This class provides a simple mechanism to register and instantiate
    extension classes by name. It is commonly used to implement plugin
    systems where different implementations can be swapped at runtime.
    Examples:
        Basic usage with a registry instance:
        >>> FOO_REGISTRY = ExtensionManager()
        >>> @FOO_REGISTRY.register("my_foo_impl")
        ... class MyFooImpl(Foo):
        ...     def __init__(self, value):
        ...         self.value = value
        >>> foo_impl = FOO_REGISTRY.load("my_foo_impl", value=123)
    """

    def __init__(self) -> None:
        """
        Initialize an empty extension registry.
        """
        # Maps registration name -> registered class.
        self.name2class: dict[str, type] = {}

    def register(self, name: str) -> Callable[[_T], _T]:
        """
        Decorator to register a class with the given name.
        A later registration under the same name overwrites the earlier one.
        """

        def wrap(cls_to_register: _T) -> _T:
            self.name2class[name] = cls_to_register
            return cls_to_register

        return wrap

    def load(self, cls_name: str, *args, **kwargs) -> Any:
        """
        Instantiate and return a registered extension class by name.

        Raises:
            KeyError: if no class was registered under ``cls_name``.
                (Previously an ``assert``, which is silently stripped
                under ``python -O``.)
        """
        cls = self.name2class.get(cls_name)
        if cls is None:
            raise KeyError(f"Extension class {cls_name} not found")
        return cls(*args, **kwargs)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/utils/registry.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/openpangu.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from collections.abc import Callable, Iterable
from typing import Any
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ParallelConfig, VllmConfig
from vllm.distributed import (
get_ep_group,
get_pp_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
get_tp_group,
tensor_model_parallel_all_gather,
)
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import (
Attention,
StaticSinkAttention,
)
from vllm.model_executor.layers.fused_moe import SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mla import MLAModules, MultiHeadLatentAttentionWrapper
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.interfaces import (
MixtureOfExperts,
SupportsLoRA,
SupportsPP,
)
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
PPMissingLayer,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
sequence_parallel_chunk,
)
from vllm.model_executor.utils import set_weight_attrs
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.config import set_default_rope_theta
from vllm.v1.attention.backend import AttentionType
from vllm.v1.attention.backends.flash_attn_diffkv import FlashAttentionDiffKVBackend
def check_ffn_act_fn(act_fn: str):
    """Reject any FFN activation other than "silu"."""
    if act_fn == "silu":
        return
    raise ValueError(
        f"Unsupported activation: {act_fn}. Only silu is supported for now."
    )
class OpenPanguMLP(nn.Module):
    """SiLU-gated MLP (gate/up -> SiluAndMul -> down) built from
    tensor-parallel linear layers."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        reduce_results: bool = True,
        is_sequence_parallel: bool = False,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # gate_proj and up_proj fused into a single column-parallel GEMM.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=bias,
            quant_config=quant_config,
            # NOTE(review): disable_tp presumably replicates this layer when
            # sequence-parallel MoE is enabled — confirm against the layer impl.
            disable_tp=is_sequence_parallel,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=bias,
            quant_config=quant_config,
            reduce_results=reduce_results,
            disable_tp=is_sequence_parallel,
            prefix=f"{prefix}.down_proj",
        )
        # Only "silu" is accepted; anything else raises ValueError.
        check_ffn_act_fn(hidden_act)
        self.act_fn = SiluAndMul()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # [0] selects the output tensor from each parallel linear's
        # (output, bias) return pair.
        return self.down_proj(self.act_fn(self.gate_up_proj(x)[0]))[0]
class OpenPanguMoE(nn.Module):
    """Mixture-of-experts FFN block for OpenPangu.

    A replicated, sigmoid-scored router (``gate``) selects routed experts
    executed by a ``SharedFusedMoE`` layer; optional shared experts are
    evaluated alongside the routed ones.  Supports EPLB (expert-parallel
    load balancing) via redundant physical experts, and optional
    sequence-parallel execution of the MoE.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        parallel_config: ParallelConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tp_group().rank_in_group
        self.routed_scaling_factor = config.routed_scaling_factor
        self.ep_group = get_ep_group().device_group
        self.ep_rank = self.ep_group.rank()
        self.ep_size = self.ep_group.size()
        self.n_routed_experts: int = config.n_routed_experts
        self.n_shared_experts: int = config.n_shared_experts
        self.is_sequence_parallel = parallel_config.use_sequence_parallel_moe
        check_ffn_act_fn(config.hidden_act)
        # The router is replicated on every rank and never quantized.
        self.gate = ReplicatedLinear(
            config.hidden_size,
            config.n_routed_experts,
            bias=False,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )
        if (
            hasattr(config, "router_enable_expert_bias")
            and config.router_enable_expert_bias
        ):
            # Optional per-expert additive score-correction bias, kept in fp32.
            self.gate.e_score_correction_bias = nn.Parameter(
                torch.empty(self.n_routed_experts, dtype=torch.float32)
            )
        else:
            self.gate.e_score_correction_bias = None
        # Load balancing settings (EPLB): redundant physical experts are
        # distributed evenly across the expert-parallel group.
        eplb_config = parallel_config.eplb_config
        self.enable_eplb = parallel_config.enable_eplb
        self.n_redundant_experts = eplb_config.num_redundant_experts
        self.n_logical_experts = self.n_routed_experts
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        if config.n_shared_experts is not None:
            # Shared experts are a single dense MLP sized as a multiple of the
            # per-expert intermediate size; results are reduced later.
            intermediate_size = config.moe_intermediate_size * config.n_shared_experts
            self.shared_experts = OpenPanguMLP(
                hidden_size=config.hidden_size,
                intermediate_size=intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                is_sequence_parallel=self.is_sequence_parallel,
                reduce_results=False,
                prefix=f"{prefix}.shared_experts",
            )
        else:
            self.shared_experts = None
        self.experts = SharedFusedMoE(
            shared_experts=self.shared_experts,
            num_experts=config.n_routed_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=False,
            renormalize=config.norm_topk_prob,
            quant_config=quant_config,
            use_grouped_topk=True,
            num_expert_group=1,
            topk_group=1,
            prefix=f"{prefix}.experts",
            scoring_func="sigmoid",
            # we do scaling outside, set factor to 1.0 to avoid double mul
            routed_scaling_factor=1.0,
            e_score_correction_bias=self.gate.e_score_correction_bias,
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
            is_sequence_parallel=self.is_sequence_parallel,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Route ``hidden_states`` through shared + routed experts.

        Input/output shape: ``(num_tokens, hidden_dim)``.
        """
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        if self.is_sequence_parallel:
            # Each TP rank processes only its chunk of the (padded) sequence.
            hidden_states = sequence_parallel_chunk(hidden_states)
        router_logits, _ = self.gate(hidden_states)
        fused_moe_out = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        shared_output, final_hidden_states = fused_moe_out
        if self.shared_experts is None:
            assert shared_output is None
            # No shared experts: apply the routed scaling here, except for
            # fp16 where scaling is deferred to avoid overflow (the decoder
            # layer divides by routed_scaling_factor in its fp16 path).
            if hidden_states.dtype != torch.float16:
                final_hidden_states *= self.routed_scaling_factor
        elif self.shared_experts is not None:
            assert shared_output is not None
            # NOTE(review): with shared experts, the shared output is always
            # scaled down by 1/routed_scaling_factor while the routed output
            # is left unscaled here, regardless of dtype — presumably the
            # decoder layer's fp16 rescale compensates; confirm this is also
            # the intended scaling for non-fp16 configs.
            shared_output *= 1.0 / self.routed_scaling_factor
        if self.shared_experts is not None:
            assert shared_output is not None
            final_hidden_states += shared_output
        if self.is_sequence_parallel:
            # Re-assemble the full sequence and drop any padding rows.
            final_hidden_states = tensor_model_parallel_all_gather(
                final_hidden_states, 0
            )
            final_hidden_states = final_hidden_states[:num_tokens]
        elif self.tp_size > 1:
            final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel(
                final_hidden_states
            )
        return final_hidden_states.view(num_tokens, hidden_dim)
class OpenPanguMLAAttention(nn.Module):
    """Multi-head latent attention (MLA) for OpenPangu.

    Keys/values always pass through the ``kv_lora_rank`` low-rank bottleneck
    with a decoupled rotary component of ``qk_rope_head_dim`` dims; queries
    optionally pass through a ``q_lora_rank`` bottleneck.  The attention
    math itself is delegated to ``MultiHeadLatentAttentionWrapper``.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        hidden_size: int,
        num_heads: int,
        qk_nope_head_dim: int,
        qk_rope_head_dim: int,
        v_head_dim: int,
        q_lora_rank: int | None,
        kv_lora_rank: int,
        max_position_embeddings: int = 8192,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        # Full per-head query/key width = non-rotary part + rotary part.
        self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.tp_size = get_tensor_model_parallel_world_size()
        if num_heads % self.tp_size != 0:
            raise ValueError(
                f"num_heads {num_heads} is not divisible by tp_size {self.tp_size}."
            )
        self.num_local_heads = num_heads // self.tp_size
        self.scaling = self.qk_head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.prefix = prefix
        if self.q_lora_rank is not None:
            # Low-rank query path: the q_a and kv_a down-projections are
            # fused into one linear that is replicated (TP disabled).
            self.fused_qkv_a_proj = MergedColumnParallelLinear(
                self.hidden_size,
                [self.q_lora_rank, self.kv_lora_rank + self.qk_rope_head_dim],
                bias=False,
                quant_config=quant_config,
                prefix=f"{prefix}.fused_qkv_a_proj",
                disable_tp=True,
            )
            self.q_a_layernorm = RMSNorm(self.q_lora_rank, eps=config.rms_norm_eps)
            self.q_b_proj = ColumnParallelLinear(
                q_lora_rank,
                self.num_heads * self.qk_head_dim,
                bias=False,
                quant_config=quant_config,
                prefix=f"{prefix}.q_b_proj",
            )
        else:
            # Full-rank query path straight from the hidden states; the KV
            # down-projection then lives in its own replicated linear.
            self.q_proj = ColumnParallelLinear(
                self.hidden_size,
                self.num_heads * self.qk_head_dim,
                bias=False,
                quant_config=quant_config,
                prefix=f"{prefix}.q_proj",
            )
            self.kv_a_proj_with_mqa = ReplicatedLinear(
                self.hidden_size,
                self.kv_lora_rank + self.qk_rope_head_dim,
                bias=False,
                quant_config=quant_config,
                prefix=f"{prefix}.kv_a_proj_with_mqa",
            )
        self.kv_a_layernorm = RMSNorm(self.kv_lora_rank, eps=config.rms_norm_eps)
        self.kv_b_proj = ColumnParallelLinear(
            self.kv_lora_rank,
            self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.kv_b_proj",
        )
        self.o_proj = RowParallelLinear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # TODO: remove hard coding
        set_default_rope_theta(config, default_theta=10000)
        # DeepSeek-style YaRN rotary parameters, applied only to the
        # decoupled rotary dims (qk_rope_head_dim).
        rope_parameters = {
            "rope_theta": config.rope_parameters["rope_theta"],
            "beta_fast": 32,
            "beta_slow": 1,
            "factor": 1,
            "mscale": 1.0,
            "mscale_all_dim": 1.0,
            "original_max_position_embeddings": max_position_embeddings,
            "type": "yarn",
            "rope_type": "deepseek_yarn",
        }
        self.rotary_emb = get_rope(
            qk_rope_head_dim,
            max_position=max_position_embeddings,
            rope_parameters=rope_parameters,
            is_neox_style=False,
        )
        # Hand the projections to the shared MLA implementation; exactly one
        # of the two query paths (low-rank vs full-rank) is populated.
        mla_modules = MLAModules(
            kv_a_layernorm=self.kv_a_layernorm,
            kv_b_proj=self.kv_b_proj,
            rotary_emb=self.rotary_emb,
            o_proj=self.o_proj,
            fused_qkv_a_proj=self.fused_qkv_a_proj
            if self.q_lora_rank is not None
            else None,
            kv_a_proj_with_mqa=self.kv_a_proj_with_mqa
            if self.q_lora_rank is None
            else None,
            q_a_layernorm=self.q_a_layernorm if self.q_lora_rank is not None else None,
            q_b_proj=self.q_b_proj if self.q_lora_rank is not None else None,
            q_proj=self.q_proj if self.q_lora_rank is None else None,
            indexer=None,
            is_sparse=False,
            topk_indices_buffer=None,
        )
        self.mla_attn = MultiHeadLatentAttentionWrapper(
            self.hidden_size,
            self.num_local_heads,
            self.scaling,
            self.qk_nope_head_dim,
            self.qk_rope_head_dim,
            self.v_head_dim,
            self.q_lora_rank,
            self.kv_lora_rank,
            mla_modules,
            cache_config,
            quant_config,
            prefix,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Delegate attention entirely to the MLA wrapper."""
        return self.mla_attn(positions, hidden_states)
class OpenPanguEmbeddedAttention(nn.Module):
    """Standard multi-head (optionally grouped-query) attention for the dense
    OpenPangu-Embedded models.

    Uses a fused QKV column-parallel projection, rotary position embeddings,
    and an optional per-layer (possibly interleaved) sliding window.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position_embeddings: int = 8192,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        bias_o_proj: bool = False,
        cache_config: CacheConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
    ) -> None:
        super().__init__()
        layer_idx = extract_layer_index(prefix)
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        if self.total_num_heads % tp_size != 0:
            raise ValueError(
                f"total_num_heads {self.total_num_heads} "
                f"is not divisible by tp_size {tp_size}."
            )
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads > tp_size and self.total_num_kv_heads % tp_size != 0:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel ranks.
            raise ValueError(
                "Number of KV heads is greater than TP size, "
                f"but total_num_kv_heads {self.total_num_kv_heads} "
                f"is not divisible by tp_size {tp_size}."
            )
        elif (
            self.total_num_kv_heads < tp_size and tp_size % self.total_num_kv_heads != 0
        ):
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel ranks.
            raise ValueError(
                f"Number of KV heads is less than TP size, but tp_size {tp_size} "
                f"is not divisible by total_num_kv_heads {self.total_num_kv_heads}."
            )
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        # Fall back to hidden_size / num_heads when the config does not give
        # an explicit head_dim.
        head_dim = getattr(config, "head_dim", None)
        if head_dim is None:
            head_dim = self.hidden_size // self.total_num_heads
        self.head_dim = head_dim
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.qkv_proj = QKVParallelLinear(
            hidden_size=hidden_size,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_kv_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            input_size=self.total_num_heads * self.head_dim,
            output_size=hidden_size,
            bias=bias_o_proj,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self._init_rotary_emb(config, quant_config=quant_config)
        # Per-layer sliding window: a single int applies to every layer; a
        # list is indexed cyclically by layer index (interleaved windows).
        if hasattr(config, "interleaved_sliding_window"):
            interleaved_sliding_window = config.interleaved_sliding_window
            if isinstance(interleaved_sliding_window, int):
                sliding_window = interleaved_sliding_window
            elif isinstance(interleaved_sliding_window, list):
                sw_idx = layer_idx % len(interleaved_sliding_window)
                sliding_window = interleaved_sliding_window[sw_idx]
            else:
                raise ValueError(
                    f"{type(interleaved_sliding_window)} "
                    "for interleaved_sliding_window is not supported."
                )
        else:
            sliding_window = None
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            per_layer_sliding_window=sliding_window,
            attn_type=attn_type,
            prefix=f"{prefix}.attn",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Fused QKV projection, RoPE, attention, output projection."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output

    def _init_rotary_emb(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None,
    ) -> None:
        """Build the rotary embedding; GGUF PanguEmbedded checkpoints use the
        non-neox (GPT-J style) rotation layout."""
        is_neox_style = True
        is_gguf = quant_config and quant_config.get_name() == "gguf"
        if is_gguf and config.model_type == "PanguEmbedded":
            is_neox_style = False
        rope_parameters = config.rope_parameters or {}
        # After the `or {}` fallback, rope_parameters is never None, so the
        # None test below is redundant; only the "mrope_interleaved" flag
        # actually gates the override.
        if rope_parameters is not None and rope_parameters.get(
            "mrope_interleaved", False
        ):
            rope_parameters["rope_type"] = "openpangu"
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=self.max_position_embeddings,
            rope_parameters=rope_parameters,
            is_neox_style=is_neox_style,
        )
class OpenPanguSinkAttention(nn.Module):
    """Attention with learned "sink" key/value entries for OpenPangu.

    Differences from standard attention visible in this class:
    * K and V have different per-head widths (``head_dim`` = qk_nope_dim +
      qk_rope_dim for keys vs ``v_channels`` for values), served by the
      ``FlashAttentionDiffKVBackend``.
    * ``param_sink_number`` learned sink key (and optionally value) entries
      are normalized and handed to the attention backend via
      ``update_sink_kv`` after weight loading.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_parameters: dict[str, Any] | None = None,
        max_position_embeddings: int = 8192,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        bias_o_proj: bool = False,
        cache_config: CacheConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
    ) -> None:
        super().__init__()
        layer_idx = extract_layer_index(prefix)
        self.hidden_size = hidden_size
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.total_num_heads = num_heads
        if self.total_num_heads % self.tp_size != 0:
            raise ValueError(
                f"total_num_heads {self.total_num_heads} "
                f"is not divisible by tp_size {self.tp_size}."
            )
        self.num_heads = self.total_num_heads // self.tp_size
        self.total_num_kv_heads = num_kv_heads
        if (
            self.total_num_kv_heads > self.tp_size
            and self.total_num_kv_heads % self.tp_size != 0
        ):
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel ranks.
            raise ValueError(
                "Number of KV heads is greater than TP size, "
                f"but total_num_kv_heads {self.total_num_kv_heads} "
                f"is not divisible by tp_size {self.tp_size}."
            )
        elif self.total_num_kv_heads < self.tp_size:
            # TODO: Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel ranks.
            raise ValueError(
                f"Number of KV heads {self.total_num_kv_heads} is less than "
                f"TP size {self.tp_size}, KV heads replication is not support yet."
            )
        self.num_kv_heads = max(1, self.total_num_kv_heads // self.tp_size)
        self.qk_nope_dim = getattr(config, "qk_nope_dim", None)
        self.qk_rope_dim = getattr(config, "qk_rope_dim", None)
        self.v_channels = getattr(config, "v_channels", None)
        # Keys carry both the rotary and non-rotary parts; values use their
        # own (typically smaller) width v_channels.
        self.head_dim = self.qk_rope_dim + self.qk_nope_dim
        self.q_size = self.num_heads * self.head_dim
        self.k_size = self.num_kv_heads * self.head_dim
        self.v_size = self.num_kv_heads * self.v_channels
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.param_sink_number = getattr(config, "param_sink_number", 0)
        self.param_sink_with_value = getattr(config, "param_sink_with_value", False)
        self.param_sink_scalar = getattr(config, "param_sink_scalar", None)
        # NOTE(review): attribute name ("..._of_head_num") and config key
        # ("..._of_head_dim") disagree, and the attribute is not used in the
        # code visible here — confirm which spelling is intended.
        self.param_sink_of_head_num = getattr(config, "param_sink_of_head_dim", False)
        # q/k/v sizes above are per-rank; multiply by tp_size to recover the
        # global output sizes the merged linear expects to shard.
        self.qkv_proj = MergedColumnParallelLinear(
            input_size=hidden_size,
            output_sizes=[
                self.q_size * self.tp_size,
                self.k_size * self.tp_size,
                self.v_size * self.tp_size,
            ],
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            input_size=self.total_num_heads * self.v_channels,
            output_size=hidden_size,
            bias=bias_o_proj,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.k_layernorm = RMSNorm(self.head_dim, eps=config.rms_norm_eps)
        self._init_rotary_emb(
            config, rope_parameters=rope_parameters, quant_config=quant_config
        )
        # Per-layer sliding window: int applies everywhere; a list is indexed
        # cyclically by layer index.
        if hasattr(config, "interleaved_sliding_window"):
            interleaved_sliding_window = config.interleaved_sliding_window
            if isinstance(interleaved_sliding_window, int):
                sliding_window = interleaved_sliding_window
            elif isinstance(interleaved_sliding_window, list):
                sw_idx = layer_idx % len(interleaved_sliding_window)
                sliding_window = interleaved_sliding_window[sw_idx]
            else:
                raise ValueError(
                    f"{type(interleaved_sliding_window)} "
                    "for interleaved_sliding_window is not supported."
                )
        else:
            sliding_window = None
        FlashAttentionDiffKVBackend.set_head_size_v(self.v_channels)
        self.attn = StaticSinkAttention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            sink_len=self.param_sink_number,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            per_layer_sliding_window=sliding_window,
            attn_type=attn_type,
            prefix=f"{prefix}.attn",
            attn_backend=FlashAttentionDiffKVBackend,
            head_size_v=self.v_channels,
        )
        if self.param_sink_number > 0:
            # Learned sink keys, sharded over KV heads (output_dim=1) by the
            # custom weight_loader below.
            self.param_sink_key = torch.nn.Parameter(
                torch.empty(
                    (
                        self.param_sink_number,
                        self.num_kv_heads,
                        self.head_dim,
                    ),
                    device=current_platform.current_device(),
                    dtype=config.torch_dtype,
                )
            )
            set_weight_attrs(
                self.param_sink_key,
                {
                    "output_dim": 1,
                    "weight_loader": self.weight_loader,
                },
            )
            if self.param_sink_with_value:
                self.param_sink_value = torch.nn.Parameter(
                    torch.empty(
                        (
                            self.param_sink_number,
                            self.num_kv_heads,
                            self.v_channels,
                        ),
                        device=current_platform.current_device(),
                        dtype=config.torch_dtype,
                    )
                )
                set_weight_attrs(
                    self.param_sink_value,
                    {
                        "output_dim": 1,
                        "weight_loader": self.weight_loader,
                    },
                )
            else:
                # Plain zero tensor (not an nn.Parameter, not registered as a
                # buffer): sink values contribute nothing when the checkpoint
                # has no sink-value weights.
                self.param_sink_value = torch.zeros(
                    (
                        self.param_sink_number,
                        self.num_kv_heads,
                        self.v_channels,
                    ),
                    device=current_platform.current_device(),
                    dtype=config.torch_dtype,
                )
        # To enable dummy runs before the real weights are loaded.
        self.post_weight_load()

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor):
        """TP-aware loader for the sink parameters (narrows along output_dim
        by this rank's shard), with GGUF / bitsandbytes special cases."""
        output_dim = getattr(param, "output_dim", None)
        is_sharded_weight = getattr(param, "is_sharded_weight", False)
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
        # bitsandbytes loads the weights of the specific portion
        # no need to narrow
        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit
        # Special case for GGUF
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()
        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, nn.UninitializedParameter):
            final_shape = list(loaded_weight.shape)
            if output_dim is not None:
                assert final_shape[output_dim] % self.tp_size == 0
                final_shape[output_dim] = final_shape[output_dim] // self.tp_size
            param.materialize(final_shape, dtype=loaded_weight.dtype)
        param_data = param.data
        if output_dim is not None and not is_sharded_weight:
            shard_size = param_data.shape[output_dim]
            start_idx = self.tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx, shard_size)
        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)
        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """QKV projection, key RMSNorm, RoPE, sink attention, out projection."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)
        # Keys are RMS-normalized per head before the rotary embedding.
        k = self.k_layernorm(k.view(-1, self.num_kv_heads, self.head_dim))
        q, k = self.rotary_emb(positions, q, k)
        q = q.view(-1, self.q_size)
        k = k.view(-1, self.k_size)
        # Output width differs from the key width: v_channels per head.
        attn_output = self.attn(
            q,
            k,
            v,
            output_shape=torch.Size(
                [q.shape[0], q.shape[1] // self.head_dim * self.v_channels]
            ),
        )
        output, _ = self.o_proj(attn_output)
        return output

    def _init_rotary_emb(
        self,
        config: PretrainedConfig,
        rope_parameters: dict[str, Any] | None,
        quant_config: QuantizationConfig | None,
    ) -> None:
        """Build a partial-rotary (non-neox) embedding.

        NOTE: the ``rope_parameters`` argument is ignored — it is overwritten
        below with a dict carrying only the partial rotary factor.
        """
        is_neox_style = False
        rope_parameters = {"partial_rotary_factor": self.qk_rope_dim / self.head_dim}
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=self.max_position_embeddings,
            rope_parameters=rope_parameters,
            is_neox_style=is_neox_style,
        )

    def post_weight_load(self) -> None:
        """Push the (normalized) sink K/V into the attention backend.

        Called once after checkpoint loading, and once at construction so
        dummy runs work before weights exist.
        """
        if hasattr(self, "k_layernorm") and self.k_layernorm is not None:
            param_sink_key = self.k_layernorm(self.param_sink_key)
        else:
            param_sink_key = self.param_sink_key
        self.attn.update_sink_kv(param_sink_key, self.param_sink_value)
class OpenPanguDecoderLayer(nn.Module):
    """One OpenPangu transformer layer.

    Chooses between three attention variants from the HF config (MLA, sink
    attention, or standard GQA) and between a dense MLP and a MoE block
    (MoE from ``first_k_dense_replace`` onwards).  Supports an optional
    "sandwich" normalization scheme with extra pre/post-MLP norms.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        prefix: str,
        vllm_config: VllmConfig,
    ) -> None:
        super().__init__()
        if config is None:
            config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        parallel_config = vllm_config.parallel_config
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        # The layer index is the last dotted component of the prefix
        # (e.g. "model.layers.7").
        layer_idx = int(prefix.split(sep=".")[-1])
        self.layer_idx = layer_idx
        # MLA is selected purely by the presence of the MLA config fields.
        self.use_mla = (
            hasattr(config, "qk_nope_head_dim")
            and hasattr(config, "qk_rope_head_dim")
            and hasattr(config, "v_head_dim")
            and hasattr(config, "kv_lora_rank")
        )
        self.use_sink_attention = (
            hasattr(config, "param_sink_number") and config.param_sink_number > 0
        )
        if self.use_mla:
            self.self_attn = OpenPanguMLAAttention(
                config=config,
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                qk_nope_head_dim=config.qk_nope_head_dim,
                qk_rope_head_dim=config.qk_rope_head_dim,
                v_head_dim=config.v_head_dim,
                q_lora_rank=(
                    config.q_lora_rank if hasattr(config, "q_lora_rank") else None
                ),
                kv_lora_rank=config.kv_lora_rank,
                max_position_embeddings=max_position_embeddings,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=f"{prefix}.self_attn",
            )
        elif self.use_sink_attention:
            attention_bias = getattr(config, "attention_bias", False) or getattr(
                config, "bias", False
            )
            bias_o_proj = attention_bias
            # qkv_bias, when present, overrides the generic attention bias for
            # the QKV projection only.
            if hasattr(config, "qkv_bias"):
                attention_bias = config.qkv_bias
            if getattr(config, "is_causal", True):
                attn_type = AttentionType.DECODER
            else:
                raise ValueError(
                    f"is_causal={config.is_causal} is not support "
                    "for attention with sink"
                )
            rope_parameters = getattr(config, "rope_scaling", None)
            if rope_parameters is None:
                rope_parameters = {
                    "rope_type": "default",
                    "rope_theta": config.rope_theta,
                }
            self.self_attn = OpenPanguSinkAttention(
                config=config,
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                num_kv_heads=getattr(
                    config, "num_key_value_heads", config.num_attention_heads
                ),
                rope_parameters=rope_parameters,
                max_position_embeddings=max_position_embeddings,
                quant_config=quant_config,
                bias=attention_bias,
                bias_o_proj=bias_o_proj,
                cache_config=cache_config,
                prefix=f"{prefix}.self_attn",
                attn_type=attn_type,
            )
        else:
            attention_bias = getattr(config, "attention_bias", False) or getattr(
                config, "bias", False
            )
            bias_o_proj = attention_bias
            if hasattr(config, "qkv_bias"):
                attention_bias = config.qkv_bias
            # By default, PanguEmbedded uses causal attention
            # as it is a decoder-only model.
            # You can override the HF config with `is_causal=False` to enable
            # bidirectional attention, which is used in some embedding models
            if getattr(config, "is_causal", True):
                attn_type = AttentionType.DECODER
            else:
                attn_type = AttentionType.ENCODER_ONLY
            self.self_attn = OpenPanguEmbeddedAttention(
                config=config,
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                num_kv_heads=getattr(
                    config, "num_key_value_heads", config.num_attention_heads
                ),
                max_position_embeddings=max_position_embeddings,
                quant_config=quant_config,
                bias=attention_bias,
                bias_o_proj=bias_o_proj,
                cache_config=cache_config,
                prefix=f"{prefix}.self_attn",
                attn_type=attn_type,
            )
        # MoE layers start at first_k_dense_replace; earlier layers stay dense.
        if (
            getattr(config, "n_routed_experts", None) is not None
            and layer_idx >= config.first_k_dense_replace
        ):
            self.mlp = OpenPanguMoE(
                config=config,
                parallel_config=parallel_config,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        else:
            self.mlp = OpenPanguMLP(
                hidden_size=self.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                bias=getattr(config, "mlp_bias", False),
                prefix=f"{prefix}.mlp",
            )
        self.routed_scaling_factor = getattr(config, "routed_scaling_factor", 1.0)
        self.num_hidden_layers = config.num_hidden_layers
        self.first_k_dense_replace = getattr(
            config, "first_k_dense_replace", self.num_hidden_layers
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.tp_group = get_tp_group().device_group
        # Sandwich norm adds a second pair of norms around the MLP.
        self.sandwich_norm = getattr(config, "sandwich_norm", False)
        if self.sandwich_norm:
            self.pre_mlp_layernorm = RMSNorm(
                config.hidden_size, eps=config.rms_norm_eps
            )
            self.post_mlp_layernorm = RMSNorm(
                config.hidden_size, eps=config.rms_norm_eps
            )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run attention + MLP with fused residual handling.

        Returns the ``(hidden_states, residual)`` pair expected by the next
        layer / final norm.
        """
        if residual is None:
            residual = hidden_states.clone()
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        if (
            self.routed_scaling_factor is not None
            and hidden_states.dtype == torch.float16
        ):
            # Fix FP16 overflow
            # We scale both hidden_states and residual before
            # rmsnorm, and rmsnorm result would not affect by scale.
            hidden_states *= 1.0 / self.routed_scaling_factor
            if self.layer_idx == 0:
                # The residual is shared by all layers, we only scale it on
                # first layer.
                residual *= 1.0 / self.routed_scaling_factor
        if self.sandwich_norm:
            hidden_states = self.post_attention_layernorm(hidden_states)
            hidden_states, residual = self.pre_mlp_layernorm(hidden_states, residual)
        else:
            hidden_states, residual = self.post_attention_layernorm(
                hidden_states, residual
            )
        # Fully Connected
        hidden_states = self.mlp(hidden_states)
        if (
            self.routed_scaling_factor is not None
            and isinstance(self.mlp, OpenPanguMLP)
            and hidden_states.dtype == torch.float16
        ):
            # Dense-MLP fp16 path: undo the scaling here (MoE layers handle
            # their own scaling internally).
            hidden_states *= 1.0 / self.routed_scaling_factor
        if self.sandwich_norm:
            hidden_states = self.post_mlp_layernorm(hidden_states)
        return hidden_states, residual
@support_torch_compile
class OpenPanguModel(nn.Module):
    """OpenPangu transformer backbone: embeddings, decoder layers, final norm.

    Handles pipeline parallelism (missing layers/embeddings on non-owning
    ranks) and checkpoint loading, including fused-projection remapping and
    per-expert weight routing for MoE checkpoints.
    """

    fall_back_to_pt_during_load = False

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        eplb_config = vllm_config.parallel_config.eplb_config
        self.config = config
        self.num_redundant_experts = eplb_config.num_redundant_experts
        self.vocab_size = config.vocab_size
        # Embeddings live on the first PP rank; with tied word embeddings the
        # last rank also needs them for the LM head.
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            self.embed_tokens = PPMissingLayer()
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: OpenPanguDecoderLayer(config, prefix, vllm_config),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for ``input_ids``."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the decoder stack for this PP rank.

        Non-last ranks return IntermediateTensors to forward downstream;
        the last rank returns the final-normed hidden states.
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for i in range(self.start_layer, self.end_layer):
            layer = self.layers[i]
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def load_attn_mlp_weight(
        self,
        attn_mlp_replace_mapping: list[tuple[str, str, int]],
        params_dict: dict[str, Any],
        weight_name: str,
        loaded_weight: torch.Tensor,
        loaded_params: set[str],
    ) -> bool:
        """Try to load a checkpoint tensor into a fused attn/MLP projection.

        Maps checkpoint names (q_proj/k_proj/..., gate_proj/up_proj) onto the
        fused parameters and calls the parameter's sharded weight_loader.

        Returns:
            True if the weight matched a fused mapping and was loaded.
        """
        for param_name, origin_name, shard_id in attn_mlp_replace_mapping:
            if origin_name not in weight_name or (
                ("mlp.experts." in weight_name) and weight_name not in params_dict
            ):
                continue
            weight_name_mapped = weight_name.replace(origin_name, param_name)
            if (
                param_name == "fused_qkv_a_proj"
                and weight_name_mapped not in params_dict
            ):
                # Checkpoint uses the split q_a/kv_a layout but this model
                # was not built with the fused projection.
                continue
            else:
                weight_name = weight_name_mapped
            if weight_name.endswith(".bias") and weight_name not in params_dict:
                continue
            if is_pp_missing_parameter(weight_name, self):
                continue
            param = params_dict[weight_name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            loaded_params.add(weight_name)
            return True
        return False

    def load_expert_weight(
        self,
        expert_merge_mapping: list[tuple[str, str, int, str]],
        params_dict: dict[str, Any],
        weight_name: str,
        loaded_weight: torch.Tensor,
        loaded_params: set[str],
        flag_dict: dict[str, bool],
    ) -> bool:
        """Try to load a checkpoint tensor into the fused MoE expert weights.

        Sets ``flag_dict["is_expert_weight"]`` as soon as the name matches an
        expert pattern, even if this rank does not own the expert, so the
        caller can skip the generic fallback path.

        Returns:
            True if the expert weight was accepted by this rank's loader.
        """
        for mapping in expert_merge_mapping:
            param_name, origin_name, expert_id, shard_id = mapping
            if origin_name not in weight_name:
                continue
            flag_dict["is_expert_weight"] = True
            weight_name_mapped = weight_name.replace(origin_name, param_name)
            if is_pp_missing_parameter(weight_name_mapped, self):
                continue
            param = params_dict[weight_name_mapped]
            weight_loader = typing.cast(Callable[..., bool], param.weight_loader)
            # return_success=True: the loader reports False when this rank
            # does not own the target expert.
            success = weight_loader(
                param,
                loaded_weight,
                weight_name_mapped,
                shard_id=shard_id,
                expert_id=expert_id,
                return_success=True,
            )
            if success:
                weight_name = weight_name_mapped
                loaded_params.add(weight_name_mapped)
                return True
        return False

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load a checkpoint, remapping fused projections and expert weights.

        Returns:
            The set of parameter names that were actually loaded.
        """
        attn_mlp_replace_mapping = [
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".fused_qkv_a_proj", ".q_a_proj", 0),
            (".fused_qkv_a_proj", ".kv_a_proj_with_mqa", 1),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        has_experts = hasattr(self.config, "n_routed_experts")
        if has_experts:
            # expert_merge_mapping is only referenced below when has_experts
            # is True (short-circuit), so it is never unbound.
            expert_merge_mapping = SharedFusedMoE.make_expert_params_mapping(
                self,
                ckpt_gate_proj_name="gate_proj",
                ckpt_down_proj_name="down_proj",
                ckpt_up_proj_name="up_proj",
                num_experts=self.config.n_routed_experts,
                num_redundant_experts=self.num_redundant_experts,
            )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if self.config.tie_word_embeddings and "lm_head.weight" in name:
                continue
            if (
                "layers" in name
                and hasattr(self.config, "num_nextn_predict_layers")
                and (self.config.num_nextn_predict_layers > 0)
            ):
                layer_idx = int(name.split("layers.")[-1].split(".")[0])
                mtp_idx = layer_idx - self.config.num_hidden_layers
                if mtp_idx >= 0 and mtp_idx < self.config.num_nextn_predict_layers:
                    continue  # skip spec decode layers for main model
            flag_dict = {"is_expert_weight": False}
            if (
                self.load_attn_mlp_weight(
                    attn_mlp_replace_mapping,
                    params_dict,
                    name,
                    loaded_weight,
                    loaded_params,
                )
                or has_experts
                and self.load_expert_weight(
                    expert_merge_mapping,
                    params_dict,
                    name,
                    loaded_weight,
                    loaded_params,
                    flag_dict,
                )
            ):
                continue
            else:
                if flag_dict["is_expert_weight"]:
                    # Expert weight owned by another rank; nothing to do here.
                    continue
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # maybe_remap_kv_scale_name returns None for scale names that
                # should be dropped entirely; check for None BEFORE touching
                # the name again (calling .endswith on None would raise).
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
                if name.endswith("e_score_correction_bias"):
                    name = name.replace(
                        "e_score_correction_bias", "gate.e_score_correction_bias"
                    )
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
        self.post_weight_load()
        return loaded_params

    def post_weight_load(self) -> None:
        """Give every submodule a post-load hook (e.g. sink-KV refresh)."""
        for name, module in self.named_modules():
            if module is self:
                continue
            if hasattr(module, "post_weight_load"):
                module.post_weight_load()
class OpenPanguModelBase(nn.Module, SupportsPP, SupportsLoRA):
    """Common scaffolding for the dense and MoE OpenPangu checkpoints:
    backbone, LM head (last PP rank only), and logits processing."""

    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        # MLA checkpoints with a low-rank query path store q_a and kv_a fused.
        self.fuse_qkv_a_proj = getattr(config, "q_lora_rank", None) is not None
        if self.fuse_qkv_a_proj:
            self.packed_modules_mapping["fused_qkv_a_proj"] = [
                "q_a_proj",
                "kv_a_proj_with_mqa",
            ]
        self.model = OpenPanguModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        if not get_pp_group().is_last_rank:
            # Only the last PP rank computes logits.
            self.lm_head = PPMissingLayer()
        else:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
            if config.tie_word_embeddings:
                self.lm_head.weight = self.model.embed_tokens.weight
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate token-embedding lookup to the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the backbone; PP-intermediate tensors pass straight through."""
        return self.model(input_ids, positions, intermediate_tensors, inputs_embeds)

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project final hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights, skipping lm_head when it is tied to the embeddings."""
        skip = ["lm_head."] if self.config.tie_word_embeddings else None
        loader = AutoWeightsLoader(self, skip_prefixes=skip)
        return loader.load_weights(weights)
class OpenPanguMoEModel(OpenPanguModelBase, MixtureOfExperts):
    """Mixture-of-experts OpenPangu variant implementing the MoE interface."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        config = vllm_config.model_config.hf_config
        # MoE bookkeeping consumed by the MixtureOfExperts interface.
        self.expert_weights = []
        self.num_moe_layers = config.num_hidden_layers - config.first_k_dense_replace
        self.num_expert_groups = 1
        self.moe_layers = []
        example_moe = None
        for layer in self.model.layers:
            if isinstance(layer, PPMissingLayer):
                continue
            assert isinstance(layer, OpenPanguDecoderLayer)
            mlp = layer.mlp
            if not isinstance(mlp, OpenPanguMoE):
                continue
            # Keep the last MoE layer seen; the leading layers may be dense.
            example_moe = mlp
            self.moe_layers.append(mlp.experts)
        if example_moe is None:
            raise RuntimeError("No MOE layer found in model.layers.")
        # Mirror the representative layer's expert accounting at model level.
        self.num_logical_experts = example_moe.n_logical_experts
        self.num_physical_experts = example_moe.n_physical_experts
        self.num_local_physical_experts = example_moe.n_local_physical_experts
        self.n_routed_experts = example_moe.n_routed_experts
        self.n_shared_experts = example_moe.n_shared_experts
        self.num_redundant_experts = example_moe.n_redundant_experts

    def update_physical_experts_metadata(
        self,
        num_physical_experts: int,
        num_local_physical_experts: int,
    ) -> None:
        """Refresh expert counts after EPLB rebalancing and push them down
        to every MoE layer."""
        assert self.num_local_physical_experts == num_local_physical_experts
        self.num_physical_experts = num_physical_experts
        self.num_local_physical_experts = num_local_physical_experts
        self.num_redundant_experts = num_physical_experts - self.num_logical_experts
        for layer in self.model.layers:
            mlp = layer.mlp
            if not isinstance(mlp, OpenPanguMoE):
                continue
            mlp.n_local_physical_experts = num_local_physical_experts
            mlp.n_physical_experts = num_physical_experts
            mlp.n_redundant_experts = self.num_redundant_experts
            mlp.experts.update_expert_map()
class OpenPanguEmbeddedModel(OpenPanguModelBase):
    """Dense (non-MoE) OpenPangu model; all behavior comes from the base class."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
class PanguEmbeddedForCausalLM(OpenPanguEmbeddedModel):
    """Registry entry point for the dense PanguEmbedded causal LM."""

    pass
class PanguUltraMoEForCausalLM(OpenPanguMoEModel):
    """Registry entry point for the PanguUltra MoE causal LM."""

    pass
class PanguProMoEV2ForCausalLM(OpenPanguMoEModel):
    """Registry entry point for the PanguPro MoE v2 causal LM."""

    pass
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/openpangu.py",
"license": "Apache License 2.0",
"lines": 1245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/openpangu_mtp.py | #
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from
# https://github.com/vllm-project/vllm/blob/v0.7.3/vllm/model_executor/models/deepseek_mtp.py
from collections.abc import Iterable
import torch
import torch.nn as nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.deepseek_mtp import (
DeepSeekMultiTokenPredictor,
DeepSeekMultiTokenPredictorLayer,
SharedHead,
)
from vllm.model_executor.models.utils import maybe_prefix
from vllm.sequence import IntermediateTensors
from .openpangu import OpenPanguDecoderLayer
class OpenPanguMultiTokenPredictorLayer(DeepSeekMultiTokenPredictorLayer):
    """One multi-token-prediction layer built around an OpenPangu decoder block."""

    def __init__(self, vllm_config: VllmConfig, prefix: str) -> None:
        nn.Module.__init__(self)
        draft_config = vllm_config.speculative_config.draft_model_config.hf_config
        self.config = draft_config
        quant_config = vllm_config.quant_config
        hidden = draft_config.hidden_size
        eps = draft_config.rms_norm_eps
        # enorm normalizes the token embedding, hnorm the incoming hidden
        # state; eh_proj fuses the two (hence the 2x input width).
        self.enorm = RMSNorm(hidden, eps=eps)
        self.hnorm = RMSNorm(hidden, eps=eps)
        self.eh_proj = nn.Linear(2 * hidden, hidden, bias=False)
        self.shared_head = SharedHead(
            config=draft_config,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "shared_head"),
        )
        self.mtp_block = OpenPanguDecoderLayer(draft_config, prefix, vllm_config)
class OpenPanguMultiTokenPredictor(DeepSeekMultiTokenPredictor):
    """Container of MTP layers plus the shared embedding/logits machinery."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        config = vllm_config.model_config.hf_config
        # MTP layers occupy the index range directly after the target
        # model's hidden layers.
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers
        # Keyed by the absolute layer index (as a string) so checkpoint
        # weight names map directly onto ModuleDict entries.
        mtp_indices = range(
            self.mtp_start_layer_idx,
            self.mtp_start_layer_idx + self.num_mtp_layers,
        )
        self.layers = torch.nn.ModuleDict(
            {
                str(idx): OpenPanguMultiTokenPredictorLayer(
                    vllm_config, f"{prefix}.layers.{idx}"
                )
                for idx in mtp_indices
            }
        )
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)
@support_torch_compile
class OpenPanguMTP(nn.Module):
    """Multi-token-prediction (MTP) draft model wrapper for OpenPangu.

    Wraps OpenPanguMultiTokenPredictor and maps checkpoint weight names
    (which use absolute layer indices past the target model's layers)
    onto the predictor's modules.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.model = OpenPanguMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings through the predictor's embedding table."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run the predictor for one speculative step.

        Note: ``intermediate_tensors`` is accepted for interface compatibility
        but not forwarded to the underlying predictor.
        """
        hidden_states = self.model(
            input_ids,
            positions,
            hidden_states,
            inputs_embeds,
            spec_step_idx,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        """Delegate logits computation to the predictor."""
        return self.model.compute_logits(hidden_states, spec_step_idx)

    def get_spec_layer(self, name):
        """Return the absolute layer index encoded in a weight name if it
        belongs to an MTP (speculative) layer, else None."""
        if (
            "layers" in name
            and hasattr(self.config, "num_nextn_predict_layers")
            and self.config.num_nextn_predict_layers > 0
        ):
            layer_idx = int(name.split("layers.")[-1].split(".")[0])
            # MTP layers are appended after the target model's hidden layers.
            mtp_idx = layer_idx - self.config.num_hidden_layers
            if mtp_idx >= 0 and mtp_idx < self.config.num_nextn_predict_layers:
                return layer_idx
        return None

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights that belong to the MTP layers.

        Target-model weights (non-spec layers) are skipped; stacked and
        expert parameters are remapped to their fused counterparts. The
        for/else chains fall through from stacked -> expert -> plain loading.
        """
        # (fused param name, checkpoint shard name, shard index)
        stacked_params_mapping = [
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
            ("fused_qkv_a_proj", "q_a_proj", 0),
            ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
        ]
        expert_params_mapping = FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts,
        )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            spec_layer = self.get_spec_layer(name)
            if spec_layer is None:
                # Not an MTP-layer weight; owned by the target model.
                continue
            name = self._rewrite_spec_layer_name(spec_layer, name)
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name_mapped = name.replace(weight_name, param_name)
                # QKV fusion is optional, fall back to normal
                # weight loading if it's not enabled
                if (
                    param_name == "fused_qkv_a_proj"
                ) and name_mapped not in params_dict:
                    continue
                else:
                    name = name_mapped
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not a stacked parameter: try the expert mapping next.
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(
                        param,
                        loaded_weight,
                        name,
                        shard_id=shard_id,
                        expert_id=expert_id,
                    )
                    break
                else:
                    # Plain (non-stacked, non-expert) parameter.
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    # Shared weights (e.g. embed_tokens) are only loaded once,
                    # for the first MTP layer.
                    if (
                        spec_layer != self.model.mtp_start_layer_idx
                        and ".layers" not in name
                    ):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in transformer layer block for spec layer
        and rename shared layer weights to be top level.
        """
        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        shared_weight_names = ["embed_tokens"]
        spec_layer_weight = False
        shared_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                if weight_name in shared_weight_names:
                    shared_weight = True
                break
        if not spec_layer_weight:
            # treat rest weights as weights for transformer layer block
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        elif shared_weight:
            # treat shared weights as top level weights
            name = name.replace(f"model.layers.{spec_layer}.", "model.")
        return name
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/openpangu_mtp.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/kv_connector/unit/test_backwards_compatibility.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for backwards compatibility with external KV connector implementations.
This test ensures that external connectors (loaded via kv_connector_module_path)
implemented with the old signature continue to work:
- Old signature: __init__(self, vllm_config, role)
- New signature: __init__(self, vllm_config, role, kv_cache_config)
"""
from typing import TYPE_CHECKING
from unittest.mock import patch
import pytest
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1 import (
KVConnectorBase_V1,
KVConnectorRole,
)
from vllm.v1.attention.backend import AttentionMetadata
from vllm.v1.core.sched.output import SchedulerOutput
from .utils import create_scheduler, create_vllm_config
if TYPE_CHECKING:
from vllm.config import VllmConfig
from vllm.forward_context import ForwardContext
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
class OldStyleTestConnector(KVConnectorBase_V1):
    """
    Test connector using the old signature with 2 required arguments.
    This simulates external connectors that haven't been updated yet.

    All abstract-method overrides below are minimal no-op stubs; only the
    constructor signature matters for these tests.
    """

    def __init__(self, vllm_config: "VllmConfig", role: KVConnectorRole):
        # Old-style call to super().__init__ with only 2 arguments
        super().__init__(vllm_config=vllm_config, role=role)

    def get_num_new_matched_tokens(
        self, request: "Request", num_computed_tokens: int
    ) -> tuple[int | None, bool]:
        """Stub: report zero externally matched tokens, no async load."""
        return 0, False

    def update_state_after_alloc(
        self,
        request: "Request",
        blocks: "KVCacheBlocks",
        num_external_tokens: int,
    ):
        """Stub: no scheduler-side state to update."""
        pass

    def build_connector_meta(self, scheduler_output: SchedulerOutput):
        """Stub: no connector metadata to ship to the workers."""
        return None

    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Stub: no KV loading performed."""
        pass

    def wait_for_layer_load(self, layer_name: str) -> None:
        """Stub: nothing to wait for."""
        pass

    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer,
        attn_metadata: AttentionMetadata,
        **kwargs,
    ) -> None:
        """Stub: no KV saving performed."""
        pass

    def wait_for_save(self):
        """Stub: nothing to wait for."""
        pass
class NewStyleTestConnector(KVConnectorBase_V1):
    """
    Test connector using the new signature with 3 required arguments.

    All abstract-method overrides below are minimal no-op stubs; only the
    constructor signature matters for these tests.
    """

    def __init__(
        self,
        vllm_config: "VllmConfig",
        role: KVConnectorRole,
        kv_cache_config: "KVCacheConfig",
    ):
        # New-style call to super().__init__ with all 3 arguments
        super().__init__(
            vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config
        )

    def get_num_new_matched_tokens(
        self, request: "Request", num_computed_tokens: int
    ) -> tuple[int | None, bool]:
        """Stub: report zero externally matched tokens, no async load."""
        return 0, False

    def update_state_after_alloc(
        self,
        request: "Request",
        blocks: "KVCacheBlocks",
        num_external_tokens: int,
    ):
        """Stub: no scheduler-side state to update."""
        pass

    def build_connector_meta(self, scheduler_output: SchedulerOutput):
        """Stub: no connector metadata to ship to the workers."""
        return None

    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Stub: no KV loading performed."""
        pass

    def wait_for_layer_load(self, layer_name: str) -> None:
        """Stub: nothing to wait for."""
        pass

    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer,
        attn_metadata: AttentionMetadata,
        **kwargs,
    ) -> None:
        """Stub: no KV saving performed."""
        pass

    def wait_for_save(self):
        """Stub: nothing to wait for."""
        pass
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_external_old_signature_factory_instantiation(role):
    """
    An external connector with the old 2-arg signature, loaded via
    kv_connector_module_path, is instantiated through the factory's
    backwards-compatibility path (kv_cache_config is not passed through).
    """
    vllm_config = create_vllm_config()
    transfer_config = vllm_config.kv_transfer_config
    transfer_config.kv_connector = "OldStyleTestConnector"
    transfer_config.kv_connector_module_path = (
        "tests.v1.kv_connector.unit.test_backwards_compatibility"
    )
    kv_cache_config = create_scheduler(vllm_config).kv_cache_config
    connector = KVConnectorFactory.create_connector(vllm_config, role, kv_cache_config)
    assert connector is not None
    assert isinstance(connector, OldStyleTestConnector)
    assert connector.role == role
    # Old-style connectors never see the kv_cache_config.
    assert connector._kv_cache_config is None
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_external_new_signature_factory_instantiation(role):
    """
    An external connector with the new 3-arg signature, loaded via
    kv_connector_module_path, receives the kv_cache_config from the factory.
    """
    vllm_config = create_vllm_config()
    transfer_config = vllm_config.kv_transfer_config
    transfer_config.kv_connector = "NewStyleTestConnector"
    transfer_config.kv_connector_module_path = (
        "tests.v1.kv_connector.unit.test_backwards_compatibility"
    )
    kv_cache_config = create_scheduler(vllm_config).kv_cache_config
    connector = KVConnectorFactory.create_connector(vllm_config, role, kv_cache_config)
    assert connector is not None
    assert isinstance(connector, NewStyleTestConnector)
    assert connector.role == role
    # New-style connectors are handed the real kv_cache_config.
    assert connector._kv_cache_config is not None
    assert connector._kv_cache_config == kv_cache_config
@pytest.mark.parametrize("role", [KVConnectorRole.SCHEDULER, KVConnectorRole.WORKER])
def test_old_signature_super_init(role):
    """
    Old-style connectors can call super().__init__() without the
    kv_cache_config parameter.
    """
    connector = OldStyleTestConnector(create_vllm_config(), role)
    assert connector is not None
    assert connector.role == role
    assert connector._kv_cache_config is None
def test_old_signature_super_init_with_kwargs():
    """
    Old-style connectors accept keyword arguments in either order when
    calling super().__init__().
    """
    vllm_config = create_vllm_config()
    # Construct once with vllm_config first, once with role first.
    for connector in (
        OldStyleTestConnector(vllm_config=vllm_config, role=KVConnectorRole.SCHEDULER),
        OldStyleTestConnector(role=KVConnectorRole.WORKER, vllm_config=vllm_config),
    ):
        assert connector is not None
        assert connector._kv_cache_config is None
def test_internal_connector_uses_new_signature():
    """
    Internal (factory-registered) connectors always use the new signature
    and receive the kv_cache_config.
    """
    from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import (
        ExampleConnector,
    )

    vllm_config = create_vllm_config()
    vllm_config.kv_transfer_config.kv_connector = "ExampleConnector"
    kv_cache_config = create_scheduler(vllm_config).kv_cache_config
    connector = KVConnectorFactory.create_connector(
        vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
    )
    assert connector is not None
    assert isinstance(connector, ExampleConnector)
    assert connector._kv_cache_config is not None
    assert connector._kv_cache_config == kv_cache_config
def test_signature_detection_with_mocking():
    """
    Test that the factory correctly applies compat_sig flag returned from
    _get_connector_class_with_compat.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)
    kv_cache_config = scheduler.kv_cache_config
    # Mock _get_connector_class_with_compat to return old-style connector
    # (compat flag True => factory must call the 2-arg constructor).
    with patch.object(
        KVConnectorFactory,
        "_get_connector_class_with_compat",
        return_value=(OldStyleTestConnector, True),
    ):
        old_connector = KVConnectorFactory.create_connector(
            vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
        )
        assert old_connector is not None
        assert isinstance(old_connector, OldStyleTestConnector)
        assert old_connector._kv_cache_config is None
    # Mock _get_connector_class_with_compat to return new-style connector
    # (compat flag False => factory passes kv_cache_config through).
    with patch.object(
        KVConnectorFactory,
        "_get_connector_class_with_compat",
        return_value=(NewStyleTestConnector, False),
    ):
        new_connector = KVConnectorFactory.create_connector(
            vllm_config, KVConnectorRole.SCHEDULER, kv_cache_config
        )
        assert new_connector is not None
        assert isinstance(new_connector, NewStyleTestConnector)
        assert new_connector._kv_cache_config is not None
        assert new_connector._kv_cache_config == kv_cache_config
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_backwards_compatibility.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/spec_decode/suffix_decoding.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.config import VllmConfig
from vllm.v1.worker.gpu_input_batch import InputBatch
class SuffixDecodingProposer:
    """
    Speculative decoding proposer for Suffix Decoding (https://arxiv.org/pdf/2411.04975).
    This class imports and uses the official implementation from Arctic Inference
    (https://github.com/snowflakedb/ArcticInference).
    """

    def __init__(self, vllm_config: VllmConfig):
        # All tuning knobs come from the speculative config; see
        # SuffixDecodingCache for their exact semantics.
        config = vllm_config.speculative_config
        assert config is not None, "Speculative config must be set"
        self.num_speculative_tokens = config.num_speculative_tokens
        self.max_tree_depth = config.suffix_decoding_max_tree_depth
        self.max_spec_factor = config.suffix_decoding_max_spec_factor
        self.min_token_prob = config.suffix_decoding_min_token_prob
        self.max_model_len = vllm_config.model_config.max_model_len
        # Lazy import to avoid error when Suffix Decoding is not used.
        from arctic_inference.suffix_decoding import SuffixDecodingCache

        # Initialize and empty cache. This object will take care of caching request
        # outputs, evicting old requests, and manages the per-prompt suffix trees.
        self.suffix_cache = SuffixDecodingCache(
            max_tree_depth=config.suffix_decoding_max_tree_depth,
            max_cached_requests=config.suffix_decoding_max_cached_requests,
        )

    def propose(
        self,
        input_batch: InputBatch,
        sampled_token_ids: list[list[int]],
        slot_mappings: dict[str, torch.Tensor]
        | list[dict[str, torch.Tensor]]
        | None = None,  # unused
    ) -> list[list[int]]:
        """
        Propose speculative tokens for each request in the input batch. Suffix Decoding
        will speculate a dynamic number of tokens for each request every decoding step,
        so each entry in the returned list may have different lengths.
        """
        draft_token_ids: list[list[int]] = []
        for i, sampled_ids in enumerate(sampled_token_ids):
            if not sampled_ids:
                # Skip speculative decoding for partial prefills.
                draft_token_ids.append([])
                continue
            req_id = input_batch.req_ids[i]
            num_tokens = input_batch.num_tokens_no_spec[i]
            if num_tokens >= self.max_model_len:
                # Skip requests that have already reached the max model length.
                draft_token_ids.append([])
                continue
            # NOTE(review): since req_id == req_ids[i], `index` should equal
            # `i`; the mixed use of `i` and `index` below looks equivalent —
            # confirm against InputBatch's invariants.
            index = input_batch.req_id_to_index[req_id]
            if req_id not in self.suffix_cache.active_requests:
                if req_id in self.suffix_cache.cached_requests:
                    # Reset the suffix cache for this request.
                    self.suffix_cache.evict_cached_response(req_id)
                num_prompt_tokens = input_batch.num_prompt_tokens[index]
                prompt_token_ids = input_batch.token_ids_cpu[index, :num_prompt_tokens]
                # Start a new request, this will build the suffix tree for that prompt.
                self.suffix_cache.start_request(req_id, prompt_token_ids)
            # Append the newly sampled ids to the suffix cache for this request.
            self.suffix_cache.add_active_response(req_id, sampled_ids)
            # Suffix decoding only uses the most recent tokens up to max_tree_depth, so
            # we extract the pattern from the end of the input.
            start = max(0, num_tokens - self.max_tree_depth)
            pattern = input_batch.token_ids_cpu[i, start:num_tokens]
            draft = self.suffix_cache.speculate(
                req_id,
                pattern,
                # Never speculate past the model's context window.
                max_spec_tokens=min(
                    self.num_speculative_tokens, self.max_model_len - num_tokens - 1
                ),
                max_spec_factor=self.max_spec_factor,
                min_token_prob=self.min_token_prob,
            )
            draft_token_ids.append(draft.token_ids)
        # Stop requests that were not seen in the input batch.
        for req_id in (
            self.suffix_cache.active_requests - input_batch.req_id_to_index.keys()
        ):
            self.suffix_cache.stop_request(req_id)
        return draft_token_ids

    def load_model(self, *args, **kwargs):
        # No model to load.
        pass
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/spec_decode/suffix_decoding.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/distributed/test_dbo.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test Dual Batch Overlap (DBO) with Data Parallelism + Expert Parallelism.
DBO is specifically designed for DP+EP scenarios to hide communication latency
by overlapping computation of two batches. This test validates that DBO works
correctly with the DeepSeek-V2-Lite model using GSM8K evaluation.
"""
import pytest
import torch
from tests.evals.gsm8k.gsm8k_eval import evaluate_gsm8k
from tests.utils import RemoteOpenAIServer
from vllm.utils.import_utils import has_deep_ep
# Detect Blackwell / B200 (compute capability 10.x)
IS_BLACKWELL = False
try:
    if torch.cuda.is_available():
        major, _minor = torch.cuda.get_device_capability(0)
        IS_BLACKWELL = major >= 10
except Exception:
    # Be conservative: if we can't detect, don't xfail by default
    IS_BLACKWELL = False

MODEL_NAME = "deepseek-ai/DeepSeek-V2-Lite-Chat"
DP_SIZE = 2

# GSM8K eval configuration
NUM_QUESTIONS = 256  # Fast eval for CI; but must be large enough to hit dbo thresholds
NUM_SHOTS = 5  # Few-shot examples
MIN_ACCURACY = 0.62  # Expected 0.64 with 2% buffer (based on vLLM test data)

# Increase max_num_seqs to trigger DBO for decode batches
# With 64 seqs, decode batches should exceed the 32 token threshold
MAX_NUM_SEQS = 64  # Increased from 16 to trigger decode DBO

# DeepEP backends to test
DEEPEP_BACKENDS = [
    "deepep_low_latency",
    "deepep_high_throughput",
]
@pytest.mark.skipif(not has_deep_ep(), reason="These tests require deep_ep to run")
@pytest.mark.parametrize("all2all_backend", DEEPEP_BACKENDS)
@pytest.mark.xfail(
    IS_BLACKWELL,
    reason=(
        "Temporary: DBO accuracy unstable on Blackwell "
        "(doesn't meet expectation of MIN_ACCURACY = 0.62)"
    ),
)
def test_dbo_dp_ep_gsm8k(all2all_backend: str, num_gpus_available):
    """
    Test DBO with DP+EP using GSM8K evaluation.

    Launches an OpenAI-compatible server with DBO enabled on a DP+EP setup
    and checks GSM8K accuracy against MIN_ACCURACY.
    """
    # One GPU per data-parallel rank is required.
    required_gpus = DP_SIZE
    if num_gpus_available < required_gpus:
        pytest.skip(f"Need at least {required_gpus} GPUs (DP={DP_SIZE})")
    # Server arguments for DBO + DP + EP
    server_args = [
        "--max-model-len",
        "4096",
        "--max-num-seqs",
        str(MAX_NUM_SEQS),  # Use larger batch to trigger decode DBO
        "--trust-remote-code",
        # Note: Not using --enforce-eager to test DBO's alternate CUDA graph dispatching
        "--data-parallel-size",
        str(DP_SIZE),
        "--enable-expert-parallel",
        "--enable-dbo",
        # Fix threshold so we know we trigger DBO
        "--dbo-decode-token-threshold",
        "16",
        "--dbo-prefill-token-threshold",
        "256",
        "--all2all-backend",
        all2all_backend,
    ]
    with RemoteOpenAIServer(
        MODEL_NAME,
        server_args,
        max_wait_seconds=600,  # Allow time for model loading with DP+EP
    ) as remote_server:
        # Use host and port directly from RemoteOpenAIServer
        host = f"http://{remote_server.host}"
        port = remote_server.port
        # Run GSM8K evaluation
        results = evaluate_gsm8k(
            num_questions=NUM_QUESTIONS,
            num_shots=NUM_SHOTS,
            host=host,
            port=port,
        )
        # Validate accuracy is reasonable
        accuracy = results["accuracy"]
        assert accuracy >= MIN_ACCURACY, (
            f"DBO+DP+EP accuracy too low ({all2all_backend}): "
            f"{accuracy:.3f} < {MIN_ACCURACY:.3f} "
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/distributed/test_dbo.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/transformers_utils/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
from unittest.mock import patch
import pytest
from vllm.transformers_utils.gguf_utils import (
is_gguf,
is_remote_gguf,
split_remote_gguf,
)
from vllm.transformers_utils.utils import (
is_cloud_storage,
is_gcs,
is_s3,
)
def test_is_gcs():
    """Only gs:// URIs are recognized as GCS paths."""
    assert is_gcs("gs://model-path")
    for non_gcs_path in (
        "s3://model-path/path-to-model",
        "/unix/local/path",
        "nfs://nfs-fqdn.local",
    ):
        assert not is_gcs(non_gcs_path)
def test_is_s3():
    """Only s3:// URIs are recognized as S3 paths."""
    assert is_s3("s3://model-path/path-to-model")
    for non_s3_path in (
        "gs://model-path",
        "/unix/local/path",
        "nfs://nfs-fqdn.local",
    ):
        assert not is_s3(non_s3_path)
def test_is_cloud_storage():
    """Both gs:// and s3:// URIs count as cloud storage; local/NFS do not."""
    for cloud_path in ("gs://model-path", "s3://model-path/path-to-model"):
        assert is_cloud_storage(cloud_path)
    for non_cloud_path in ("/unix/local/path", "nfs://nfs-fqdn.local"):
        assert not is_cloud_storage(non_cloud_path)
class TestIsRemoteGGUF:
    """Test is_remote_gguf utility function."""

    def test_is_remote_gguf_with_colon_and_slash(self):
        """repo_id:quant_type strings with exact GGML types are remote GGUF."""
        # Valid quant types (exact GGML types)
        for model_ref in (
            "unsloth/Qwen3-0.6B-GGUF:IQ1_S",
            "user/repo:Q2_K",
            "repo/model:Q4_K",
            "repo/model:Q8_0",
        ):
            assert is_remote_gguf(model_ref)
        # Invalid quant types should return False
        for model_ref in (
            "repo/model:quant",
            "repo/model:INVALID",
            "repo/model:invalid_type",
        ):
            assert not is_remote_gguf(model_ref)

    def test_is_remote_gguf_extended_quant_types(self):
        """Extended quant-type suffixes (_M/_S/_L/_XL/_XS/_XXS) are accepted."""
        for model_ref in (
            "repo/model:Q4_K_M",
            "repo/model:Q4_K_S",
            "repo/model:Q3_K_L",
            "repo/model:Q5_K_M",
            "repo/model:Q3_K_S",
            "repo/model:Q5_K_XL",
            "repo/model:IQ4_XS",
            "repo/model:IQ3_XXS",
        ):
            assert is_remote_gguf(model_ref)
        # Unknown base types are rejected even with a known suffix.
        assert not is_remote_gguf("repo/model:INVALID_M")
        assert not is_remote_gguf("repo/model:Q9_K_M")

    def test_is_remote_gguf_without_colon(self):
        """A bare repo id without ':quant' is not remote GGUF."""
        for model_ref in ("repo/model", "unsloth/Qwen3-0.6B-GGUF"):
            assert not is_remote_gguf(model_ref)

    def test_is_remote_gguf_without_slash(self):
        """Without a '/' there is no repo id, so never remote GGUF."""
        # Even with a valid quant_type, no slash means not remote GGUF.
        for model_ref in ("model.gguf", "model:IQ1_S", "model:quant"):
            assert not is_remote_gguf(model_ref)

    def test_is_remote_gguf_local_path(self):
        """Local file paths are not remote GGUF."""
        for model_ref in ("/path/to/model.gguf", "./model.gguf"):
            assert not is_remote_gguf(model_ref)

    def test_is_remote_gguf_with_path_object(self):
        """Path objects are handled like their string form."""
        assert is_remote_gguf(Path("unsloth/Qwen3-0.6B-GGUF:IQ1_S"))
        assert not is_remote_gguf(Path("repo/model"))

    def test_is_remote_gguf_with_http_https(self):
        """HTTP(S) URLs are rejected even with a valid quant type."""
        for url in (
            "http://example.com/repo/model:IQ1_S",
            "https://huggingface.co/repo/model:Q2_K",
            "http://repo/model:Q4_K",
            "https://repo/model:Q8_0",
        ):
            assert not is_remote_gguf(url)

    def test_is_remote_gguf_with_cloud_storage(self):
        """Cloud-storage URIs are rejected even with a valid quant type."""
        for uri in (
            "s3://bucket/repo/model:IQ1_S",
            "gs://bucket/repo/model:Q2_K",
            "s3://repo/model:Q4_K",
            "gs://repo/model:Q8_0",
        ):
            assert not is_remote_gguf(uri)
class TestSplitRemoteGGUF:
    """Test split_remote_gguf utility function."""

    def test_split_remote_gguf_valid(self):
        """Valid repo_id:quant_type strings split into their two parts."""
        repo, quant = split_remote_gguf("unsloth/Qwen3-0.6B-GGUF:IQ1_S")
        assert (repo, quant) == ("unsloth/Qwen3-0.6B-GGUF", "IQ1_S")
        repo, quant = split_remote_gguf("repo/model:Q2_K")
        assert (repo, quant) == ("repo/model", "Q2_K")

    def test_split_remote_gguf_extended_quant_types(self):
        """Extended quant-type suffixes survive the split unchanged."""
        repo, quant = split_remote_gguf("unsloth/Qwen3-0.6B-GGUF:Q4_K_M")
        assert (repo, quant) == ("unsloth/Qwen3-0.6B-GGUF", "Q4_K_M")
        repo, quant = split_remote_gguf("repo/model:Q3_K_S")
        assert (repo, quant) == ("repo/model", "Q3_K_S")

    def test_split_remote_gguf_with_path_object(self):
        """Path objects split the same way as strings."""
        repo, quant = split_remote_gguf(Path("unsloth/Qwen3-0.6B-GGUF:IQ1_S"))
        assert (repo, quant) == ("unsloth/Qwen3-0.6B-GGUF", "IQ1_S")

    def test_split_remote_gguf_invalid(self):
        """Anything is_remote_gguf rejects raises ValueError on split."""
        for bad_ref in (
            "repo/model",  # no colon
            "repo/model:INVALID_TYPE",  # invalid quant type
            "http://repo/model:IQ1_S",  # HTTP URL
            "s3://bucket/repo/model:Q2_K",  # cloud storage
        ):
            with pytest.raises(ValueError, match="Wrong GGUF model"):
                split_remote_gguf(bad_ref)
class TestIsGGUF:
    """Test is_gguf utility function."""

    @patch("vllm.transformers_utils.gguf_utils.check_gguf_file", return_value=True)
    def test_is_gguf_with_local_file(self, mock_check_gguf):
        """Paths that check_gguf_file accepts are reported as GGUF."""
        for path in ("/path/to/model.gguf", "./model.gguf"):
            assert is_gguf(path)

    def test_is_gguf_with_remote_gguf(self):
        """Remote ``repo_id:quant_type`` specs are reported as GGUF."""
        valid_specs = (
            "unsloth/Qwen3-0.6B-GGUF:IQ1_S",
            "repo/model:Q2_K",
            "repo/model:Q4_K",
            # Extended quant types with size suffixes.
            "repo/model:Q4_K_M",
            "repo/model:Q3_K_S",
            "repo/model:Q5_K_L",
        )
        for spec in valid_specs:
            assert is_gguf(spec)
        # An unrecognized quant_type disqualifies the spec.
        for spec in ("repo/model:quant", "repo/model:INVALID"):
            assert not is_gguf(spec)

    @patch("vllm.transformers_utils.gguf_utils.check_gguf_file", return_value=False)
    def test_is_gguf_false(self, mock_check_gguf):
        """Plain (non-GGUF) model references are rejected."""
        for name in ("unsloth/Qwen3-0.6B", "repo/model", "model"):
            assert not is_gguf(name)

    def test_is_gguf_edge_cases(self):
        """Edge cases: empty string, partial formats, URLs, cloud paths."""
        edge_cases = (
            "",  # empty string
            "model:IQ1_S",  # colon but no slash (valid quant_type)
            "repo/model",  # slash but no colon
            "http://repo/model:IQ1_S",  # HTTP URL
            "https://repo/model:Q2_K",  # HTTPS URL
            "s3://bucket/repo/model:IQ1_S",  # S3 path
            "gs://bucket/repo/model:Q2_K",  # GCS path
        )
        for case in edge_cases:
            assert not is_gguf(case)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/transformers_utils/test_utils.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/entrypoints/openai/orca_metrics.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Utility functions that create ORCA endpoint load report response headers.
"""
import json
from collections.abc import Mapping
from vllm.logger import init_logger
from vllm.v1.metrics.reader import Gauge, get_metrics_snapshot
logger = init_logger(__name__)
def create_orca_header(
    metrics_format: str, named_metrics: list[tuple[str, float]]
) -> Mapping[str, str] | None:
    """
    Creates ORCA headers named 'endpoint-load-metrics' in the specified format
    and adds custom metrics to named_metrics.
    ORCA headers format description: https://docs.google.com/document/d/1C1ybMmDKJIVlrbOLbywhu9iRYo4rilR-cT50OTtOFTs/edit?tab=t.0
    ORCA proto https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto
    Parameters:
    - metrics_format (str): The format of the header ('TEXT', 'JSON').
    - named_metrics (List[Tuple[str, float]]): List of tuples with metric names
    and their corresponding double values.
    Returns:
    - Optional[Mapping[str,str]]: A dictionary with header key as
    'endpoint-load-metrics' and values as the ORCA header strings with
    format prefix and data in with named_metrics in.
    """
    # Hoisted: previously `.lower()` was recomputed for every comparison.
    fmt = metrics_format.lower()
    if fmt not in ("text", "json"):
        # BUG FIX: this warning previously interpolated the `format` builtin
        # instead of the caller's unsupported format string.
        logger.warning(
            "Warning: `%s` format is not supported in the ORCA response header",
            metrics_format,
        )
        return None
    header = {}
    # output example:
    # endpoint-load-metrics: TEXT named_metrics.kv_cache_utilization=0.4
    if fmt == "text":
        native_http_header = ", ".join(
            f"named_metrics.{metric_name}={value}"
            for metric_name, value in named_metrics
            if isinstance(metric_name, str) and isinstance(value, float)
        )
        header["endpoint-load-metrics"] = f"TEXT {native_http_header}"
    # output example:
    # endpoint-load-metrics: JSON "named_metrics": {"custom-metric-util": 0.4}
    else:
        # Keep only well-formed (str, float) pairs; duplicates collapse to
        # the last occurrence, matching the original dict comprehension.
        orca_report = {
            "named_metrics": {
                metric_name: value
                for metric_name, value in named_metrics
                if isinstance(metric_name, str) and isinstance(value, float)
            }
        }
        header["endpoint-load-metrics"] = f"JSON {json.dumps(orca_report)}"
    logger.info("Created ORCA header %s", header)
    return header
def get_named_metrics_from_prometheus() -> list[tuple[str, float]]:
    """
    Collects current metrics from Prometheus and returns some of them
    in the form of the `named_metrics` list for `create_orca_header()`.
    Parameters:
    - None
    Returns:
    - list[tuple[str, float]]: List of tuples of metric names and their values.
    """
    # Map from prometheus metric names to ORCA named metrics.
    prometheus_to_orca = {
        "vllm:kv_cache_usage_perc": "kv_cache_usage_perc",
        "vllm:num_requests_waiting": "num_requests_waiting",
    }
    # Keep only metrics mapped into ORCA.
    # Note: Only Gauge metrics are currently supported.
    return [
        (str(prometheus_to_orca[metric.name]), float(metric.value))
        for metric in get_metrics_snapshot()
        if metric.name in prometheus_to_orca and isinstance(metric, Gauge)
    ]
def metrics_header(metrics_format: str) -> Mapping[str, str] | None:
    """
    Creates ORCA headers named 'endpoint-load-metrics' in the specified format.
    Metrics are collected from Prometheus using `get_named_metrics_from_prometheus()`.
    ORCA headers format description: https://docs.google.com/document/d/1C1ybMmDKJIVlrbOLbywhu9iRYo4rilR-cT50OTtOFTs/edit?tab=t.0
    ORCA proto https://github.com/cncf/xds/blob/main/xds/data/orca/v3/orca_load_report.proto
    Parameters:
    - metrics_format (str): The format of the header ('TEXT', 'JSON').
    Returns:
    - Optional[Mapping[str,str]]: A dictionary with header key as
    'endpoint-load-metrics' and values as the ORCA header strings with
    format prefix and data in with named_metrics in.
    """
    # An empty/None format disables the header entirely.
    if not metrics_format:
        return None
    return create_orca_header(metrics_format, get_named_metrics_from_prometheus())
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/orca_metrics.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/kv_connector/unit/test_config.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for KV cache offloading configuration."""
import pytest
from vllm.config import CacheConfig, KVTransferConfig, ParallelConfig, VllmConfig
pytestmark = pytest.mark.cpu_test
@pytest.mark.parametrize(
    "kv_offloading_backend,kv_offloading_size,tp,pp,expected_backend,expected_bytes",
    [
        ("native", 4.0, 1, 1, "OffloadingConnector", 4.0 * (1 << 30)),
        # Native backend is asserted against the TOTAL size in bytes
        # (8.0 GiB); the value is not divided by the tp*pp world size —
        # contrast with the lmcache cases below.
        ("native", 8.0, 2, 2, "OffloadingConnector", 8.0 * (1 << 30)),
        ("lmcache", 4.0, 1, 1, "LMCacheConnectorV1", 4.0),
        # size per rank: 8.0 GiB / (2 * 2) = 2.0 GiB
        ("lmcache", 8.0, 2, 2, "LMCacheConnectorV1", 2.0),
        # When kv_offloading_size is None, offloading is disabled (backend is ignored)
        ("native", None, 1, 1, None, None),
    ],
)
def test_kv_connector(
    kv_offloading_backend, kv_offloading_size, tp, pp, expected_backend, expected_bytes
):
    """Check that CacheConfig offloading options materialize the expected
    KVTransferConfig: connector class, role, and size bookkeeping."""
    # Pre-populate extra config only when a connector is expected, so we can
    # observe whether each backend preserves or replaces existing entries.
    kv_transfer_config = (
        KVTransferConfig(kv_connector_extra_config={"existing_key": "existing_value"})
        if expected_backend is not None
        else None
    )
    vllm_config = VllmConfig(
        cache_config=CacheConfig(
            kv_offloading_backend=kv_offloading_backend,
            kv_offloading_size=kv_offloading_size,
        ),
        kv_transfer_config=kv_transfer_config,
        parallel_config=ParallelConfig(
            tensor_parallel_size=tp, pipeline_parallel_size=pp
        ),
    )
    # No KV transfer config expected (expected_backend is None here, so this
    # is effectively an `is None` check).
    if expected_backend is None:
        assert vllm_config.kv_transfer_config is expected_backend
        return
    kv_transfer_config = vllm_config.kv_transfer_config
    kv_connector_extra_config = kv_transfer_config.kv_connector_extra_config
    assert kv_transfer_config.kv_connector == expected_backend
    assert kv_transfer_config.kv_role == "kv_both"
    if kv_offloading_backend == "native":
        assert kv_connector_extra_config["cpu_bytes_to_use"] == expected_bytes
        # Existing config should be preserved
        assert kv_connector_extra_config["existing_key"] == "existing_value"
    elif kv_offloading_backend == "lmcache":
        assert kv_connector_extra_config["lmcache.local_cpu"] is True
        assert kv_connector_extra_config["lmcache.max_local_cpu_size"] == expected_bytes
        # Existing config should be replaced
        assert "existing_key" not in kv_connector_extra_config
def test_kv_offloading_size_only_uses_native_default():
    """Test that setting only kv_offloading_size enables native offloading."""
    # kv_offloading_backend is deliberately omitted; it should default to
    # "native" and thus select the OffloadingConnector.
    vllm_config = VllmConfig(
        cache_config=CacheConfig(
            kv_offloading_size=4.0,
        ),
    )

    transfer_config = vllm_config.kv_transfer_config
    extra_config = transfer_config.kv_connector_extra_config

    assert transfer_config.kv_connector == "OffloadingConnector"
    assert transfer_config.kv_role == "kv_both"
    # 4.0 GiB converted to bytes.
    assert extra_config["cpu_bytes_to_use"] == 4.0 * (1 << 30)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_config.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/models/multimodal/generation/test_keye.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import asdict
from typing import NamedTuple
import pytest
from PIL.Image import Image
from transformers import AutoProcessor
from vllm import LLM, EngineArgs, SamplingParams
from vllm.multimodal.utils import encode_image_url
MODEL_NAME = "Kwai-Keye/Keye-VL-8B-Preview"
QUESTION = "What is the content of each image?"
class ModelRequestData(NamedTuple):
    """Everything needed to build and run a single multimodal request."""

    engine_args: EngineArgs  # engine construction arguments
    prompt: str  # fully rendered chat prompt
    image_data: list[Image]  # PIL images referenced by the prompt
    stop_token_ids: list[int] | None = None  # optional extra stop tokens
    chat_template: str | None = None  # override for the model's template
    sampling_params: SamplingParams | None = None  # override sampling config
@pytest.mark.core_model
@pytest.mark.parametrize("question", [QUESTION])
def test_keye_vl(
    image_assets,
    question: str,
):
    """Smoke-test Keye-VL generation on the shared image assets.

    Builds a multi-image chat prompt with the model's own chat template,
    runs greedy decoding, and checks each output is non-trivially long.
    """
    images = [asset.pil_image for asset in image_assets]
    image_urls = [encode_image_url(image) for image in images]
    engine_args = EngineArgs(
        model=MODEL_NAME,
        trust_remote_code=True,
        max_model_len=8192,
        max_num_seqs=5,
        # Allow exactly as many images per prompt as we are about to send.
        limit_mm_per_prompt={"image": len(image_urls)},
    )
    placeholders = [{"type": "image", "image": url} for url in image_urls]
    messages = [
        {
            "role": "user",
            "content": [
                *placeholders,
                {"type": "text", "text": question},
            ],
        },
    ]
    # Render the prompt with the model's own chat template.
    processor = AutoProcessor.from_pretrained(MODEL_NAME, trust_remote_code=True)
    prompt = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Fix the seed so runs are reproducible.
    engine_args = asdict(engine_args) | {"seed": 42}
    llm = LLM(**engine_args)
    # Greedy decoding (temperature 0) keeps the assertion deterministic.
    sampling_params = SamplingParams(
        temperature=0.0, max_tokens=256, stop_token_ids=None
    )
    outputs = llm.generate(
        {
            "prompt": prompt,
            "multi_modal_data": {"image": images},
        },
        sampling_params=sampling_params,
    )
    print("-" * 50)
    for o in outputs:
        generated_text = o.outputs[0].text
        print(generated_text)
        # A very short output usually indicates broken multimodal wiring.
        assert len(generated_text) > 10, (
            f"Generated text is too short: {generated_text}"
        )
        print("-" * 50)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/generation/test_keye.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/lora/test_gptoss_tp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import vllm
from vllm.lora.request import LoRARequest
from ..utils import multi_gpu_test
MODEL_PATH = "openai/gpt-oss-20b"

# Harmony-format prompt used for every request: system message, a user turn
# describing a text-to-SQL task over an example "farm" schema, and an
# assistant turn opened on the `final` channel. `{context}` is replaced with
# the natural-language question by generate_and_test().
PROMPT_TEMPLATE = """<|start|>system<|message|>You are ChatGPT, a large language model trained by OpenAI.
Knowledge cutoff: 2024-06
Current date: 2025-10-29
Reasoning: medium
# Valid channels: analysis, commentary, final. Channel must be included for every message.<|end|><|start|>user<|message|>I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.
"
##Instruction:
farm contains tables such as city, farm, farm_competition, competition_record. Table city has columns such as City_ID, Official_Name, Status, Area_km_2, Population, Census_Ranking. City_ID is the primary key.
Table farm has columns such as Farm_ID, Year, Total_Horses, Working_Horses, Total_Cattle, Oxen, Bulls, Cows, Pigs, Sheep_and_Goats. Farm_ID is the primary key.
Table farm_competition has columns such as Competition_ID, Year, Theme, Host_city_ID, Hosts. Competition_ID is the primary key.
Table competition_record has columns such as Competition_ID, Farm_ID, Rank. Competition_ID is the primary key.
The Host_city_ID of farm_competition is the foreign key of City_ID of city.
The Farm_ID of competition_record is the foreign key of Farm_ID of farm.
The Competition_ID of competition_record is the foreign key of Competition_ID of farm_competition.
###Input:
{context}
###Response:<|end|><|start|>assistant<|channel|>final<|message|>""" # noqa: E501

# Expected beginnings of the LoRA-adapted completions for the three prompts
# built in generate_and_test() (checked with str.startswith).
EXPECTED_LORA_OUTPUT = [
    "SELECT avg(Working_Horses) FROM farm WHERE Total_Horses > 5000",
    "SELECT max(Cows) , min(Cows) FROM farm",
    "SELECT max(Cows) , min(Cows) FROM farm",
]
def generate_and_test(llm: vllm.LLM, lora_path: str, lora_id: int) -> None:
    """Run three text-to-SQL prompts through ``llm`` (with the LoRA adapter
    at ``lora_path`` when ``lora_id`` is nonzero) and check that each
    completion starts with the expected SQL."""
    questions = [
        "Give the average number of working horses on farms with more than 5000 total horses.",  # noqa: E501
        "What are the maximum and minimum number of cows across all farms.",
        "Return the maximum and minimum number of cows across all farms.",
    ]
    prompts = [PROMPT_TEMPLATE.format(context=q) for q in questions]
    sampling_params = vllm.SamplingParams(temperature=0, max_tokens=64)
    lora_request = (
        LoRARequest(str(lora_id), lora_id, lora_path) if lora_id else None
    )
    outputs = llm.generate(prompts, sampling_params, lora_request=lora_request)
    # Collect and print the outputs.
    generated_texts: list[str] = []
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text.strip()
        generated_texts.append(generated_text)
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
    for expected, actual in zip(EXPECTED_LORA_OUTPUT, generated_texts):
        assert actual.startswith(expected)
@pytest.mark.parametrize("mxfp4_use_marlin", [True, False])
def test_gpt_oss_lora(
    monkeypatch: pytest.MonkeyPatch, gptoss20b_lora_files, mxfp4_use_marlin
):
    """Single-GPU LoRA test for gpt-oss-20b, with and without the Marlin
    MXFP4 kernel path (toggled via VLLM_MXFP4_USE_MARLIN)."""
    with monkeypatch.context() as m:
        m.setenv("VLLM_MXFP4_USE_MARLIN", "1" if mxfp4_use_marlin else "0")
        llm = vllm.LLM(
            MODEL_PATH,
            max_model_len=1024,
            enable_lora=True,
            max_loras=4,
            max_lora_rank=8,
            max_num_seqs=2,
            max_num_batched_tokens=2048,
            compilation_config=vllm.config.CompilationConfig(  # Avoid OOM
                cudagraph_specialize_lora=False,
            ),
        )
        # Exercise two distinct LoRA ids against the same adapter files.
        generate_and_test(llm, gptoss20b_lora_files, lora_id=1)
        generate_and_test(llm, gptoss20b_lora_files, lora_id=2)
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize("fully_sharded_loras", [False, True])
@pytest.mark.parametrize("mxfp4_use_marlin", [True, False])
def test_gpt_oss_lora_tp2(
    monkeypatch: pytest.MonkeyPatch,
    gptoss20b_lora_files,
    fully_sharded_loras,
    mxfp4_use_marlin,
):
    """Tensor-parallel (TP=2) LoRA test for gpt-oss-20b, sweeping both the
    fully-sharded-LoRA and Marlin MXFP4 kernel configurations."""
    with monkeypatch.context() as m:
        m.setenv("VLLM_MXFP4_USE_MARLIN", "1" if mxfp4_use_marlin else "0")
        llm = vllm.LLM(
            MODEL_PATH,
            max_model_len=1024,
            enable_lora=True,
            max_loras=2,
            max_num_seqs=2,
            max_num_batched_tokens=2048,
            tensor_parallel_size=2,
            gpu_memory_utilization=0.8,
            fully_sharded_loras=fully_sharded_loras,
            compilation_config=vllm.config.CompilationConfig(  # Avoid OOM
                cudagraph_specialize_lora=False,
            ),
        )
        # Exercise two distinct LoRA ids against the same adapter files.
        generate_and_test(llm, gptoss20b_lora_files, lora_id=1)
        generate_and_test(llm, gptoss20b_lora_files, lora_id=2)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/lora/test_gptoss_tp.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/ouro.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2/modeling_qwen2.py
# Copyright 2024 The Qwen team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Ouro model compatible with HuggingFace weights."""
from collections.abc import Iterable
from typing import Any
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
from .interfaces import SupportsLoRA
from .utils import (
AutoWeightsLoader,
extract_layer_index,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class OuroMLP(nn.Module):
    """Gated feed-forward block: fused gate/up projection, SiLU-mul, down
    projection. Only the "silu" activation is supported."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Single column-parallel GEMM producing both the gate and the up
        # halves consumed by SiluAndMul.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        activated = self.act_fn(gate_up)
        out, _ = self.down_proj(activated)
        return out
class OuroAttention(nn.Module):
    """Multi-head attention for Ouro with one Attention instance per UT step.

    Ouro re-runs the same layer weights across several "UT" steps (looped /
    universal-transformer style iteration — assumption based on the naming;
    TODO confirm). Each step gets its own Attention module in ``self.attn``,
    giving every step a distinct layer index and thus its own KV-cache slot.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position: int = 4096 * 32,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
        dual_chunk_attention_config: dict[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        # Query heads must divide evenly across tensor-parallel ranks.
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        # Per-rank head counts and projection sizes.
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scaling = self.head_dim**-0.5
        self.dual_chunk_attention_config = dual_chunk_attention_config
        # Get total_ut_steps from config, default to 4 if not specified
        total_ut_steps = getattr(config, "total_ut_steps", 4)
        # Use total number of hidden layers instead of hardcoded 24
        total_layers = config.num_hidden_layers
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position,
            rope_parameters=config.rope_parameters,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        # One Attention module per UT step. Each gets a globally unique
        # layer index (ut_step * total_layers + base_layer_idx) so the KV
        # caches of different steps never collide.
        self.attn = nn.ModuleList()
        for ut_step in range(total_ut_steps):
            base_layer_idx = extract_layer_index(prefix)
            unique_layer_idx = ut_step * total_layers + base_layer_idx
            # Rewrite the prefix so the Attention registers under the
            # virtual (per-step) layer number.
            unique_prefix = prefix.replace(
                f"layers.{base_layer_idx}", f"layers.{unique_layer_idx}"
            )
            self.attn.append(
                Attention(
                    self.num_heads,
                    self.head_dim,
                    self.scaling,
                    num_kv_heads=self.num_kv_heads,
                    cache_config=cache_config,
                    quant_config=quant_config,
                    attn_type=attn_type,
                    prefix=f"{unique_prefix}.attn",
                    # Extra kwargs only when dual-chunk attention is enabled.
                    **{
                        "layer_idx": unique_layer_idx,
                        "dual_chunk_attention_config": dual_chunk_attention_config,
                    }
                    if dual_chunk_attention_config
                    else {},
                )
            )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        current_ut: int,
    ) -> torch.Tensor:
        """Attention for one UT step; ``current_ut`` selects the per-step
        Attention module (and thereby its KV cache)."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn[current_ut](q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class OuroDecoderLayer(nn.Module):
    """One Ouro decoder layer: attention + MLP with a sandwich-norm layout
    (a second norm applied to each sublayer's output — see ``forward``)."""

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        dual_chunk_attention_config = getattr(
            config, "dual_chunk_attention_config", None
        )
        # Non-causal configs run this layer as encoder-only attention.
        if getattr(config, "is_causal", True):
            attn_type = AttentionType.DECODER
        else:
            attn_type = AttentionType.ENCODER_ONLY
        self.self_attn = OuroAttention(
            config=config,
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            max_position=config.max_position_embeddings,
            num_kv_heads=config.num_key_value_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
            attn_type=attn_type,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        self.mlp = OuroMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        # Two norms per sublayer: *_layernorm before, *_layernorm_2 after.
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.input_layernorm_2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.post_attention_layernorm_2 = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        current_ut: int,
        residual: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Returns (hidden_states, residual); callers thread ``residual``
        through the stack (fused add+norm pattern of RMSNorm)."""
        # First layer of a UT pass: seed the residual stream.
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions, hidden_states=hidden_states, current_ut=current_ut
        )
        # Post-sublayer norm on the attention output (sandwich norm).
        hidden_states = self.input_layernorm_2(hidden_states)
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        # Post-sublayer norm on the MLP output.
        hidden_states = self.post_attention_layernorm_2(hidden_states)
        return hidden_states, residual
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
    }
)
class OuroModel(nn.Module):
    """Ouro transformer backbone: embeddings, a decoder stack that is
    re-run ``total_ut_steps`` times per forward pass, and a final norm."""

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        decoder_layer_type: type[nn.Module] = OuroDecoderLayer,
    ):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config

        # TODO (@robertgshaw2): see if this can be moved out
        # NOTE(review): the message below likely means "some but NOT all
        # layers" — the "not" appears to be missing from the string.
        if cache_config.sliding_window is not None and hasattr(
            config, "max_window_layers"
        ):
            assert config.max_window_layers == config.num_hidden_layers, (
                "Sliding window for some but all layers is not supported. "
                "This model uses sliding window but `max_window_layers` = {} "
                "is less than `num_hidden_layers` = {}. Please open an issue "
                "to discuss this feature.".format(
                    config.max_window_layers,
                    config.num_hidden_layers,
                )
            )

        self.config = config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
            prefix=f"{prefix}.embed_tokens",
        )
        # Use the provided decoder layer type or default to OuroDecoderLayer
        decoder_layer_type = decoder_layer_type or OuroDecoderLayer
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: decoder_layer_type(
                config=config,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # NOTE(review): registered here (so checkpoint weights load) but not
        # referenced in forward() in this file — presumably used for
        # early-exit decisions elsewhere or during training; confirm.
        self.early_exit_gate = RowParallelLinear(config.hidden_size, 1, bias=True)
        # Number of times the layer stack is re-run per forward; default 4.
        self.total_ut_steps = getattr(self.config, "total_ut_steps", 4)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the looped decoder stack.

        The whole layer range is executed ``total_ut_steps`` times; the
        residual stream is reset at the start of each pass and folded back
        into the hidden states by the final norm at the end of each pass.
        """
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        else:
            hidden_states = self.embed_input_ids(input_ids)
        for current_ut in range(self.total_ut_steps):
            residual = None
            for layer in self.layers[self.start_layer : self.end_layer]:
                hidden_states, residual = layer(
                    positions, hidden_states, current_ut, residual
                )
            hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, fusing q/k/v and gate/up projections
        into their stacked vLLM parameters. Returns the set of loaded
        parameter names."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Rotary inv_freq buffers are recomputed, never loaded.
            if "rotary_emb.inv_freq" in name:
                continue
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                # Scalar scales may arrive as 1-element tensors.
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if name.endswith("scale"):
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                if weight_loader == default_weight_loader:
                    weight_loader(param, loaded_weight)
                else:
                    weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not a stacked parameter: load directly.
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Remapping the name of FP8 kv-scale.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class OuroForCausalLM(nn.Module, SupportsLoRA):
    """Causal-LM head over OuroModel, with LoRA support."""

    # Checkpoint sub-weights that are fused into stacked vLLM parameters.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.model = OuroModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # Share the embedding matrix with lm_head when the config ties them.
        if config.tie_word_embeddings:
            self.lm_head = self.model.embed_tokens
        else:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings (delegates to the backbone)."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the backbone; logits are computed separately in
        compute_logits."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights via AutoWeightsLoader; skip lm_head when it is tied
        to the embeddings (the shared tensor is loaded once)."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/ouro.py",
"license": "Apache License 2.0",
"lines": 457,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/kda.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from einops import rearrange
from torch import nn
from vllm.config import CacheConfig, ModelConfig, get_current_vllm_config
from vllm.distributed import (
divide,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.forward_context import ForwardContext, get_forward_context
from vllm.logger import init_logger
from vllm.model_executor.model_loader.weight_utils import sharded_weight_loader
from vllm.model_executor.utils import set_weight_attrs
from vllm.utils.torch_utils import direct_register_custom_op
from vllm.v1.attention.backend import AttentionMetadata
from vllm.v1.attention.backends.gdn_attn import GDNAttentionMetadata
from .fla.ops.kda import (
FusedRMSNormGated,
chunk_kda,
fused_kda_gate,
fused_recurrent_kda,
)
from .linear import (
ColumnParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from .mamba.abstract import MambaBase
from .mamba.mamba_utils import MambaStateDtypeCalculator, MambaStateShapeCalculator
from .mamba.ops.causal_conv1d import causal_conv1d_fn, causal_conv1d_update
from .quantization.base_config import QuantizationConfig
logger = init_logger(__name__)
def kda_attention(
    q_proj_states: torch.Tensor,
    k_proj_states: torch.Tensor,
    v_proj_states: torch.Tensor,
    g1: torch.Tensor,
    beta: torch.Tensor,
    core_attn_out: torch.Tensor,
    layer_name: str,
) -> None:
    """Custom-op entry point: look up the KDA layer registered under
    ``layer_name`` in the current forward context and run its ``_forward``.

    The result is written into ``core_attn_out`` in place (the op is
    registered with ``mutates_args=["core_attn_out"]``).
    """
    ctx: ForwardContext = get_forward_context()
    layer = ctx.no_compile_layers[layer_name]
    layer._forward(
        q_proj_states=q_proj_states,
        k_proj_states=k_proj_states,
        v_proj_states=v_proj_states,
        g1=g1,
        beta=beta,
        core_attn_out=core_attn_out,
    )
def kda_attention_fake(
    q_proj_states: torch.Tensor,
    k_proj_states: torch.Tensor,
    v_proj_states: torch.Tensor,
    g1: torch.Tensor,
    beta: torch.Tensor,
    core_attn_out: torch.Tensor,
    layer_name: str,
) -> None:
    """Fake (meta) implementation used during tracing: no computation."""
    return None
# Register `kda_attention` as a torch custom op so compiled graphs treat it as
# opaque. It mutates `core_attn_out` in place; the fake impl is a no-op used
# only for tracing / shape propagation.
direct_register_custom_op(
    op_name="kda_attention",
    op_func=kda_attention,
    mutates_args=["core_attn_out"],
    fake_impl=kda_attention_fake,
)
class KimiDeltaAttention(nn.Module, MambaBase):
    """Kimi Delta Attention (KDA): gated linear attention with short causal
    convolutions on q/k/v and a recurrent state cache.

    The layer registers itself in the compilation config's static forward
    context under its ``prefix`` so the ``torch.ops.vllm.kda_attention``
    custom op can dispatch back to ``_forward`` at runtime.
    """

    @property
    def mamba_type(self) -> str:
        # Reuses the GDN attention backend/metadata (see GDNAttentionMetadata
        # assertion in `_forward`).
        return "gdn_attention"

    def get_state_dtype(
        self,
    ) -> tuple[torch.dtype, torch.dtype, torch.dtype, torch.dtype]:
        """Dtypes for (conv_q, conv_k, conv_v, recurrent) cache tensors."""
        if self.model_config is None or self.cache_config is None:
            raise ValueError("model_config and cache_config must be set")
        return MambaStateDtypeCalculator.kda_state_dtype(
            self.model_config.dtype, self.cache_config.mamba_cache_dtype
        )

    def get_state_shape(
        self,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """Shapes for the per-request conv and recurrent state caches."""
        return MambaStateShapeCalculator.kda_state_shape(
            self.tp_size, self.num_heads, self.head_dim, conv_kernel_size=self.conv_size
        )

    def __init__(
        self,
        layer_idx: int,
        hidden_size: int,
        quant_config: QuantizationConfig | None = None,
        cache_config: CacheConfig | None = None,
        model_config: ModelConfig | None = None,
        rms_norm_eps: float = 1e-5,
        prefix: str = "",
        **kwargs,
    ) -> None:
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.tp_rank = get_tensor_model_parallel_rank()
        self.hidden_size = hidden_size
        self.model_config = model_config
        self.cache_config = cache_config
        if model_config is None:
            raise ValueError("model_config must be provided")
        # NOTE(review): `model_config` here is expected to expose
        # `linear_attn_config` (dict with head_dim / num_heads /
        # short_conv_kernel_size) — callers pass the HF config; confirm.
        kda_config = model_config.linear_attn_config
        self.head_dim = kda_config["head_dim"]
        self.num_heads = kda_config["num_heads"]
        self.layer_idx = layer_idx
        self.prefix = prefix
        # Heads are sharded across tensor-parallel ranks.
        assert self.num_heads % self.tp_size == 0
        self.local_num_heads = divide(self.num_heads, self.tp_size)
        projection_size = self.head_dim * self.num_heads
        self.conv_size = kda_config["short_conv_kernel_size"]
        self.q_proj = ColumnParallelLinear(
            self.hidden_size,
            projection_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.q_proj",
        )
        self.k_proj = ColumnParallelLinear(
            self.hidden_size,
            projection_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.k_proj",
        )
        self.v_proj = ColumnParallelLinear(
            self.hidden_size,
            projection_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.v_proj",
        )
        # Low-rank "forget gate" path: hidden -> head_dim -> projection_size.
        self.f_a_proj = ReplicatedLinear(
            self.hidden_size,
            self.head_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.f_a_proj",
        )
        self.f_b_proj = ColumnParallelLinear(
            self.head_dim,
            projection_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.f_b_proj",
        )
        # Per-channel gate bias, sharded on dim 0 across TP ranks.
        self.dt_bias = nn.Parameter(
            torch.empty(divide(projection_size, self.tp_size), dtype=torch.float32)
        )
        set_weight_attrs(self.dt_bias, {"weight_loader": sharded_weight_loader(0)})
        # Per-head mixing coefficient (sigmoid-ed in `forward`).
        self.b_proj = ColumnParallelLinear(
            self.hidden_size,
            self.num_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.b_proj",
        )
        # Short causal conv1d weights are stored as ColumnParallelLinear so
        # they shard/load like linear weights; reshaped below.
        self.q_conv1d = ColumnParallelLinear(
            input_size=self.conv_size,
            output_size=projection_size,
            bias=False,
            params_dtype=torch.float32,
            prefix=f"{prefix}.q_conv1d",
        )
        self.k_conv1d = ColumnParallelLinear(
            input_size=self.conv_size,
            output_size=projection_size,
            bias=False,
            params_dtype=torch.float32,
            prefix=f"{prefix}.k_conv1d",
        )
        self.v_conv1d = ColumnParallelLinear(
            input_size=self.conv_size,
            output_size=projection_size,
            bias=False,
            params_dtype=torch.float32,
            prefix=f"{prefix}.v_conv1d",
        )
        # unsqueeze to fit conv1d weights shape into the linear weights shape.
        # Can't do this in `weight_loader` since it already exists in
        # `ColumnParallelLinear` and `set_weight_attrs`
        # doesn't allow to override it
        self.q_conv1d.weight.data = self.q_conv1d.weight.data.unsqueeze(1)
        self.k_conv1d.weight.data = self.k_conv1d.weight.data.unsqueeze(1)
        self.v_conv1d.weight.data = self.v_conv1d.weight.data.unsqueeze(1)
        # Per-head decay parameter; sharded on the head axis (dim 2).
        self.A_log = nn.Parameter(
            torch.empty(1, 1, self.local_num_heads, 1, dtype=torch.float32)
        )
        set_weight_attrs(self.A_log, {"weight_loader": sharded_weight_loader(2)})
        # Low-rank output-gate path (g2), applied via the gated RMSNorm.
        self.g_a_proj = ReplicatedLinear(
            self.hidden_size,
            self.head_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.g_a_proj",
        )
        self.g_b_proj = ColumnParallelLinear(
            self.head_dim,
            projection_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.g_b_proj",
        )
        self.o_norm = FusedRMSNormGated(
            self.head_dim, eps=rms_norm_eps, activation="sigmoid"
        )
        self.o_proj = RowParallelLinear(
            projection_size,
            self.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # Register this layer so the custom op can find it by name.
        compilation_config = get_current_vllm_config().compilation_config
        if prefix in compilation_config.static_forward_context:
            raise ValueError(f"Duplicate layer name: {prefix}")
        compilation_config.static_forward_context[prefix] = self

    def forward(
        self,
        hidden_states: torch.Tensor,
        positions: torch.Tensor,
        output: torch.Tensor,
    ) -> None:
        """Compute KDA attention for `hidden_states` and write into `output`.

        Assumes hidden_states is (num_tokens, hidden_size); the heavy lifting
        (convs + recurrence) happens inside the custom op, which fills
        `core_attn_out` in place.
        """
        num_tokens = hidden_states.size(0)
        q = self.q_proj(hidden_states)[0]
        k = self.k_proj(hidden_states)[0]
        v = self.v_proj(hidden_states)[0]
        beta = self.b_proj(hidden_states)[0].float().sigmoid()
        g1 = self.f_b_proj(self.f_a_proj(hidden_states)[0])[0]
        g1 = fused_kda_gate(g1, self.A_log, self.head_dim, g_bias=self.dt_bias)
        # Kernels expect a leading batch dim of 1.
        beta = beta.unsqueeze(0)
        g1 = g1.unsqueeze(0)
        g_proj_states = self.g_b_proj(self.g_a_proj(hidden_states)[0])[0]
        g2 = rearrange(g_proj_states, "... (h d) -> ... h d", d=self.head_dim)
        # Output buffer mutated by the custom op below.
        core_attn_out = torch.zeros(
            (1, num_tokens, self.local_num_heads, self.head_dim),
            dtype=hidden_states.dtype,
            device=hidden_states.device,
        )
        torch.ops.vllm.kda_attention(
            q,
            k,
            v,
            g1,
            beta,
            core_attn_out,
            self.prefix,
        )
        # Gated norm with g2, flatten heads, then the output projection.
        core_attn_out = self.o_norm(core_attn_out, g2)
        core_attn_out = rearrange(core_attn_out, "1 n h d -> n (h d)")
        output[:] = self.o_proj(core_attn_out)[0]

    def _forward(
        self,
        q_proj_states: torch.Tensor,
        k_proj_states: torch.Tensor,
        v_proj_states: torch.Tensor,
        g1: torch.Tensor,
        beta: torch.Tensor,
        core_attn_out: torch.Tensor,
    ) -> None:
        """Real implementation behind the custom op.

        Applies the short causal convolutions (prefill vs decode variants)
        and the chunked / fused-recurrent KDA kernels, updating the
        per-request conv and recurrent state caches in place.
        """
        forward_context = get_forward_context()
        attn_metadata: AttentionMetadata = forward_context.attn_metadata
        if attn_metadata is None:
            # # V1 profile run
            return
        assert isinstance(attn_metadata, dict)
        attn_metadata = attn_metadata[self.prefix]
        assert isinstance(attn_metadata, GDNAttentionMetadata)
        has_initial_state = attn_metadata.has_initial_state
        non_spec_query_start_loc = attn_metadata.non_spec_query_start_loc
        non_spec_state_indices_tensor = attn_metadata.non_spec_state_indices_tensor  # noqa: E501
        num_actual_tokens = attn_metadata.num_actual_tokens
        constant_caches = self.kv_cache[forward_context.virtual_engine]
        # Drop padding tokens beyond the actual batch.
        q_proj_states = q_proj_states[:num_actual_tokens]
        k_proj_states = k_proj_states[:num_actual_tokens]
        v_proj_states = v_proj_states[:num_actual_tokens]
        g1 = g1[:num_actual_tokens]
        beta = beta[:num_actual_tokens]
        (conv_state_q, conv_state_k, conv_state_v, recurrent_state) = constant_caches
        # deal with strides
        conv_state_q = conv_state_q.transpose(-1, -2)
        conv_state_k = conv_state_k.transpose(-1, -2)
        conv_state_v = conv_state_v.transpose(-1, -2)
        # Collapse the singleton dim inserted at init back out for the conv
        # kernels, which take 2D (channels, kernel) weights.
        q_conv_weights = self.q_conv1d.weight.view(
            self.q_conv1d.weight.size(0), self.q_conv1d.weight.size(2)
        )
        k_conv_weights = self.k_conv1d.weight.view(
            self.k_conv1d.weight.size(0), self.k_conv1d.weight.size(2)
        )
        v_conv_weights = self.v_conv1d.weight.view(
            self.v_conv1d.weight.size(0), self.v_conv1d.weight.size(2)
        )
        if attn_metadata.num_prefills > 0:
            # Prefill path: varlen conv over (dim, tokens) layout.
            q_proj_states = q_proj_states.transpose(0, 1)
            k_proj_states = k_proj_states.transpose(0, 1)
            v_proj_states = v_proj_states.transpose(0, 1)
            q = causal_conv1d_fn(
                q_proj_states,
                q_conv_weights,
                self.q_conv1d.bias,
                activation="silu",
                conv_states=conv_state_q,
                has_initial_state=has_initial_state,
                cache_indices=non_spec_state_indices_tensor,
                query_start_loc=non_spec_query_start_loc,
                metadata=attn_metadata,
            ).transpose(0, 1)
            k = causal_conv1d_fn(
                k_proj_states,
                k_conv_weights,
                self.k_conv1d.bias,
                activation="silu",
                conv_states=conv_state_k,
                has_initial_state=has_initial_state,
                cache_indices=non_spec_state_indices_tensor,
                query_start_loc=non_spec_query_start_loc,
                metadata=attn_metadata,
            ).transpose(0, 1)
            v = causal_conv1d_fn(
                v_proj_states,
                v_conv_weights,
                self.v_conv1d.bias,
                activation="silu",
                conv_states=conv_state_v,
                has_initial_state=has_initial_state,
                cache_indices=non_spec_state_indices_tensor,
                query_start_loc=non_spec_query_start_loc,
                metadata=attn_metadata,
            ).transpose(0, 1)
        else:
            # Decode path: single-token conv state update per request.
            decode_conv_indices = non_spec_state_indices_tensor[
                : attn_metadata.num_actual_tokens
            ]
            q = causal_conv1d_update(
                q_proj_states,
                conv_state_q,
                q_conv_weights,
                self.q_conv1d.bias,
                activation="silu",
                conv_state_indices=decode_conv_indices,
                validate_data=True,
            )
            k = causal_conv1d_update(
                k_proj_states,
                conv_state_k,
                k_conv_weights,
                self.k_conv1d.bias,
                activation="silu",
                conv_state_indices=decode_conv_indices,
                validate_data=True,
            )
            v = causal_conv1d_update(
                v_proj_states,
                conv_state_v,
                v_conv_weights,
                self.v_conv1d.bias,
                activation="silu",
                conv_state_indices=decode_conv_indices,
                validate_data=True,
            )
        q, k, v = map(
            lambda x: rearrange(x, "n (h d) -> 1 n h d", d=self.head_dim), (q, k, v)
        )
        if attn_metadata.num_prefills > 0:
            # Zero the recurrent state of fresh (no-initial-state) requests
            # before gathering initial states for the chunked kernel.
            zero_idx = non_spec_state_indices_tensor[~has_initial_state]
            recurrent_state[zero_idx] = 0
            initial_state = recurrent_state[non_spec_state_indices_tensor].contiguous()
            (
                core_attn_out_non_spec,
                last_recurrent_state,
            ) = chunk_kda(
                q=q,
                k=k,
                v=v,
                g=g1,
                beta=beta,
                initial_state=initial_state,
                output_final_state=True,
                use_qk_l2norm_in_kernel=True,
                cu_seqlens=non_spec_query_start_loc,
            )
            # Init cache
            recurrent_state[non_spec_state_indices_tensor] = last_recurrent_state
        else:
            # Decode: fused recurrent kernel updates the cache in place via
            # ssm_state_indices.
            (
                core_attn_out_non_spec,
                last_recurrent_state,
            ) = fused_recurrent_kda(
                q=q,
                k=k,
                v=v,
                g=g1,
                beta=beta,
                initial_state=recurrent_state,
                use_qk_l2norm_in_kernel=True,
                cu_seqlens=non_spec_query_start_loc[: attn_metadata.num_decodes + 1],
                ssm_state_indices=non_spec_state_indices_tensor,
            )
        # Write results into the caller-provided output buffer.
        core_attn_out[0, :num_actual_tokens] = core_attn_out_non_spec[
            0, :num_actual_tokens
        ]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/kda.py",
"license": "Apache License 2.0",
"lines": 405,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/kimi_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
import torch
from torch import nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig
from vllm.distributed import (
get_pp_group,
get_tensor_model_parallel_world_size,
tensor_model_parallel_all_reduce,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.kda import KimiDeltaAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
from vllm.model_executor.layers.mla import MLAModules, MultiHeadLatentAttentionWrapper
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs.kimi_linear import KimiLinearConfig
from .interfaces import HasInnerState, IsHybrid, MixtureOfExperts, SupportsPP
from .utils import (
PPMissingLayer,
is_pp_missing_parameter,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)
class KimiMLP(nn.Module):
    """SwiGLU MLP block: fused gate/up column-parallel projection, SiLU-and-
    multiply activation, then a row-parallel down projection."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        reduce_results: bool = True,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # gate_proj and up_proj are fused into a single merged projection.
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        # reduce_results=False lets callers (e.g. shared experts) defer the
        # TP all-reduce and combine it with other partial sums.
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=reduce_results,
            prefix=f"{prefix}.down_proj",
        )
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x
class KimiMoE(nn.Module):
    """Mixture-of-experts block: a replicated router gate, fused routed
    experts, and optional shared experts whose output is added to the routed
    result before the tensor-parallel all-reduce."""

    def __init__(
        self,
        config: KimiLinearConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        layer_idx: int = 0,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        moe_intermediate_size = config.moe_intermediate_size
        num_experts = config.num_experts
        moe_renormalize = config.moe_renormalize
        self.tp_size = get_tensor_model_parallel_world_size()
        self.routed_scaling_factor = config.routed_scaling_factor
        self.num_shared_experts = config.num_shared_experts
        self.layer_idx = layer_idx
        if config.hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {config.hidden_act}. "
                "Only silu is supported for now."
            )
        # Gate always runs at half / full precision for now.
        self.gate = ReplicatedLinear(
            hidden_size,
            num_experts,
            bias=False,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )
        # Per-expert routing-score correction bias (loaded from checkpoint).
        self.gate.e_score_correction_bias = nn.Parameter(torch.empty(num_experts))
        self.experts = FusedMoE(
            num_experts=num_experts,
            top_k=config.num_experts_per_token,
            hidden_size=hidden_size,
            intermediate_size=moe_intermediate_size,
            reduce_results=False,
            renormalize=moe_renormalize,
            quant_config=quant_config,
            use_grouped_topk=config.use_grouped_topk,
            num_expert_group=config.num_expert_group,
            topk_group=config.topk_group,
            prefix=f"{prefix}.experts",
            scoring_func=config.moe_router_activation_func,
            e_score_correction_bias=self.gate.e_score_correction_bias,
        )
        if self.num_shared_experts is not None:
            intermediate_size = moe_intermediate_size * self.num_shared_experts
            # Shared experts run for every token; their down_proj skips the
            # all-reduce so it can be merged with the routed-expert reduce.
            self.shared_experts = KimiMLP(
                hidden_size=config.hidden_size,
                intermediate_size=intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                reduce_results=False,
                prefix=f"{prefix}.shared_experts",
            )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route tokens through the experts; returns a tensor with the same
        (num_tokens, hidden_size) shape as the input."""
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_size)
        # Fix: initialize so the check below is safe when shared experts are
        # disabled (previously raised UnboundLocalError for
        # num_shared_experts=None configs).
        shared_output = None
        if self.num_shared_experts is not None:
            shared_output = self.shared_experts(hidden_states)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = (
            self.experts(hidden_states=hidden_states, router_logits=router_logits)
            * self.routed_scaling_factor
        )
        if shared_output is not None:
            final_hidden_states = final_hidden_states + shared_output
        if self.tp_size > 1:
            # Single all-reduce covers routed + shared partial sums.
            final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states)
        return final_hidden_states.view(num_tokens, hidden_size)
class KimiMLAAttention(nn.Module):
    """Multi-head latent attention (MLA) for Kimi full-attention layers.

    Main reference: DeepseekV2 vllm Implementation. This variant requires
    NoPE (no rotary embedding) and no q LoRA (see assertions in __init__).
    """

    def __init__(
        self,
        config: KimiLinearConfig,
        hidden_size: int,
        num_heads: int,
        qk_nope_head_dim: int,
        qk_rope_head_dim: int,
        v_head_dim: int,
        q_lora_rank: int | None,
        kv_lora_rank: int,
        use_nope: bool = False,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        **kwargs,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.num_heads = num_heads
        tp_size = get_tensor_model_parallel_world_size()
        self.num_local_heads = num_heads // tp_size
        self.scaling = self.qk_head_dim**-0.5
        self.use_nope = use_nope
        # Only the NoPE + no-q-LoRA configuration is implemented here.
        assert self.use_nope is True
        assert self.q_lora_rank is None
        assert num_heads % tp_size == 0
        # Compressed KV + rope projection (replicated; shared across heads).
        self.kv_a_proj_with_mqa = ReplicatedLinear(
            self.hidden_size,
            self.kv_lora_rank + self.qk_rope_head_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.kv_a_proj_with_mqa",
        )
        self.q_proj = ColumnParallelLinear(
            self.hidden_size,
            self.num_heads * self.qk_head_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.q_proj",
        )
        self.kv_a_layernorm = RMSNorm(
            self.kv_lora_rank,
            eps=config.rms_norm_eps,
        )
        # Expands the latent KV to per-head nope-K and V.
        self.kv_b_proj = ColumnParallelLinear(
            self.kv_lora_rank,
            self.num_heads * (self.qk_nope_head_dim + self.v_head_dim),
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.kv_b_proj",
        )
        self.o_proj = RowParallelLinear(
            self.num_heads * self.v_head_dim,
            self.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # rotary_emb=None because this model uses NoPE for MLA layers.
        mla_modules = MLAModules(
            kv_a_layernorm=self.kv_a_layernorm,
            kv_b_proj=self.kv_b_proj,
            rotary_emb=None,
            o_proj=self.o_proj,
            fused_qkv_a_proj=None,
            kv_a_proj_with_mqa=self.kv_a_proj_with_mqa,
            q_a_layernorm=None,
            q_b_proj=None,
            q_proj=self.q_proj,
            indexer=None,
            is_sparse=False,
            topk_indices_buffer=None,
        )
        self.mla_attn = MultiHeadLatentAttentionWrapper(
            self.hidden_size,
            self.num_local_heads,
            self.scaling,
            self.qk_nope_head_dim,
            self.qk_rope_head_dim,
            self.v_head_dim,
            self.q_lora_rank,
            self.kv_lora_rank,
            mla_modules,
            cache_config,
            quant_config,
            prefix,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        output: torch.Tensor,
    ) -> None:
        # Writes the attention result into the caller-provided buffer.
        output[:] = self.mla_attn(positions, hidden_states)
class KimiDecoderLayer(nn.Module):
    """One Kimi decoder layer: KDA linear attention or MLA full attention
    (chosen per layer index by the config), followed by a dense MLP or MoE."""

    def __init__(
        self,
        config: KimiLinearConfig,
        layer_idx: int,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        parallel_config: ParallelConfig | None = None,
        model_config: ModelConfig | None = None,
        prefix: str = "",
        **kwargs,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        self.is_moe = config.is_moe
        if config.is_kda_layer(layer_idx):
            # NOTE(review): the HF config is passed as `model_config` here —
            # KimiDeltaAttention reads `linear_attn_config` from it; confirm
            # it also satisfies the other model_config accesses.
            self.self_attn = KimiDeltaAttention(
                layer_idx=layer_idx,
                hidden_size=config.hidden_size,
                quant_config=quant_config,
                cache_config=cache_config,
                model_config=config,
                prefix=f"{prefix}.self_attn",
            )
        else:
            self.self_attn = KimiMLAAttention(
                layer_idx=layer_idx,
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                quant_config=quant_config,
                cache_config=cache_config,
                model_config=model_config,
                prefix=f"{prefix}.self_attn",
                config=config,
                qk_nope_head_dim=config.qk_nope_head_dim,
                qk_rope_head_dim=config.qk_rope_head_dim,
                v_head_dim=config.v_head_dim,
                q_lora_rank=config.q_lora_rank,
                kv_lora_rank=config.kv_lora_rank,
                use_nope=config.mla_use_nope,
            )
        # MoE replaces the dense MLP only past `first_k_dense_replace` and at
        # the configured layer frequency.
        if (
            self.is_moe
            and config.num_experts is not None
            and layer_idx >= config.first_k_dense_replace
            and layer_idx % config.moe_layer_freq == 0
        ):
            self.block_sparse_moe = KimiMoE(
                config=config,
                quant_config=quant_config,
                prefix=f"{prefix}.block_sparse_moe",
            )
            # `mlp` aliases the MoE so forward() is uniform across layers.
            self.mlp = self.block_sparse_moe
        else:
            self.mlp = KimiMLP(
                hidden_size=self.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Pre-norm attention + MLP with fused residual handling.

        Returns (hidden_states, residual) for the next layer.
        """
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        # Attention modules write into a caller-provided output buffer.
        attn_output = torch.empty_like(hidden_states)
        self.self_attn(
            hidden_states=hidden_states,
            positions=positions,
            output=attn_output,
        )
        hidden_states = attn_output
        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
@support_torch_compile
class KimiLinearModel(nn.Module):
    """Kimi backbone: embeddings, hybrid KDA/MLA decoder stack, final norm.

    Supports pipeline parallelism: only the first rank owns the embedding
    table and only the last rank owns the final norm.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_text_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        parallel_config = vllm_config.parallel_config
        self.config = config
        self.vocab_size = config.vocab_size
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            # Placeholder so state-dict names line up across PP ranks.
            self.embed_tokens = PPMissingLayer()
        extra_kwargs = {}

        def get_layer(prefix: str):
            # The layer index is encoded as the last dotted component of the
            # parameter prefix (e.g. "model.layers.12").
            layer_idx = int(prefix.rsplit(".", 1)[1])
            return KimiDecoderLayer(
                config,
                layer_idx,
                cache_config,
                quant_config,
                parallel_config,
                model_config,
                prefix,
                **extra_kwargs,
            )

        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            get_layer,
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        world_size = get_tensor_model_parallel_world_size()
        assert config.num_attention_heads % world_size == 0, (
            "num_attention_heads must be divisible by world_size"
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings (valid on the first PP rank only)."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor:
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Later PP ranks resume from the previous rank's activations.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for _, layer in enumerate(self.layers[self.start_layer : self.end_layer]):
            hidden_states, residual = layer(
                positions=positions,
                hidden_states=hidden_states,
                residual=residual,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
class KimiLinearForCausalLM(
    nn.Module, HasInnerState, SupportsPP, MixtureOfExperts, IsHybrid
):
    """Causal-LM wrapper around KimiLinearModel: LM head, logits processing,
    mamba-state (KDA cache) descriptors, and checkpoint weight loading."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.model_config = vllm_config.model_config
        self.vllm_config = vllm_config
        self.config = self.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.quant_config = quant_config
        self.model = KimiLinearModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                self.config.vocab_size,
                self.config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
        else:
            self.lm_head = PPMissingLayer()
        logit_scale = getattr(self.config, "logit_scale", 1.0)
        self.logits_processor = LogitsProcessor(
            self.config.vocab_size, scale=logit_scale
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Convert token ids to embeddings via the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the backbone; returns hidden states (or intermediate tensors
        on non-final pipeline-parallel ranks)."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds, **kwargs
        )
        return hidden_states

    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, torch.dtype, torch.dtype, torch.dtype]:
        """Dtypes for the (conv_q, conv_k, conv_v, recurrent) KDA caches."""
        return MambaStateDtypeCalculator.kda_state_dtype(
            vllm_config.model_config.dtype, vllm_config.cache_config.mamba_cache_dtype
        )

    @classmethod
    def get_mamba_state_shape_from_config(
        cls, vllm_config: "VllmConfig"
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """Shapes for the KDA caches, accounting for TP and spec decoding."""
        parallel_config = vllm_config.parallel_config
        hf_config = vllm_config.model_config.hf_config
        tp_size = parallel_config.tensor_parallel_size
        num_spec = (
            vllm_config.speculative_config.num_speculative_tokens
            if vllm_config.speculative_config
            else 0
        )
        return MambaStateShapeCalculator.kda_state_shape(
            tp_size,
            hf_config.linear_attn_config["num_heads"],
            hf_config.linear_attn_config["head_dim"],
            conv_kernel_size=hf_config.linear_attn_config["short_conv_kernel_size"],
            num_spec=num_spec,
        )

    @classmethod
    def get_mamba_state_copy_func(
        cls,
    ) -> tuple[
        MambaStateCopyFunc, MambaStateCopyFunc, MambaStateCopyFunc, MambaStateCopyFunc
    ]:
        """Copy functions for migrating KDA cache entries between slots."""
        return MambaStateCopyFuncCalculator.kda_state_copy_func()

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits."""
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights into this model.

        Handles three cases per weight: stacked gate/up projections, fused-MoE
        expert weights, and plain parameters. Spec-decode (MTP) layers and
        rotary-embedding caches are skipped.

        Returns:
            The set of parameter names that were actually loaded, so callers
            can verify checkpoint coverage.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        if self.config.is_moe:
            # Params for weights, fp8 weight scales, fp8 activation scales
            # (param_name, weight_name, expert_id, shard_id)
            expert_params_mapping = FusedMoE.make_expert_params_mapping(
                self,
                ckpt_gate_proj_name="w1",
                ckpt_down_proj_name="w2",
                ckpt_up_proj_name="w3",
                num_experts=self.config.num_experts,
            )
        else:
            expert_params_mapping = []
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for args in weights:
            name, loaded_weight = args[:2]
            kwargs = args[2] if len(args) > 2 else {}
            if "rotary_emb.inv_freq" in name:
                continue
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            if spec_layer is not None:
                continue  # skip spec decode layers for main model
            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for idx, (param_name, weight_name, expert_id, shard_id) in enumerate(
                    expert_params_mapping
                ):
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(
                        param,
                        loaded_weight,
                        name,
                        expert_id=expert_id,
                        shard_id=shard_id,
                    )
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if (
                        name.endswith(".bias")
                        and name not in params_dict
                        and not self.config.is_linear_attn
                    ):  # noqa: E501
                        continue
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight, **kwargs)
            loaded_params.add(name)
        # Fix: previously the accumulated set was built but never returned,
        # so callers could not verify which parameters were loaded.
        return loaded_params
def get_spec_layer_idx_from_weight_name(
    config: KimiLinearConfig, weight_name: str
) -> int | None:
    """Return the layer index of a speculative-decode (MTP) weight name.

    Spec layers are appended after the main stack, so they occupy indices
    ``num_hidden_layers .. num_hidden_layers + num_nextn_predict_layers - 1``.
    Returns None when the name does not belong to a spec layer.
    """
    num_spec = getattr(config, "num_nextn_predict_layers", 0)
    if num_spec > 0:
        base = config.num_hidden_layers
        for offset in range(num_spec):
            if weight_name.startswith(f"model.layers.{base + offset}."):
                return base + offset
    return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/kimi_linear.py",
"license": "Apache License 2.0",
"lines": 609,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/kimi_linear.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from transformers.configuration_utils import PretrainedConfig
from vllm.logger import init_logger
logger = init_logger(__name__)
class KimiLinearConfig(PretrainedConfig):
    """HF-style configuration for Kimi Linear hybrid (KDA + MLA, dense/MoE)
    models."""

    model_type = "kimi_linear"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        model_type="kimi_linear",
        vocab_size=163840,
        hidden_size=4096,
        head_dim=None,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        rope_parameters=None,
        tie_word_embeddings=False,
        moe_intermediate_size: int | None = None,
        moe_renormalize: bool = True,
        moe_router_activation_func: str = "sigmoid",
        num_experts: int | None = None,
        num_experts_per_token: int | None = None,
        num_shared_experts: int = 0,
        routed_scaling_factor: float = 1.0,
        first_k_dense_replace: int = 0,
        moe_layer_freq: int = 1,
        use_grouped_topk: bool = True,
        num_expert_group: int = 1,
        topk_group: int = 1,
        q_lora_rank: int | None = None,
        kv_lora_rank: int | None = None,
        qk_nope_head_dim: int | None = None,
        qk_rope_head_dim: int | None = None,
        v_head_dim: int | None = None,
        mla_use_nope: bool | None = False,
        num_nextn_predict_layers: int = 0,
        linear_attn_config: dict | None = None,
        **kwargs,
    ):
        self.model_type = model_type
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.head_dim = (
            head_dim if head_dim is not None else hidden_size // num_attention_heads
        )
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Try to set `rope_scaling` if available, otherwise use `rope_parameters`
        rope_scaling = kwargs.pop("rope_scaling", None)
        rope_parameters = rope_scaling or rope_parameters or {"rope_type": "default"}
        rope_theta = kwargs.pop("rope_theta", 10000.0)
        if "rope_theta" not in rope_parameters:
            rope_parameters["rope_theta"] = rope_theta
        self.rope_parameters = rope_parameters
        # MLA config (all None + mla_use_nope=False means no MLA layers).
        self.q_lora_rank = q_lora_rank
        self.kv_lora_rank = kv_lora_rank
        self.qk_nope_head_dim = qk_nope_head_dim
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.mla_use_nope = mla_use_nope
        # moe config
        self.num_experts = num_experts
        self.num_experts_per_token = num_experts_per_token
        self.moe_renormalize = moe_renormalize
        self.num_shared_experts = num_shared_experts
        self.routed_scaling_factor = routed_scaling_factor
        self.moe_router_activation_func = moe_router_activation_func
        assert self.moe_router_activation_func in ("softmax", "sigmoid")
        self.moe_intermediate_size = moe_intermediate_size
        self.first_k_dense_replace = first_k_dense_replace
        self.moe_layer_freq = moe_layer_freq
        self.use_grouped_topk = use_grouped_topk
        self.num_expert_group = num_expert_group
        self.topk_group = topk_group
        self.num_nextn_predict_layers = num_nextn_predict_layers
        if linear_attn_config is not None:
            # Validate only when provided.
            assert linear_attn_config["kda_layers"] is not None
            assert linear_attn_config["full_attn_layers"] is not None
        # Fix: bind the attribute unconditionally — previously it was only
        # set when not None, so `is_linear_attn` / `is_kda_layer` raised
        # AttributeError for configs without a linear_attn_config entry.
        self.linear_attn_config = linear_attn_config
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    @property
    def is_mla(self):
        """True when any MLA-specific option is configured."""
        return (
            self.q_lora_rank is not None
            or self.kv_lora_rank is not None
            or self.qk_nope_head_dim is not None
            or self.qk_rope_head_dim is not None
            or self.v_head_dim is not None
            or self.mla_use_nope is True
        )

    @property
    def is_moe(self):
        """True when a routed-experts count is configured."""
        return self.num_experts is not None

    @property
    def is_linear_attn(self) -> bool:
        """True when at least one KDA (linear-attention) layer is configured."""
        return not (
            self.linear_attn_config is None
            or (
                isinstance(self.linear_attn_config, dict)
                and self.linear_attn_config["kda_layers"] is not None
                and len(self.linear_attn_config["kda_layers"]) == 0
            )
        )

    def is_kda_layer(self, layer_idx: int):
        """True when 0-based ``layer_idx`` is a KDA layer (the config lists
        1-based layer numbers)."""
        return (
            self.linear_attn_config is not None
            and (layer_idx + 1) in self.linear_attn_config["kda_layers"]
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/kimi_linear.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/benchmarks/sweep/cli.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG
from .plot import SweepPlotArgs
from .plot import main as plot_main
from .plot_pareto import SweepPlotParetoArgs
from .plot_pareto import main as plot_pareto_main
from .serve import SweepServeArgs
from .serve import main as serve_main
from .serve_workload import SweepServeWorkloadArgs
from .serve_workload import main as serve_workload_main
from .startup import SweepStartupArgs
from .startup import main as startup_main
# (CLI-args class, entrypoint) pairs; one `vllm bench sweep <name>` subparser
# is registered per entry.
SUBCOMMANDS = (
    (SweepServeArgs, serve_main),
    (SweepServeWorkloadArgs, serve_workload_main),
    (SweepStartupArgs, startup_main),
    (SweepPlotArgs, plot_main),
    (SweepPlotParetoArgs, plot_pareto_main),
)
def add_cli_args(parser: argparse.ArgumentParser):
    """Register one subparser per sweep subcommand on ``parser``.

    Each subparser stores its entrypoint in ``dispatch_function`` so that
    ``main`` can run it after parsing.
    """
    subparsers = parser.add_subparsers(required=True, dest="sweep_type")
    for args_cls, entry in SUBCOMMANDS:
        sub = subparsers.add_parser(
            args_cls.parser_name,
            description=args_cls.parser_help,
            usage=f"vllm bench sweep {args_cls.parser_name} [options]",
        )
        sub.set_defaults(dispatch_function=entry)
        args_cls.add_cli_args(sub)
        sub.epilog = VLLM_SUBCMD_PARSER_EPILOG.format(
            subcmd=f"sweep {args_cls.parser_name}"
        )
def main(args: argparse.Namespace):
    """Run the entrypoint selected during argument parsing."""
    dispatch = args.dispatch_function
    dispatch(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/benchmarks/sweep/cli.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/cli/benchmark/sweep.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
from vllm.benchmarks.sweep.cli import add_cli_args, main
from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase
class BenchmarkSweepSubcommand(BenchmarkSubcommandBase):
    """The `sweep` subcommand for `vllm bench`."""

    name = "sweep"
    help = "Benchmark for a parameter sweep."

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> None:
        # Delegate CLI wiring to vllm.benchmarks.sweep.cli.add_cli_args.
        add_cli_args(parser)

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        # Runs the dispatch_function chosen during argument parsing.
        main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/cli/benchmark/sweep.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/benchmarks/test_random_multimodal_dataset_video.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import os
from tempfile import NamedTemporaryFile
from typing import Any, cast
import cv2
import pytest
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from vllm.benchmarks.datasets import RandomMultiModalDataset, SampleRequest
@pytest.fixture(scope="session")
def hf_tokenizer() -> PreTrainedTokenizerBase:
    """Use a small, commonly available tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    return tokenizer
@pytest.fixture
def video_dataset() -> RandomMultiModalDataset:
    """Provide a seeded RandomMultiModalDataset for deterministic tests."""
    dataset = RandomMultiModalDataset(random_seed=42)
    return dataset
@pytest.mark.benchmark
def test_generate_synthetic_video_different_seeds():
    """Different RNG seeds must yield different synthetic videos."""
    ds_a = RandomMultiModalDataset(random_seed=123)
    ds_b = RandomMultiModalDataset(random_seed=456)
    w, h, frames = 64, 48, 8
    vid_a = ds_a.generate_synthetic_video(w, h, frames)
    vid_b = ds_b.generate_synthetic_video(w, h, frames)
    # Seeding differently should change the encoded bytes.
    assert vid_a["bytes"] != vid_b["bytes"]
@pytest.mark.benchmark
def test_map_config_to_modality(video_dataset: RandomMultiModalDataset):
    """Test modality mapping for different configurations."""
    # A single frame maps to an image; several frames map to a video.
    for image_cfg in [(256, 256, 1), (720, 1280, 1)]:
        assert video_dataset.map_config_to_modality(image_cfg) == "image"
    for video_cfg in [(256, 256, 8), (720, 1280, 16), (64, 64, 32)]:
        assert video_dataset.map_config_to_modality(video_cfg) == "video"
    # Non-positive frame counts are rejected.
    for bad_cfg in [(256, 256, 0), (256, 256, -1)]:
        with pytest.raises(ValueError, match="Invalid multimodal item configuration"):
            video_dataset.map_config_to_modality(bad_cfg)
@pytest.mark.benchmark
def test_generate_mm_item_video(video_dataset: RandomMultiModalDataset):
    """Test generating multimodal items for video configurations."""
    height, width, num_frames = 64, 48, 8
    item = video_dataset.generate_mm_item((height, width, num_frames))
    # The item must follow the OpenAI API video format.
    assert isinstance(item, dict)
    assert item["type"] == "video_url"
    assert "video_url" in item
    assert "url" in item["video_url"]
    data_url = item["video_url"]["url"]
    assert data_url.startswith("data:video/mp4;base64,")
    # Decode the base64 payload and check it is non-empty.
    payload = base64.b64decode(data_url.split(",")[1])
    assert len(payload) > 0
    # Round-trip through a temp file so OpenCV can decode the mp4.
    with NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp_path = tmp.name
        tmp.write(payload)
    try:
        capture = cv2.VideoCapture(tmp_path)
        assert capture.isOpened()
        assert int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) == 8
        assert int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) == 48
        assert int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) == 64
        capture.release()
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
@pytest.mark.benchmark
def test_generate_mm_item_image(video_dataset: RandomMultiModalDataset):
    """Test generating multimodal items for image configurations."""
    # num_frames == 1 means the generated item is an image.
    item = video_dataset.generate_mm_item((64, 48, 1))
    # The item must follow the OpenAI API image format.
    assert isinstance(item, dict)
    assert item["type"] == "image_url"
    assert "image_url" in item
    assert "url" in item["image_url"]
    assert item["image_url"]["url"].startswith("data:image/jpeg;base64,")
@pytest.mark.benchmark
def test_generate_mm_item_invalid_config(video_dataset: RandomMultiModalDataset):
    """A zero-frame configuration must be rejected."""
    bad_config = (256, 256, 0)
    with pytest.raises(ValueError, match="Invalid multimodal item configuration"):
        video_dataset.generate_mm_item(bad_config)
@pytest.mark.benchmark
def test_sample_with_video_buckets(
    video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
):
    """Test sampling with video bucket configurations."""
    bucket_config = {
        (64, 64, 1): 0.3,  # Images
        (64, 64, 8): 0.7,  # Videos
    }
    samples = video_dataset.sample(
        tokenizer=hf_tokenizer,
        num_requests=5,
        base_items_per_request=2,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 5, "video": 3},
        bucket_config=bucket_config,
        input_len=20,
        output_len=5,
    )
    assert len(samples) == 5
    num_videos = 0
    num_images = 0
    for request in samples:
        assert isinstance(request, SampleRequest)
        assert request.multi_modal_data is not None
        assert isinstance(request.multi_modal_data, list)
        items = cast(list[dict[str, Any]], request.multi_modal_data)
        assert len(items) == 2  # base_items_per_request
        for entry in items:
            if entry["type"] == "video_url":
                num_videos += 1
                assert entry["video_url"]["url"].startswith("data:video/mp4;base64,")
            elif entry["type"] == "image_url":
                num_images += 1
                assert entry["image_url"]["url"].startswith("data:image/jpeg;base64,")
    # With a 0.7 video probability both modalities should appear.
    assert num_videos > 0
    assert num_images > 0
@pytest.mark.benchmark
def test_sample_video_only_buckets(
    video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
):
    """Test sampling with only video buckets."""
    samples = video_dataset.sample(
        tokenizer=hf_tokenizer,
        num_requests=3,
        base_items_per_request=1,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 0, "video": 2},
        bucket_config={(64, 64, 8): 1.0},  # Only videos
        input_len=20,
        output_len=5,
    )
    assert len(samples) == 3
    # Every request should carry exactly one video item.
    for request in samples:
        assert isinstance(request, SampleRequest)
        assert request.multi_modal_data is not None
        assert isinstance(request.multi_modal_data, list)
        items = cast(list[dict[str, Any]], request.multi_modal_data)
        assert len(items) == 1
        only_item = items[0]
        assert only_item["type"] == "video_url"
        assert only_item["video_url"]["url"].startswith("data:video/mp4;base64,")
@pytest.mark.benchmark
def test_sample_respects_video_limits(
    video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
):
    """Test that sampling respects video limits per prompt."""
    # Allow at most one video per prompt.
    samples = video_dataset.sample(
        tokenizer=hf_tokenizer,
        num_requests=3,
        base_items_per_request=1,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 0, "video": 1},
        bucket_config={(64, 64, 8): 1.0},  # Only videos
        input_len=20,
        output_len=5,
    )
    assert len(samples) == 3
    for request in samples:
        items = cast(list[dict[str, Any]], request.multi_modal_data)
        assert len(items) <= 1  # Should respect video limit
@pytest.mark.benchmark
def test_sample_mixed_buckets_with_zero_probability(
    video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
):
    """Test sampling with mixed buckets including zero probability entries."""
    bucket_config = {
        (64, 64, 1): 0.5,  # Images
        (64, 64, 8): 0.5,  # Videos
        (128, 128, 16): 0.0,  # Zero probability videos (should be ignored)
    }
    samples = video_dataset.sample(
        tokenizer=hf_tokenizer,
        num_requests=4,
        base_items_per_request=2,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 2, "video": 2},
        bucket_config=bucket_config,
        input_len=20,
        output_len=5,
    )
    assert len(samples) == 4
    # Zero-probability 128x128 buckets must never be drawn; verify the
    # decoded dimensions of every emitted video.
    for request in samples:
        items = cast(list[dict[str, Any]], request.multi_modal_data)
        for entry in items:
            if entry["type"] != "video_url":
                continue
            payload = base64.b64decode(entry["video_url"]["url"].split(",")[1])
            with NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:  # noqa
                tmp_path = tmp.name
                tmp.write(payload)
            try:
                capture = cv2.VideoCapture(tmp_path)
                width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
                capture.release()
                # Should be 64x64, not 128x128
                assert width == 64
                assert height == 64
            finally:
                if os.path.exists(tmp_path):
                    os.unlink(tmp_path)
@pytest.mark.benchmark
def test_sample_deterministic_with_videos(hf_tokenizer: PreTrainedTokenizerBase):
    """Identical seeds must produce identical multimodal samples."""
    bucket_config = {
        (64, 64, 1): 0.3,  # Images
        (64, 64, 8): 0.7,  # Videos
    }
    common_kwargs = dict(
        tokenizer=hf_tokenizer,
        num_requests=3,
        base_items_per_request=1,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 2, "video": 2},
        bucket_config=bucket_config,
        input_len=20,
        output_len=5,
    )
    first = RandomMultiModalDataset(random_seed=123).sample(**common_kwargs)
    second = RandomMultiModalDataset(random_seed=123).sample(**common_kwargs)
    assert len(first) == len(second)
    # Same seed => identical multimodal payloads, request by request.
    for lhs, rhs in zip(first, second):
        assert lhs.multi_modal_data == rhs.multi_modal_data
@pytest.mark.benchmark
def test_sample_different_seeds_produce_different_videos(
    hf_tokenizer: PreTrainedTokenizerBase,
):
    """Test that different seeds produce different video content."""
    common_kwargs = dict(
        tokenizer=hf_tokenizer,
        num_requests=2,
        base_items_per_request=1,
        num_mm_items_range_ratio=0.0,
        limit_mm_per_prompt={"image": 0, "video": 1},
        bucket_config={(64, 64, 8): 1.0},  # Only videos
        input_len=20,
        output_len=5,
    )
    first = RandomMultiModalDataset(random_seed=123).sample(**common_kwargs)
    second = RandomMultiModalDataset(random_seed=456).sample(**common_kwargs)
    # Different seeds must produce different encoded videos.
    for lhs, rhs in zip(first, second):
        items_lhs = cast(list[dict[str, Any]], lhs.multi_modal_data)
        items_rhs = cast(list[dict[str, Any]], rhs.multi_modal_data)
        assert len(items_lhs) == len(items_rhs) == 1
        assert items_lhs[0]["video_url"]["url"] != items_rhs[0]["video_url"]["url"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/benchmarks/test_random_multimodal_dataset_video.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/attention/test_batch_reordering.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
import numpy as np
import pytest
from vllm.v1.attention.backends.utils import reorder_batch_to_split_decodes_and_prefills
class MockInputBatch:
    """Minimal InputBatch stand-in: request ids plus computed-token counts."""

    def __init__(self, req_ids, num_computed_tokens_cpu):
        self.req_ids = req_ids
        self.num_computed_tokens_cpu = num_computed_tokens_cpu

    def swap_states(self, i, j):
        """Swap requests i and j across both parallel arrays."""
        ids = self.req_ids
        ids[i], ids[j] = ids[j], ids[i]
        counts = self.num_computed_tokens_cpu
        counts[i], counts[j] = counts[j], counts[i]
class MockSchedulerOutput:
    """Minimal SchedulerOutput stand-in exposing only num_scheduled_tokens."""

    def __init__(self, num_scheduled_tokens):
        self.num_scheduled_tokens = num_scheduled_tokens
@dataclass
class ReorderTestCase:
    """One reorder scenario: the initial batch plus the expected outcome."""

    requests: list[tuple[int, int]]  # (num_scheduled_tokens, num_computed_tokens)
    # Expected permutation: slot i holds the request originally at index
    # expected_order[i].
    expected_order: list[int]
    # Whether the reorder helper should report that it modified the batch.
    expected_modified: bool
    # Requests scheduling at most this many tokens count as decodes.
    decode_threshold: int = 1
# Test cases for batch reordering
# Keys name the scenario; values describe the batch and the expected
# permutation (see ReorderTestCase).
REORDER_TEST_CASES = {
    "all_decodes": ReorderTestCase(
        requests=[(1, 10), (1, 20), (1, 30)],
        expected_order=[0, 1, 2],
        expected_modified=False,
    ),
    "all_prefills": ReorderTestCase(
        requests=[(100, 100), (200, 200), (300, 300)],
        expected_order=[0, 1, 2],
        expected_modified=False,
    ),
    "mixed_interleaved": ReorderTestCase(
        requests=[(100, 100), (1, 10), (200, 200), (1, 20)],
        expected_order=[3, 1, 2, 0],  # Only swap 0↔3, keep 1 and 2 in place
        expected_modified=True,
    ),
    "already_ordered": ReorderTestCase(
        requests=[(1, 10), (1, 20), (100, 100), (200, 0)],
        expected_order=[0, 1, 2, 3],
        expected_modified=False,
    ),
    "single_request": ReorderTestCase(
        requests=[(1, 10)],
        expected_order=[0],
        expected_modified=False,
    ),
    "higher_threshold": ReorderTestCase(
        requests=[(2, 10), (3, 20), (5, 30), (6, 40)],
        expected_order=[0, 1, 2, 3],
        expected_modified=False,
        decode_threshold=4,
    ),
    "decodes_at_end": ReorderTestCase(
        requests=[(100, 100), (200, 200), (1, 10), (1, 20)],
        expected_order=[2, 3, 0, 1],
        expected_modified=True,
    ),
    "decode_extend_prefill": ReorderTestCase(
        requests=[(100, 0), (10, 50), (1, 10)],
        expected_order=[2, 1, 0],
        expected_modified=True,
    ),
    "extend_prefill_only": ReorderTestCase(
        requests=[(100, 0), (10, 50), (200, 0), (20, 75)],
        expected_order=[3, 1, 2, 0],  # Only swap 0↔3, keep 1 and 2 in place
        expected_modified=True,
    ),
    "complicated_mixed_interleaved": ReorderTestCase(
        requests=[
            (1, 20),
            (1, 50),
            (374, 0),
            (300, 20),
            (1, 20),
            (256, 0),
            (1, 5),
            (27, 0),
            (1, 4),
        ],
        expected_order=[0, 1, 6, 8, 4, 3, 2, 7, 5],
        expected_modified=True,
    ),
    "new_request_single_token_prefill": ReorderTestCase(
        requests=[
            (100, 0),
            (1, 0),  # New request with only 1 token (STILL prefill)
            (50, 100),
            (1, 10),
        ],
        # Only index 3 is a true decode (has num_computed_tokens > 0)
        expected_order=[3, 2, 0, 1],
        expected_modified=True,
    ),
    "multiple_new_requests_single_token_prefill": ReorderTestCase(
        requests=[
            (1, 0),  # New prefill (1 token, no computed)
            (1, 0),  # New prefill (1 token, no computed)
            (1, 50),
            (200, 0),
        ],
        expected_order=[2, 1, 0, 3],
        expected_modified=True,
    ),
}
@pytest.mark.parametrize(
    "test_case", REORDER_TEST_CASES.values(), ids=REORDER_TEST_CASES.keys()
)
def test_reorder_batch_to_split_decodes_and_prefills(test_case: ReorderTestCase):
    """Reordering must yield the expected permutation and modification flag."""
    n = len(test_case.requests)
    req_ids = [f"r{i}" for i in range(n)]
    computed = np.array([r[1] for r in test_case.requests], dtype=np.int32)
    scheduled = {f"r{i}": r[0] for i, r in enumerate(test_case.requests)}
    input_batch = MockInputBatch(req_ids, computed)
    modified = reorder_batch_to_split_decodes_and_prefills(
        input_batch,
        MockSchedulerOutput(scheduled),
        decode_threshold=test_case.decode_threshold,
    )
    want_ids = [f"r{i}" for i in test_case.expected_order]
    assert modified == test_case.expected_modified, (
        f"Expected modified={test_case.expected_modified}, got {modified}"
    )
    assert input_batch.req_ids == want_ids, (
        f"Expected order {want_ids}, got {input_batch.req_ids}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/attention/test_batch_reordering.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fla/ops/kda.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# SPDX-FileCopyrightText: Songlin Yang, Yu Zhang
#
# This file contains code copied from the flash-linear-attention project.
# The original source code was licensed under the MIT license and included
# the following copyright notice:
# Copyright (c) 2023-2025, Songlin Yang, Yu Zhang
# ruff: noqa: E501
import torch
import torch.nn as nn
from vllm.triton_utils import tl, triton
from vllm.utils.math_utils import cdiv, next_power_of_2
from .chunk_delta_h import chunk_gated_delta_rule_fwd_h
from .cumsum import chunk_local_cumsum
from .fused_recurrent import fused_recurrent_gated_delta_rule_fwd_kernel
from .index import prepare_chunk_indices
from .l2norm import l2norm_fwd
from .op import exp, log
from .solve_tril import solve_tril
from .utils import is_amd
# Candidate chunk sizes (BT) explored by autotuning.
BT_LIST_AUTOTUNE = [32, 64, 128]
# NOTE(review): `is_amd` is used as a truthy value, not called — confirm it
# is a flag (not a function) in `.utils`.
NUM_WARPS_AUTOTUNE = [2, 4, 8, 16] if is_amd else [4, 8, 16, 32]
def fused_recurrent_kda_fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    g: torch.Tensor,
    beta: torch.Tensor,
    scale: float,
    initial_state: torch.Tensor,
    inplace_final_state: bool = True,
    cu_seqlens: torch.LongTensor | None = None,
    ssm_state_indices: torch.Tensor | None = None,
    num_accepted_tokens: torch.Tensor | None = None,
    use_qk_l2norm_in_kernel: bool = False,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Launch the fused recurrent gated-delta-rule kernel in KDA mode.

    Returns the attention output ``o`` (same shape/dtype as ``k``) and the
    final recurrent state (``initial_state`` itself when
    ``inplace_final_state`` is True).
    """
    # B: batch, T: tokens, H: key heads, K: key dim; V: value dim.
    B, T, H, K, V = *k.shape, v.shape[-1]
    HV = v.shape[2]  # number of value heads
    # With varlen packing, N is the number of sequences in the flat batch.
    N = B if cu_seqlens is None else len(cu_seqlens) - 1
    BK, BV = next_power_of_2(K), min(next_power_of_2(V), 8)
    NK, NV = cdiv(K, BK), cdiv(V, BV)
    assert NK == 1, "NK > 1 is not supported yet"
    num_stages = 3
    num_warps = 1
    o = torch.empty_like(k)
    if inplace_final_state:
        # Reuse the initial-state buffer as the final-state output.
        final_state = initial_state
    else:
        final_state = q.new_empty(T, HV, V, K, dtype=initial_state.dtype)
    stride_init_state_token = initial_state.stride(0)
    stride_final_state_token = final_state.stride(0)
    # Normalize the (seq, tok) strides of the optional state-index table so
    # the kernel can treat 1-D and 2-D index tensors uniformly.
    if ssm_state_indices is None:
        stride_indices_seq, stride_indices_tok = 1, 1
    elif ssm_state_indices.ndim == 1:
        stride_indices_seq, stride_indices_tok = ssm_state_indices.stride(0), 1
    else:
        stride_indices_seq, stride_indices_tok = ssm_state_indices.stride()
    # One program per (K-tile, V-tile, sequence * value-head).
    grid = (NK, NV, N * HV)
    fused_recurrent_gated_delta_rule_fwd_kernel[grid](
        q=q,
        k=k,
        v=v,
        g=g,
        beta=beta,
        o=o,
        h0=initial_state,
        ht=final_state,
        cu_seqlens=cu_seqlens,
        ssm_state_indices=ssm_state_indices,
        num_accepted_tokens=num_accepted_tokens,
        scale=scale,
        N=N,
        T=T,
        B=B,
        H=H,
        HV=HV,
        K=K,
        V=V,
        BK=BK,
        BV=BV,
        stride_init_state_token=stride_init_state_token,
        stride_final_state_token=stride_final_state_token,
        stride_indices_seq=stride_indices_seq,
        stride_indices_tok=stride_indices_tok,
        IS_BETA_HEADWISE=beta.ndim == v.ndim,
        USE_QK_L2NORM_IN_KERNEL=use_qk_l2norm_in_kernel,
        INPLACE_FINAL_STATE=inplace_final_state,
        IS_KDA=True,
        num_warps=num_warps,
        num_stages=num_stages,
    )
    return o, final_state
def fused_recurrent_kda(
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
g: torch.Tensor,
beta: torch.Tensor = None,
scale: float = None,
initial_state: torch.Tensor = None,
inplace_final_state: bool = True,
use_qk_l2norm_in_kernel: bool = True,
cu_seqlens: torch.LongTensor | None = None,
ssm_state_indices: torch.LongTensor | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
if cu_seqlens is not None and q.shape[0] != 1:
raise ValueError(
f"The batch size is expected to be 1 rather than {q.shape[0]} when using `cu_seqlens`."
f"Please flatten variable-length inputs before processing."
)
if scale is None:
scale = k.shape[-1] ** -0.5
o, final_state = fused_recurrent_kda_fwd(
q=q.contiguous(),
k=k.contiguous(),
v=v.contiguous(),
g=g.contiguous(),
beta=beta.contiguous(),
scale=scale,
initial_state=initial_state,
inplace_final_state=inplace_final_state,
cu_seqlens=cu_seqlens,
ssm_state_indices=ssm_state_indices,
num_accepted_tokens=None,
use_qk_l2norm_in_kernel=use_qk_l2norm_in_kernel,
)
return o, final_state
@triton.heuristics(
    {
        # Constexpr flags derived from whether the optional pointers are set.
        "STORE_RESIDUAL_OUT": lambda args: args["residual_out"] is not None,
        "HAS_RESIDUAL": lambda args: args["residual"] is not None,
        "HAS_WEIGHT": lambda args: args["w"] is not None,
        "HAS_BIAS": lambda args: args["b"] is not None,
    }
)
@triton.jit
def layer_norm_gated_fwd_kernel(
    x,  # pointer to the input
    g,  # pointer to the gate
    y,  # pointer to the output
    w,  # pointer to the weights
    b,  # pointer to the biases
    residual,  # pointer to the residual
    residual_out,  # pointer to the residual
    mean,  # pointer to the mean
    rstd,  # pointer to the 1/std
    eps,  # epsilon to avoid division by zero
    T,  # number of rows in x
    D: tl.constexpr,  # number of columns in x
    BT: tl.constexpr,
    BD: tl.constexpr,
    ACTIVATION: tl.constexpr,
    IS_RMS_NORM: tl.constexpr,
    STORE_RESIDUAL_OUT: tl.constexpr,
    HAS_RESIDUAL: tl.constexpr,
    HAS_WEIGHT: tl.constexpr,
    HAS_BIAS: tl.constexpr,
):
    """Tiled variant: each program normalizes a (BT, BD) block of rows."""
    i_t = tl.program_id(0)
    o_d = tl.arange(0, BD)
    m_d = o_d < D
    p_x = tl.make_block_ptr(x, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0))
    b_x = tl.load(p_x, boundary_check=(0, 1)).to(tl.float32)
    if HAS_RESIDUAL:
        # Add the residual before normalization.
        p_res = tl.make_block_ptr(
            residual, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0)
        )
        b_x += tl.load(p_res, boundary_check=(0, 1)).to(tl.float32)
    if STORE_RESIDUAL_OUT:
        # Persist the pre-normalization activations for the caller.
        p_res_out = tl.make_block_ptr(
            residual_out, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0)
        )
        tl.store(p_res_out, b_x.to(p_res_out.dtype.element_ty), boundary_check=(0, 1))
    if not IS_RMS_NORM:
        # LayerNorm path: subtract the per-row mean before the variance.
        b_mean = tl.sum(b_x, axis=1) / D
        p_mean = tl.make_block_ptr(mean, (T,), (1,), (i_t * BT,), (BT,), (0,))
        tl.store(p_mean, b_mean.to(p_mean.dtype.element_ty), boundary_check=(0,))
        b_xbar = tl.where(m_d[None, :], b_x - b_mean[:, None], 0.0)
        b_var = tl.sum(b_xbar * b_xbar, axis=1) / D
    else:
        # RMSNorm path: second moment only, no mean subtraction.
        b_xbar = tl.where(m_d[None, :], b_x, 0.0)
        b_var = tl.sum(b_xbar * b_xbar, axis=1) / D
    b_rstd = 1 / tl.sqrt(b_var + eps)
    p_rstd = tl.make_block_ptr(rstd, (T,), (1,), (i_t * BT,), (BT,), (0,))
    tl.store(p_rstd, b_rstd.to(p_rstd.dtype.element_ty), boundary_check=(0,))
    if HAS_WEIGHT:
        b_w = tl.load(w + o_d, mask=m_d).to(tl.float32)
    if HAS_BIAS:
        b_b = tl.load(b + o_d, mask=m_d).to(tl.float32)
    b_x_hat = (
        (b_x - b_mean[:, None]) * b_rstd[:, None]
        if not IS_RMS_NORM
        else b_x * b_rstd[:, None]
    )
    b_y = b_x_hat * b_w[None, :] if HAS_WEIGHT else b_x_hat
    if HAS_BIAS:
        b_y = b_y + b_b[None, :]
    # swish/sigmoid output gate
    p_g = tl.make_block_ptr(g, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0))
    b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
    if ACTIVATION == "swish" or ACTIVATION == "silu":
        b_y = b_y * b_g * tl.sigmoid(b_g)
    elif ACTIVATION == "sigmoid":
        b_y = b_y * tl.sigmoid(b_g)
    # Write output
    p_y = tl.make_block_ptr(y, (T, D), (D, 1), (i_t * BT, 0), (BT, BD), (1, 0))
    tl.store(p_y, b_y.to(p_y.dtype.element_ty), boundary_check=(0, 1))
@triton.heuristics(
    {
        # Constexpr flags derived from whether the optional pointers are set.
        "STORE_RESIDUAL_OUT": lambda args: args["residual_out"] is not None,
        "HAS_RESIDUAL": lambda args: args["residual"] is not None,
        "HAS_WEIGHT": lambda args: args["w"] is not None,
        "HAS_BIAS": lambda args: args["b"] is not None,
    }
)
@triton.jit
def layer_norm_gated_fwd_kernel1(
    x,  # pointer to the input
    g,  # pointer to the gate
    y,  # pointer to the output
    w,  # pointer to the weights
    b,  # pointer to the biases
    residual,  # pointer to the residual
    residual_out,  # pointer to the residual
    mean,  # pointer to the mean
    rstd,  # pointer to the 1/std
    eps,  # epsilon to avoid division by zero
    D: tl.constexpr,  # number of columns in x
    BD: tl.constexpr,
    ACTIVATION: tl.constexpr,
    IS_RMS_NORM: tl.constexpr,
    STORE_RESIDUAL_OUT: tl.constexpr,
    HAS_RESIDUAL: tl.constexpr,
    HAS_WEIGHT: tl.constexpr,
    HAS_BIAS: tl.constexpr,
):
    """Row variant: each program handles exactly one row of length D."""
    i_t = tl.program_id(0)
    # Advance all row pointers to this program's row.
    x += i_t * D
    y += i_t * D
    g += i_t * D
    if HAS_RESIDUAL:
        residual += i_t * D
    if STORE_RESIDUAL_OUT:
        residual_out += i_t * D
    o_d = tl.arange(0, BD)
    m_d = o_d < D
    b_x = tl.load(x + o_d, mask=m_d, other=0.0).to(tl.float32)
    if HAS_RESIDUAL:
        # Add the residual before normalization.
        b_x += tl.load(residual + o_d, mask=m_d, other=0.0).to(tl.float32)
    if STORE_RESIDUAL_OUT:
        # Persist the pre-normalization activations for the caller.
        tl.store(residual_out + o_d, b_x, mask=m_d)
    if not IS_RMS_NORM:
        # LayerNorm path: subtract the row mean before the variance.
        b_mean = tl.sum(b_x, axis=0) / D
        tl.store(mean + i_t, b_mean)
        b_xbar = tl.where(m_d, b_x - b_mean, 0.0)
        b_var = tl.sum(b_xbar * b_xbar, axis=0) / D
    else:
        # RMSNorm path: second moment only, no mean subtraction.
        b_xbar = tl.where(m_d, b_x, 0.0)
        b_var = tl.sum(b_xbar * b_xbar, axis=0) / D
    b_rstd = 1 / tl.sqrt(b_var + eps)
    tl.store(rstd + i_t, b_rstd)
    if HAS_WEIGHT:
        b_w = tl.load(w + o_d, mask=m_d).to(tl.float32)
    if HAS_BIAS:
        b_b = tl.load(b + o_d, mask=m_d).to(tl.float32)
    b_x_hat = (b_x - b_mean) * b_rstd if not IS_RMS_NORM else b_x * b_rstd
    b_y = b_x_hat * b_w if HAS_WEIGHT else b_x_hat
    if HAS_BIAS:
        b_y = b_y + b_b
    # swish/sigmoid output gate
    b_g = tl.load(g + o_d, mask=m_d, other=0.0).to(tl.float32)
    if ACTIVATION == "swish" or ACTIVATION == "silu":
        b_y = b_y * b_g * tl.sigmoid(b_g)
    elif ACTIVATION == "sigmoid":
        b_y = b_y * tl.sigmoid(b_g)
    # Write output
    tl.store(y + o_d, b_y, mask=m_d)
def layer_norm_gated_fwd(
    x: torch.Tensor,
    g: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    activation: str = "swish",
    eps: float = 1e-5,
    residual: torch.Tensor = None,
    out_dtype: torch.dtype = None,
    residual_dtype: torch.dtype = None,
    is_rms_norm: bool = False,
):
    """Host-side launcher for the gated layer/RMS norm forward kernels.

    Dispatches to the tiled kernel for D <= 512 and the one-row-per-program
    kernel otherwise. Returns ``(y, mean, rstd, residual_out)``; note that
    ``y`` aliases ``x`` when ``out_dtype`` is None, ``mean`` is None for
    RMS norm, and the last element falls back to ``x`` when no separate
    residual buffer was allocated.
    """
    if residual is not None:
        residual_dtype = residual.dtype
    T, D = x.shape
    if residual is not None:
        assert residual.shape == (T, D)
    if weight is not None:
        assert weight.shape == (D,)
    if bias is not None:
        assert bias.shape == (D,)
    # allocate output; writes in place over x unless a dtype change is needed
    y = x if out_dtype is None else torch.empty_like(x, dtype=out_dtype)
    if residual is not None or (
        residual_dtype is not None and residual_dtype != x.dtype
    ):
        residual_out = torch.empty(T, D, device=x.device, dtype=residual_dtype)
    else:
        residual_out = None
    # mean is only produced by the LayerNorm path.
    mean = (
        torch.empty((T,), dtype=torch.float, device=x.device)
        if not is_rms_norm
        else None
    )
    rstd = torch.empty((T,), dtype=torch.float, device=x.device)
    # Less than 64KB per feature: enqueue fused kernel
    MAX_FUSED_SIZE = 65536 // x.element_size()
    BD = min(MAX_FUSED_SIZE, next_power_of_2(D))
    if D > BD:
        raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
    # heuristics for number of warps
    if D <= 512:
        # Small feature dim: tile BT=32 rows per program.
        BT = 32
        layer_norm_gated_fwd_kernel[(cdiv(T, BT),)](
            x=x,
            g=g,
            y=y,
            w=weight,
            b=bias,
            residual=residual,
            residual_out=residual_out,
            mean=mean,
            rstd=rstd,
            eps=eps,
            T=T,
            D=D,
            BD=BD,
            BT=BT,
            ACTIVATION=activation,
            IS_RMS_NORM=is_rms_norm,
            num_warps=4,
        )
    else:
        # Large feature dim: one row per program.
        layer_norm_gated_fwd_kernel1[(T,)](
            x=x,
            g=g,
            y=y,
            w=weight,
            b=bias,
            residual=residual,
            residual_out=residual_out,
            mean=mean,
            rstd=rstd,
            eps=eps,
            D=D,
            BD=BD,
            ACTIVATION=activation,
            IS_RMS_NORM=is_rms_norm,
            num_warps=4,
        )
    # residual_out is None if residual is None and residual_dtype == input_dtype
    return y, mean, rstd, residual_out if residual_out is not None else x
def rms_norm_gated(
    x: torch.Tensor,
    g: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    activation: str = "swish",
    residual: torch.Tensor | None = None,
    prenorm: bool = False,
    residual_in_fp32: bool = False,
    eps: float = 1e-6,
):
    """Gated RMS norm: flattens to 2D, runs the fused launcher, and restores
    the input shape (also returning the residual when ``prenorm``)."""
    orig_shape = x.shape
    x2d = x.contiguous().reshape(-1, x.shape[-1])
    g2d = g.contiguous().reshape(-1, g.shape[-1])
    res2d = None
    if residual is not None:
        assert residual.shape == orig_shape
        res2d = residual.contiguous().reshape(-1, residual.shape[-1])
    # Residual dtype: follow the residual when given, else fp32 on request,
    # else leave unset (input dtype).
    if res2d is not None:
        residual_dtype = res2d.dtype
    elif residual_in_fp32:
        residual_dtype = torch.float
    else:
        residual_dtype = None
    y, _, _, residual_out = layer_norm_gated_fwd(
        x=x2d,
        g=g2d,
        weight=weight,
        bias=bias,
        activation=activation,
        eps=eps,
        residual=res2d,
        residual_dtype=residual_dtype,
        is_rms_norm=True,
    )
    y = y.reshape(orig_shape)
    if prenorm:
        return y, residual_out.reshape(orig_shape)
    return y
class FusedRMSNormGated(nn.Module):
def __init__(
self,
hidden_size: int,
elementwise_affine: bool = True,
eps: float = 1e-5,
activation: str = "swish",
device: torch.device | None = None,
dtype: torch.dtype | None = None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.hidden_size = hidden_size
self.elementwise_affine = elementwise_affine
self.eps = eps
self.activation = activation
if self.activation not in ["swish", "silu", "sigmoid"]:
raise ValueError(f"Unsupported activation: {self.activation}")
if elementwise_affine:
self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
else:
self.register_parameter("weight", None)
self.register_parameter("bias", None)
def forward(
self,
x: torch.Tensor,
g: torch.Tensor,
residual: torch.Tensor | None = None,
prenorm: bool = False,
residual_in_fp32: bool = False,
) -> torch.Tensor:
return rms_norm_gated(
x,
g,
self.weight,
self.bias,
self.activation,
residual=residual,
eps=self.eps,
prenorm=prenorm,
residual_in_fp32=residual_in_fp32,
)
@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
@triton.autotune(
    configs=[
        triton.Config({"BK": BK}, num_warps=num_warps, num_stages=num_stages)
        for BK in [32, 64]
        for num_warps in [1, 2, 4, 8]
        for num_stages in [2, 3, 4]
    ],
    key=["BC"],
)
@triton.jit(do_not_specialize=["T"])
def chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_inter(
    q,
    k,
    g,
    beta,
    A,
    Aqk,
    scale,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    BT: tl.constexpr,
    BC: tl.constexpr,
    BK: tl.constexpr,
    NC: tl.constexpr,
    IS_VARLEN: tl.constexpr,
):
    # Fills the strictly-lower sub-chunk tiles (i_i > i_j) of the intra-chunk
    # matrices A (beta-scaled, gate-decayed K@K^T) and Aqk (scale-scaled,
    # gate-decayed Q@K^T). Diagonal sub-chunk tiles are handled by the
    # *_intra_sub_intra kernel. One program per (chunk, sub-chunk pair, b*h).
    i_t, i_c, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    # Decode the (row sub-chunk, column sub-chunk) pair from the flat index.
    i_i, i_j = i_c // NC, i_c % NC
    if IS_VARLEN:
        # Variable-length batch: map the flat chunk id to (sequence, chunk)
        # and recover the sequence's [bos, eos) span from cu_seqlens.
        i_n, i_t = (
            tl.load(chunk_indices + i_t * 2).to(tl.int32),
            tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32),
        )
        bos, eos = (
            tl.load(cu_seqlens + i_n).to(tl.int32),
            tl.load(cu_seqlens + i_n + 1).to(tl.int32),
        )
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    # Out-of-range or non-strictly-lower tiles have nothing to compute.
    if i_t * BT + i_i * BC >= T:
        return
    if i_i <= i_j:
        return
    # Shift base pointers to this (batch-offset, head) slice.
    q += (bos * H + i_h) * K
    k += (bos * H + i_h) * K
    g += (bos * H + i_h) * K
    A += (bos * H + i_h) * BT
    Aqk += (bos * H + i_h) * BT
    p_b = tl.make_block_ptr(
        beta + bos * H + i_h, (T,), (H,), (i_t * BT + i_i * BC,), (BC,), (0,)
    )
    b_b = tl.load(p_b, boundary_check=(0,))
    b_A = tl.zeros([BC, BC], dtype=tl.float32)
    b_Aqk = tl.zeros([BC, BC], dtype=tl.float32)
    # Accumulate over the key dimension in BK-wide slabs.
    for i_k in range(tl.cdiv(K, BK)):
        p_q = tl.make_block_ptr(
            q, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)
        )
        p_k = tl.make_block_ptr(
            k, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)
        )
        p_g = tl.make_block_ptr(
            g, (T, K), (H * K, 1), (i_t * BT + i_i * BC, i_k * BK), (BC, BK), (1, 0)
        )
        b_kt = tl.make_block_ptr(
            k, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1)
        )
        p_gk = tl.make_block_ptr(
            g, (K, T), (1, H * K), (i_k * BK, i_t * BT + i_j * BC), (BK, BC), (0, 1)
        )
        o_k = i_k * BK + tl.arange(0, BK)
        m_k = o_k < K
        # Reference gate value at the first row of sub-chunk i_i; decays below
        # are expressed relative to it to keep exp() arguments bounded.
        # [BK,]
        b_gn = tl.load(g + (i_t * BT + i_i * BC) * H * K + o_k, mask=m_k, other=0)
        # [BC, BK]
        b_g = tl.load(p_g, boundary_check=(0, 1))
        b_k = tl.load(p_k, boundary_check=(0, 1)) * exp(b_g - b_gn[None, :])
        # [BK, BC]
        b_gk = tl.load(p_gk, boundary_check=(0, 1))
        b_kt = tl.load(b_kt, boundary_check=(0, 1))
        # [BC, BC]
        b_ktg = b_kt * exp(b_gn[:, None] - b_gk)
        b_A += tl.dot(b_k, b_ktg)
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_qg = b_q * exp(b_g - b_gn[None, :]) * scale
        b_Aqk += tl.dot(b_qg, b_ktg)
    # beta is applied row-wise to the KK^T tile only (not to Aqk).
    b_A *= b_b[:, None]
    p_A = tl.make_block_ptr(
        A, (T, BT), (H * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)
    )
    tl.store(p_A, b_A.to(A.dtype.element_ty), boundary_check=(0, 1))
    p_Aqk = tl.make_block_ptr(
        Aqk, (T, BT), (H * BT, 1), (i_t * BT + i_i * BC, i_j * BC), (BC, BC), (1, 0)
    )
    tl.store(p_Aqk, b_Aqk.to(Aqk.dtype.element_ty), boundary_check=(0, 1))
@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
@triton.autotune(
    configs=[triton.Config({}, num_warps=num_warps) for num_warps in [1, 2, 4, 8]],
    key=["BK", "BT"],
)
@triton.jit(do_not_specialize=["T"])
def chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_intra(
    q,
    k,
    g,
    beta,
    A,
    Aqk,
    scale,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    BT: tl.constexpr,
    BC: tl.constexpr,
    BK: tl.constexpr,
    IS_VARLEN: tl.constexpr,
):
    # Fills the diagonal sub-chunk tiles of A / Aqk, column by column: A is
    # strictly lower-triangular (o_i > j), Aqk includes the diagonal
    # (o_i >= j) and carries the `scale` factor. One program per
    # (chunk, sub-chunk, b*h).
    i_t, i_i, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Variable-length batch: recover this sequence's [bos, eos) span.
        i_n, i_t = (
            tl.load(chunk_indices + i_t * 2).to(tl.int32),
            tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32),
        )
        bos, eos = (
            tl.load(cu_seqlens + i_n).to(tl.int32),
            tl.load(cu_seqlens + i_n + 1).to(tl.int32),
        )
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    if i_t * BT + i_i * BC >= T:
        return
    o_i = tl.arange(0, BC)
    o_k = tl.arange(0, BK)
    m_k = o_k < K
    # Row-validity mask for the tail sub-chunk of the sequence.
    m_A = (i_t * BT + i_i * BC + o_i) < T
    # Flat output offsets of this tile's first column inside A / Aqk.
    o_A = (bos + i_t * BT + i_i * BC + o_i) * H * BT + i_h * BT + i_i * BC
    p_q = tl.make_block_ptr(
        q + (bos * H + i_h) * K,
        (T, K),
        (H * K, 1),
        (i_t * BT + i_i * BC, 0),
        (BC, BK),
        (1, 0),
    )
    p_k = tl.make_block_ptr(
        k + (bos * H + i_h) * K,
        (T, K),
        (H * K, 1),
        (i_t * BT + i_i * BC, 0),
        (BC, BK),
        (1, 0),
    )
    p_g = tl.make_block_ptr(
        g + (bos * H + i_h) * K,
        (T, K),
        (H * K, 1),
        (i_t * BT + i_i * BC, 0),
        (BC, BK),
        (1, 0),
    )
    b_q = tl.load(p_q, boundary_check=(0, 1))
    b_k = tl.load(p_k, boundary_check=(0, 1))
    b_g = tl.load(p_g, boundary_check=(0, 1))
    # Pre-scale keys by beta (row-wise); the masked rows contribute zeros.
    p_b = beta + (bos + i_t * BT + i_i * BC + o_i) * H + i_h
    b_k = b_k * tl.load(p_b, mask=m_A, other=0)[:, None]
    # Pointers to the j-th key/gate row, advanced by one timestep per loop turn.
    p_kt = k + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k
    p_gk = g + (bos + i_t * BT + i_i * BC) * H * K + i_h * K + o_k
    for j in range(0, min(BC, T - i_t * BT - i_i * BC)):
        b_kt = tl.load(p_kt, mask=m_k, other=0).to(tl.float32)
        b_gk = tl.load(p_gk, mask=m_k, other=0).to(tl.float32)
        # Gate-decayed key of timestep j, broadcast against every row.
        b_ktg = b_kt[None, :] * exp(b_g - b_gk[None, :])
        b_A = tl.sum(b_k * b_ktg, 1)
        b_A = tl.where(o_i > j, b_A, 0.0)
        b_Aqk = tl.sum(b_q * b_ktg, 1)
        b_Aqk = tl.where(o_i >= j, b_Aqk * scale, 0.0)
        tl.store(A + o_A + j, b_A, mask=m_A)
        tl.store(Aqk + o_A + j, b_Aqk, mask=m_A)
        p_kt += H * K
        p_gk += H * K
def chunk_kda_scaled_dot_kkt_fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    gk: torch.Tensor | None = None,
    beta: torch.Tensor | None = None,
    scale: float | None = None,
    cu_seqlens: torch.LongTensor | None = None,
    chunk_size: int = 64,
    output_dtype: torch.dtype = torch.float32,
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""
    Compute the intra-chunk matrices ``A = beta * K @ K^T`` and
    ``Aqk = scale * Q @ K^T`` (both with cumulative gate decay applied),
    by launching the inter- and intra-sub-chunk kernels.

    Args:
        q (torch.Tensor):
            The query tensor of shape `[B, T, H, K]`.
        k (torch.Tensor):
            The key tensor of shape `[B, T, H, K]`.
        gk (torch.Tensor):
            The cumulative sum of the gate tensor of shape `[B, T, H, K]` applied to the key tensor. Default: `None`.
        beta (torch.Tensor):
            The beta tensor of shape `[B, T, H]`.
        scale (float):
            Scale applied to the Q @ K^T part only.
        cu_seqlens (torch.LongTensor):
            The cumulative sequence lengths of the input tensor.
            Default: None
        chunk_size (int):
            The chunk size. Default: 64.
        output_dtype (torch.dtype):
            The dtype of the output tensor. Default: `torch.float32`

    Returns:
        A tuple `(A, Aqk)`, each of shape `[B, T, H, BT]` where `BT` is the
        chunk size.
    """
    B, T, H, K = k.shape
    assert K <= 256
    BT = chunk_size
    chunk_indices = (
        prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
    )
    NT = cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)
    # Sub-chunk tiling: each BT chunk is split into NC sub-chunks of size BC.
    BC = min(16, BT)
    NC = cdiv(BT, BC)
    BK = max(next_power_of_2(K), 16)
    A = torch.zeros(B, T, H, BT, device=k.device, dtype=output_dtype)
    Aqk = torch.zeros(B, T, H, BT, device=k.device, dtype=output_dtype)
    # Off-diagonal sub-chunk tiles (BK is autotuned by this kernel).
    grid = (NT, NC * NC, B * H)
    chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_inter[grid](
        q=q,
        k=k,
        g=gk,
        beta=beta,
        A=A,
        Aqk=Aqk,
        scale=scale,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        T=T,
        H=H,
        K=K,
        BT=BT,
        BC=BC,
        NC=NC,
    )
    # Diagonal sub-chunk tiles.
    grid = (NT, NC, B * H)
    chunk_kda_scaled_dot_kkt_fwd_kernel_intra_sub_intra[grid](
        q=q,
        k=k,
        g=gk,
        beta=beta,
        A=A,
        Aqk=Aqk,
        scale=scale,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        T=T,
        H=H,
        K=K,
        BT=BT,
        BC=BC,
        BK=BK,
    )
    return A, Aqk
@triton.heuristics(
    {
        "STORE_QG": lambda args: args["qg"] is not None,
        "STORE_KG": lambda args: args["kg"] is not None,
        "IS_VARLEN": lambda args: args["cu_seqlens"] is not None,
    }
)
@triton.autotune(
    configs=[
        triton.Config({}, num_warps=num_warps, num_stages=num_stages)
        for num_warps in [2, 4, 8]
        for num_stages in [2, 3, 4]
    ],
    key=["H", "K", "V", "BT", "BK", "BV", "IS_VARLEN"],
)
@triton.jit(do_not_specialize=["T"])
def recompute_w_u_fwd_kernel(
    q,
    k,
    qg,
    kg,
    v,
    beta,
    w,
    u,
    A,
    gk,
    cu_seqlens,
    chunk_indices,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    V: tl.constexpr,
    BT: tl.constexpr,
    BK: tl.constexpr,
    BV: tl.constexpr,
    STORE_QG: tl.constexpr,
    STORE_KG: tl.constexpr,
    IS_VARLEN: tl.constexpr,
    DOT_PRECISION: tl.constexpr,
):
    # Recomputes w = A @ (beta * k * exp(gk)) and u = A @ (beta * v) per chunk.
    # Optionally also stores the gate-decayed projections qg = q * exp(gk)
    # and kg = k * exp(g_last - gk) when those output buffers are provided.
    # One program per (chunk, b*h).
    i_t, i_bh = tl.program_id(0), tl.program_id(1)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # Variable-length batch: recover this sequence's [bos, eos) span.
        i_n, i_t = (
            tl.load(chunk_indices + i_t * 2).to(tl.int32),
            tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32),
        )
        bos, eos = (
            tl.load(cu_seqlens + i_n).to(tl.int32),
            tl.load(cu_seqlens + i_n + 1).to(tl.int32),
        )
        T = eos - bos
    else:
        bos, eos = i_b * T, i_b * T + T
    p_b = tl.make_block_ptr(beta + bos * H + i_h, (T,), (H,), (i_t * BT,), (BT,), (0,))
    b_b = tl.load(p_b, boundary_check=(0,))
    # A: the chunk's (already triangular-solved) [BT, BT] mixing matrix.
    p_A = tl.make_block_ptr(
        A + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)
    )
    b_A = tl.load(p_A, boundary_check=(0, 1))
    # u = A @ (beta * v), tiled over the value dimension.
    for i_v in range(tl.cdiv(V, BV)):
        p_v = tl.make_block_ptr(
            v + (bos * H + i_h) * V,
            (T, V),
            (H * V, 1),
            (i_t * BT, i_v * BV),
            (BT, BV),
            (1, 0),
        )
        p_u = tl.make_block_ptr(
            u + (bos * H + i_h) * V,
            (T, V),
            (H * V, 1),
            (i_t * BT, i_v * BV),
            (BT, BV),
            (1, 0),
        )
        b_v = tl.load(p_v, boundary_check=(0, 1))
        b_vb = (b_v * b_b[:, None]).to(b_v.dtype)
        b_u = tl.dot(b_A, b_vb, input_precision=DOT_PRECISION)
        tl.store(p_u, b_u.to(p_u.dtype.element_ty), boundary_check=(0, 1))
    # w = A @ (beta * k * exp(gk)), tiled over the key dimension.
    for i_k in range(tl.cdiv(K, BK)):
        p_w = tl.make_block_ptr(
            w + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        p_k = tl.make_block_ptr(
            k + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        b_k = tl.load(p_k, boundary_check=(0, 1))
        b_kb = b_k * b_b[:, None]
        p_gk = tl.make_block_ptr(
            gk + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        b_gk = tl.load(p_gk, boundary_check=(0, 1))
        b_kb *= exp(b_gk)
        if STORE_QG:
            # Side output: gate-decayed queries qg = q * exp(gk).
            p_q = tl.make_block_ptr(
                q + (bos * H + i_h) * K,
                (T, K),
                (H * K, 1),
                (i_t * BT, i_k * BK),
                (BT, BK),
                (1, 0),
            )
            p_qg = tl.make_block_ptr(
                qg + (bos * H + i_h) * K,
                (T, K),
                (H * K, 1),
                (i_t * BT, i_k * BK),
                (BT, BK),
                (1, 0),
            )
            b_q = tl.load(p_q, boundary_check=(0, 1))
            b_qg = b_q * exp(b_gk)
            tl.store(p_qg, b_qg.to(p_qg.dtype.element_ty), boundary_check=(0, 1))
        if STORE_KG:
            # Side output: keys decayed toward the chunk's last timestep,
            # kg = k * exp(g_last - gk).
            last_idx = min(i_t * BT + BT, T) - 1
            o_k = i_k * BK + tl.arange(0, BK)
            m_k = o_k < K
            b_gn = tl.load(
                gk + ((bos + last_idx) * H + i_h) * K + o_k, mask=m_k, other=0.0
            )
            b_kg = b_k * exp(b_gn - b_gk)
            p_kg = tl.make_block_ptr(
                kg + (bos * H + i_h) * K,
                (T, K),
                (H * K, 1),
                (i_t * BT, i_k * BK),
                (BT, BK),
                (1, 0),
            )
            tl.store(p_kg, b_kg.to(p_kg.dtype.element_ty), boundary_check=(0, 1))
        b_w = tl.dot(b_A, b_kb.to(b_k.dtype))
        tl.store(p_w, b_w.to(p_w.dtype.element_ty), boundary_check=(0, 1))
def recompute_w_u_fwd(
    k: torch.Tensor,
    v: torch.Tensor,
    beta: torch.Tensor,
    A: torch.Tensor,
    q: torch.Tensor | None = None,
    gk: torch.Tensor | None = None,
    cu_seqlens: torch.LongTensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor, None, torch.Tensor | None]:
    """Launch :func:`recompute_w_u_fwd_kernel` to recompute ``w`` and ``u``.

    Args:
        k: Key tensor of shape `[B, T, H, K]`.
        v: Value tensor of shape `[B, T, H, V]`.
        beta: Per-token scaling of shape `[B, T, H]`.
        A: Per-chunk mixing matrix of shape `[B, T, H, BT]`.
        q: Query tensor (passed through to the kernel; `qg` output is disabled
           here, so it is unused unless STORE_QG is enabled).
        gk: Cumulative gate tensor of shape `[B, T, H, K]`; when given, the
            decayed keys `kg` are also produced.
        cu_seqlens: Optional cumulative sequence lengths for varlen batches.

    Returns:
        `(w, u, None, kg)` — the `None` slot keeps the tuple shape aligned
        with callers that also expect a `qg` output; `kg` is `None` when
        `gk` is `None`.
    """
    B, T, H, K, V = *k.shape, v.shape[-1]
    BT = A.shape[-1]
    BK = 64
    BV = 64
    chunk_indices = (
        prepare_chunk_indices(cu_seqlens, BT) if cu_seqlens is not None else None
    )
    NT = cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)
    w = torch.empty_like(k)
    u = torch.empty_like(v)
    # kg is only materialized (and only stored by the kernel) when gk is given.
    kg = torch.empty_like(k) if gk is not None else None
    recompute_w_u_fwd_kernel[(NT, B * H)](
        q=q,
        k=k,
        qg=None,
        kg=kg,
        v=v,
        beta=beta,
        w=w,
        u=u,
        A=A,
        gk=gk,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        T=T,
        H=H,
        K=K,
        V=V,
        BT=BT,
        BK=BK,
        BV=BV,
        DOT_PRECISION="ieee",
    )
    return w, u, None, kg
@triton.heuristics({"IS_VARLEN": lambda args: args["cu_seqlens"] is not None})
@triton.autotune(
    configs=[
        triton.Config({"BK": BK, "BV": BV}, num_warps=num_warps, num_stages=num_stages)
        for BK in [32, 64]
        for BV in [64, 128]
        for num_warps in [2, 4, 8]
        for num_stages in [2, 3, 4]
    ],
    key=["BT"],
)
@triton.jit(do_not_specialize=["T"])
def chunk_gla_fwd_kernel_o(
    q,
    v,
    g,
    h,
    o,
    A,
    cu_seqlens,
    chunk_indices,
    scale,
    T,
    H: tl.constexpr,
    K: tl.constexpr,
    V: tl.constexpr,
    BT: tl.constexpr,
    BK: tl.constexpr,
    BV: tl.constexpr,
    IS_VARLEN: tl.constexpr,
):
    # Output projection for chunked GLA: o = (scale * q * exp(g)) @ h
    # (inter-chunk, via the per-chunk state h) + tril(A) @ v (intra-chunk).
    # One program per (value tile, chunk, b*h).
    i_v, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    i_b, i_h = i_bh // H, i_bh % H
    if IS_VARLEN:
        # i_tg is the global chunk index used to address the h states.
        i_tg = i_t
        i_n, i_t = (
            tl.load(chunk_indices + i_t * 2).to(tl.int32),
            tl.load(chunk_indices + i_t * 2 + 1).to(tl.int32),
        )
        bos, eos = (
            tl.load(cu_seqlens + i_n).to(tl.int32),
            tl.load(cu_seqlens + i_n + 1).to(tl.int32),
        )
        T = eos - bos
        NT = tl.cdiv(T, BT)
    else:
        NT = tl.cdiv(T, BT)
        i_tg = i_b * NT + i_t
        bos, eos = i_b * T, i_b * T + T
    # Causal (lower-triangular, diagonal included) mask for the intra-chunk part.
    m_s = tl.arange(0, BT)[:, None] >= tl.arange(0, BT)[None, :]
    b_o = tl.zeros([BT, BV], dtype=tl.float32)
    # Inter-chunk contribution, accumulated over key tiles.
    for i_k in range(tl.cdiv(K, BK)):
        p_q = tl.make_block_ptr(
            q + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        p_g = tl.make_block_ptr(
            g + (bos * H + i_h) * K,
            (T, K),
            (H * K, 1),
            (i_t * BT, i_k * BK),
            (BT, BK),
            (1, 0),
        )
        p_h = tl.make_block_ptr(
            h + (i_tg * H + i_h) * K * V,
            (K, V),
            (V, 1),
            (i_k * BK, i_v * BV),
            (BK, BV),
            (1, 0),
        )
        # [BT, BK]
        b_q = tl.load(p_q, boundary_check=(0, 1))
        b_q = (b_q * scale).to(b_q.dtype)
        # [BT, BK]
        b_g = tl.load(p_g, boundary_check=(0, 1))
        # [BT, BK]
        b_qg = (b_q * exp(b_g)).to(b_q.dtype)
        # [BK, BV]
        b_h = tl.load(p_h, boundary_check=(0, 1))
        # works but dkw, owing to divine benevolence
        # [BT, BV]
        if i_k >= 0:
            b_o += tl.dot(b_qg, b_h.to(b_qg.dtype))
    p_v = tl.make_block_ptr(
        v + (bos * H + i_h) * V,
        (T, V),
        (H * V, 1),
        (i_t * BT, i_v * BV),
        (BT, BV),
        (1, 0),
    )
    p_o = tl.make_block_ptr(
        o + (bos * H + i_h) * V,
        (T, V),
        (H * V, 1),
        (i_t * BT, i_v * BV),
        (BT, BV),
        (1, 0),
    )
    p_A = tl.make_block_ptr(
        A + (bos * H + i_h) * BT, (T, BT), (H * BT, 1), (i_t * BT, 0), (BT, BT), (1, 0)
    )
    # Intra-chunk contribution: causal-masked A times the chunk's values.
    # [BT, BV]
    b_v = tl.load(p_v, boundary_check=(0, 1))
    # [BT, BT]
    b_A = tl.load(p_A, boundary_check=(0, 1))
    b_A = tl.where(m_s, b_A, 0.0).to(b_v.dtype)
    b_o += tl.dot(b_A, b_v, allow_tf32=False)
    tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
def chunk_gla_fwd_o_gk(
    q: torch.Tensor,
    v: torch.Tensor,
    g: torch.Tensor,
    A: torch.Tensor,
    h: torch.Tensor,
    o: torch.Tensor,
    scale: float,
    cu_seqlens: torch.LongTensor | None = None,
    chunk_size: int = 64,
):
    """Launch :func:`chunk_gla_fwd_kernel_o` to compute the chunked GLA output.

    Writes into (and returns) the caller-provided ``o`` buffer:
    inter-chunk term from the states ``h`` plus the intra-chunk term
    ``tril(A) @ v``. Shapes follow the `[B, T, H, ...]` layout used across
    this module.
    """
    B, T, H, K, V = *q.shape, v.shape[-1]
    BT = chunk_size
    chunk_indices = (
        prepare_chunk_indices(cu_seqlens, chunk_size)
        if cu_seqlens is not None
        else None
    )
    NT = cdiv(T, BT) if cu_seqlens is None else len(chunk_indices)

    # BV is autotuned by the kernel, so the grid must be computed from meta.
    def grid(meta):
        return (cdiv(V, meta["BV"]), NT, B * H)

    chunk_gla_fwd_kernel_o[grid](
        q=q,
        v=v,
        g=g,
        h=h,
        o=o,
        A=A,
        cu_seqlens=cu_seqlens,
        chunk_indices=chunk_indices,
        scale=scale,
        T=T,
        H=H,
        K=K,
        V=V,
        BT=BT,
    )
    return o
def chunk_kda_fwd(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    g: torch.Tensor,
    beta: torch.Tensor,
    scale: float,
    initial_state: torch.Tensor,
    output_final_state: bool,
    cu_seqlens: torch.LongTensor | None = None,
):
    """Forward pass of chunked KDA (gated delta-rule) attention.

    Pipeline: chunk-local cumsum of gates -> intra-chunk A / Aqk ->
    triangular solve of A -> recompute w/u/kg -> per-chunk recurrent states
    -> final output projection. Returns `(o, final_state)`; `o` reuses the
    `v` buffer. Intermediates are `del`eted eagerly to cap peak memory.
    """
    chunk_size = 64
    g = chunk_local_cumsum(g, chunk_size=chunk_size, cu_seqlens=cu_seqlens)
    # the intra Aqk is kept in fp32
    # the computation has very marginal effect on the entire throughput
    A, Aqk = chunk_kda_scaled_dot_kkt_fwd(
        q=q,
        k=k,
        gk=g,
        beta=beta,
        scale=scale,
        cu_seqlens=cu_seqlens,
        output_dtype=torch.float32,
    )
    # Invert the unit-lower-triangular system so A becomes the mixing matrix.
    A = solve_tril(A=A, cu_seqlens=cu_seqlens, output_dtype=k.dtype)
    w, u, _, kg = recompute_w_u_fwd(
        k=k,
        v=v,
        beta=beta,
        A=A,
        gk=g,
        cu_seqlens=cu_seqlens,
    )
    del A
    h, v_new, final_state = chunk_gated_delta_rule_fwd_h(
        k=kg,
        w=w,
        u=u,
        gk=g,
        initial_state=initial_state,
        output_final_state=output_final_state,
        cu_seqlens=cu_seqlens,
    )
    del w, u, kg
    # Note: o aliases the v buffer (o=v), overwriting it with the output.
    o = chunk_gla_fwd_o_gk(
        q=q,
        v=v_new,
        g=g,
        A=Aqk,
        h=h,
        o=v,
        scale=scale,
        cu_seqlens=cu_seqlens,
        chunk_size=chunk_size,
    )
    del Aqk, v_new, h
    return o, final_state
def chunk_kda(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    g: torch.Tensor,
    beta: torch.Tensor,
    scale: float | None = None,
    initial_state: torch.Tensor | None = None,
    output_final_state: bool = False,
    use_qk_l2norm_in_kernel: bool = False,
    cu_seqlens: torch.LongTensor | None = None,
    **kwargs,
):
    """Public entry point for chunked KDA attention.

    Args:
        q, k: Query/key tensors of shape `[B, T, H, K]`.
        v: Value tensor of shape `[B, T, H, V]`.
        g: Gate tensor of shape `[B, T, H, K]` (pre-cumsum).
        beta: Per-token scaling of shape `[B, T, H]`.
        scale: Attention scale; defaults to `K ** -0.5`.
        initial_state: Optional initial recurrent state; `None` means start
            from zero state.
        output_final_state: Whether to also return the final recurrent state.
        use_qk_l2norm_in_kernel: L2-normalize q/k before the kernel.
        cu_seqlens: Optional cumulative sequence lengths for varlen batches.

    Returns:
        `(o, final_state)` from :func:`chunk_kda_fwd`.
    """
    if scale is None:
        scale = k.shape[-1] ** -0.5
    if use_qk_l2norm_in_kernel:
        q = l2norm_fwd(q.contiguous())
        k = l2norm_fwd(k.contiguous())
    # BUGFIX: guard the .contiguous() call — `initial_state` defaults to None,
    # and the previous unconditional `initial_state.contiguous()` raised
    # AttributeError whenever no initial state was supplied.
    if initial_state is not None:
        initial_state = initial_state.contiguous()
    o, final_state = chunk_kda_fwd(
        q=q,
        k=k,
        v=v.contiguous(),
        g=g.contiguous(),
        beta=beta.contiguous(),
        scale=scale,
        initial_state=initial_state,
        output_final_state=output_final_state,
        cu_seqlens=cu_seqlens,
    )
    return o, final_state
@triton.autotune(
    configs=[
        triton.Config({"BT": bt}, num_warps=nw, num_stages=ns)
        for bt in BT_LIST_AUTOTUNE
        for nw in NUM_WARPS_AUTOTUNE
        for ns in [2, 3]
    ],
    key=["H", "D"],
)
@triton.jit
def kda_gate_fwd_kernel(
    g,
    A,
    y,
    g_bias,
    beta: tl.constexpr,
    threshold: tl.constexpr,
    T,
    H,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    HAS_BIAS: tl.constexpr,
):
    # Computes y = -exp(A[h]) * softplus(g + g_bias, beta, threshold) for one
    # (row-tile, head) pair. One program per (T tile, head).
    i_t, i_h = tl.program_id(0), tl.program_id(1)
    n_t = i_t * BT
    # Per-head decay coefficient a = -exp(A[h]), in fp32.
    b_a = tl.load(A + i_h).to(tl.float32)
    b_a = -tl.exp(b_a)
    # g/y are laid out as [T, H*D]; this head's slice starts at column i_h*D.
    stride_row = H * D
    stride_col = 1
    g_ptr = tl.make_block_ptr(
        base=g + i_h * D,
        shape=(T, D),
        strides=(stride_row, stride_col),
        offsets=(n_t, 0),
        block_shape=(BT, BD),
        order=(1, 0),
    )
    y_ptr = tl.make_block_ptr(
        base=y + i_h * D,
        shape=(T, D),
        strides=(stride_row, stride_col),
        offsets=(n_t, 0),
        block_shape=(BT, BD),
        order=(1, 0),
    )
    b_g = tl.load(g_ptr, boundary_check=(0, 1)).to(tl.float32)
    if HAS_BIAS:
        n_d = tl.arange(0, BD)
        bias_mask = n_d < D
        b_bias = tl.load(g_bias + i_h * D + n_d, mask=bias_mask, other=0.0).to(
            tl.float32
        )
        b_g = b_g + b_bias[None, :]
    # softplus(x, beta) = (1/beta) * log(1 + exp(beta * x))
    # When beta * x > threshold, use linear approximation x
    # Use threshold to switch to linear when beta*x > threshold
    g_scaled = b_g * beta
    use_linear = g_scaled > threshold
    sp = tl.where(use_linear, b_g, (1.0 / beta) * log(1.0 + tl.exp(g_scaled)))
    b_y = b_a * sp
    tl.store(y_ptr, b_y.to(y.dtype.element_ty), boundary_check=(0, 1))
def fused_kda_gate(
    g: torch.Tensor,
    A: torch.Tensor,
    head_k_dim: int,
    g_bias: torch.Tensor | None = None,
    beta: float = 1.0,
    threshold: float = 20.0,
) -> torch.Tensor:
    """
    Forward pass for KDA gate: ``y = -exp(A) * softplus(g + g_bias)``.

        input g: [..., H*D]
        param A: [H] or [1, 1, H, 1]
        g_bias : optional per-channel bias of shape [H*D]
        beta: softplus beta parameter
        threshold: softplus threshold parameter
        return : [..., H, D] (always fp32)
    """
    orig_shape = g.shape[:-1]
    # Flatten leading dims to a [T, H*D] view for the kernel.
    g = g.view(-1, g.shape[-1])
    T = g.shape[0]
    HD = g.shape[1]
    H = A.numel()
    assert H * head_k_dim == HD
    # Output is accumulated/stored in fp32 regardless of the input dtype.
    y = torch.empty_like(g, dtype=torch.float32)

    # BT is autotuned by the kernel, so the grid is derived from meta.
    def grid(meta):
        return (cdiv(T, meta["BT"]), H)

    kda_gate_fwd_kernel[grid](
        g,
        A,
        y,
        g_bias,
        beta,
        threshold,
        T,
        H,
        head_k_dim,
        BD=next_power_of_2(head_k_dim),
        HAS_BIAS=g_bias is not None,
    )
    y = y.view(*orig_shape, H, head_k_dim)
    return y
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fla/ops/kda.py",
"license": "Apache License 2.0",
"lines": 1241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/utils/counter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import threading
class Counter:
    """Monotonically increasing counter (not thread-safe).

    Each call to ``next(counter)`` returns the current value and then
    advances it by one.
    """

    def __init__(self, start: int = 0) -> None:
        super().__init__()
        self.counter = start

    def __next__(self) -> int:
        current = self.counter
        self.counter = current + 1
        return current

    def reset(self) -> None:
        """Rewind the counter back to zero."""
        self.counter = 0
class AtomicCounter:
    """Thread-safe integer counter guarded by a lock."""

    def __init__(self, initial: int = 0) -> None:
        """Create the counter starting at ``initial``."""
        super().__init__()
        self._lock = threading.Lock()
        self._value = initial

    @property
    def value(self) -> int:
        """Current counter value."""
        return self._value

    def inc(self, num: int = 1) -> int:
        """Atomically increment the counter by num and return the new value"""
        return self._apply(num)

    def dec(self, num: int = 1) -> int:
        """Atomically decrement the counter by num and return the new value"""
        return self._apply(-num)

    def _apply(self, delta: int) -> int:
        # Shared locked read-modify-write used by both inc() and dec().
        with self._lock:
            self._value += delta
            return self._value
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/utils/counter.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/minimax_m2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The MiniMax AI team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only MiniMaxM2 model."""
from collections.abc import Iterable
from typing import Any
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ModelConfig, VllmConfig
from vllm.distributed import (
get_pp_group,
get_tensor_model_parallel_world_size,
tensor_model_parallel_all_reduce,
)
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mamba.linear_attn import MiniMaxText01RMSNormTP
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class MiniMaxM2MoE(nn.Module):
    """Sparse MoE block for MiniMax-M2.

    A replicated fp32 router (``gate``) scores tokens; ``FusedMoE`` runs the
    selected experts; the result is all-reduced across TP ranks here (the
    expert layer is built with ``reduce_results=False``).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        if self.tp_size > config.num_local_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {config.num_local_experts}."
            )
        self.use_routing_bias = getattr(config, "use_routing_bias", False)
        if self.use_routing_bias:
            # Expert-score correction bias; kept in fp32 to match the router.
            self.e_score_correction_bias = nn.Parameter(
                torch.empty(config.num_local_experts, dtype=torch.float32)
            )
            self.e_score_correction_bias.weight_loader = (
                MiniMaxM2MoE.ebias_weight_loader
            )
        else:
            self.e_score_correction_bias = None
        self.experts = FusedMoE(
            num_experts=config.num_local_experts,
            top_k=config.num_experts_per_tok,
            scoring_func=config.scoring_func,
            e_score_correction_bias=self.e_score_correction_bias,
            hidden_size=config.hidden_size,
            intermediate_size=config.intermediate_size,
            reduce_results=False,
            renormalize=True,
            quant_config=quant_config,
            prefix=f"{prefix}.experts",
            router_logits_dtype=torch.float32,
        )
        # Router runs unquantized and in fp32 for routing stability.
        self.gate = ReplicatedLinear(
            config.hidden_size,
            config.num_local_experts,
            bias=False,
            params_dtype=torch.float32,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )

    @staticmethod
    def ebias_weight_loader(param: nn.Parameter, loaded_weight: torch.Tensor) -> None:
        """Copy the checkpoint routing-bias tensor into ``param``, upcast to fp32."""
        assert param.size() == loaded_weight.size()
        param.data.copy_(loaded_weight.to(torch.float32))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        # router_logits: (num_tokens, n_experts), computed in fp32.
        router_logits, _ = self.gate(hidden_states.to(torch.float32))
        final_hidden_states = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        # Removed a dead no-op self-assignment that was here
        # (`final_hidden_states = final_hidden_states`).
        if self.tp_size > 1:
            final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states)
        return final_hidden_states.view(num_tokens, hidden_dim)
class MiniMaxM2Attention(nn.Module):
    """Multi-head attention for MiniMax-M2 with TP-sharded QKV, per-head-group
    QK RMSNorm, and partial rotary embeddings."""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rotary_dim: int,
        rope_parameters: dict[str, Any] | None = None,
        attn_window_size: int | None = None,
        max_position_embeddings: int = 8192,
        head_dim: int | None = None,
        rms_norm_eps: float = 1e-06,
        qkv_bias: bool = False,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim or (hidden_size // self.total_num_heads)
        # Per-rank projection sizes for splitting the fused QKV output.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=qkv_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # Partial RoPE: only rotary_dim of head_dim gets rotated; express that
        # as a partial_rotary_factor unless the config already sets one.
        if (
            rope_parameters is not None
            and "partial_rotary_factor" not in rope_parameters
        ):
            rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position_embeddings,
            rope_parameters=rope_parameters,
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            per_layer_sliding_window=attn_window_size,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        # QK norms operate on the full (unsharded) head dimension via the
        # TP-aware RMSNorm implementation.
        self.q_norm = MiniMaxText01RMSNormTP(
            self.head_dim * self.total_num_heads, eps=rms_norm_eps
        )
        self.k_norm = MiniMaxText01RMSNormTP(
            self.head_dim * self.total_num_kv_heads, eps=rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Fused QKV projection -> QK norm -> RoPE -> attention -> output proj."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = MiniMaxText01RMSNormTP.forward_qk(
            self.q_norm, self.k_norm, q.contiguous(), k.contiguous()
        )
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class MiniMaxM2DecoderLayer(nn.Module):
    """One transformer block: pre-norm attention followed by a pre-norm
    sparse-MoE feed-forward (``block_sparse_moe``)."""

    def __init__(
        self,
        config: PretrainedConfig,
        prefix: str,
        model_config: ModelConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        # Allow a configured max_model_len to extend the positional range.
        if hasattr(config, "max_model_len") and isinstance(config.max_model_len, int):
            max_position_embeddings = max(
                config.max_position_embeddings, config.max_model_len
            )
        # DecoderLayers are created with `make_layers` which passes the prefix
        # with the layer's index.
        layer_idx = int(prefix.split(sep=".")[-1])
        self.layer_idx = layer_idx
        self.self_attn = MiniMaxM2Attention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rotary_dim=config.rotary_dim,
            rope_parameters=config.rope_parameters,
            max_position_embeddings=max_position_embeddings,
            rms_norm_eps=config.rms_norm_eps,
            qkv_bias=getattr(config, "attention_bias", False),
            head_dim=getattr(config, "head_dim", None),
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.block_sparse_moe = MiniMaxM2MoE(
            config=config,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> torch.Tensor:
        """Run one block; returns `(hidden_states, residual)` for the fused
        norm+residual pattern used by RMSNorm."""
        # Self Attention
        if residual is None:
            # First layer: seed the residual stream from the raw input.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.block_sparse_moe(hidden_states)
        return hidden_states, residual
@support_torch_compile
class MiniMaxM2Model(nn.Module):
fall_back_to_pt_during_load = False
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        """Build the embedding (first PP rank), decoder layers for this PP
        rank, and the final norm (last PP rank)."""
        super().__init__()
        config = vllm_config.model_config.hf_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.vocab_size = config.vocab_size
        # Embeddings live only on the first pipeline-parallel rank.
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                quant_config=None,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            self.embed_tokens = PPMissingLayer()
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: MiniMaxM2DecoderLayer(
                config,
                prefix,
                model_config=model_config,
                cache_config=cache_config,
                quant_config=quant_config,
            ),
            prefix=f"{prefix}.layers",
        )
        # Final norm lives only on the last pipeline-parallel rank.
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for ``input_ids``."""
        return self.embed_tokens(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run this PP rank's share of the model.

        First rank embeds (or accepts precomputed ``inputs_embeds``);
        intermediate ranks consume/produce ``IntermediateTensors``; the last
        rank applies the final norm and returns hidden states.
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for layer in self.layers[self.start_layer : self.end_layer]:
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            # Hand off both streams to the next pipeline stage.
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        """Map checkpoint expert weight names (w1/w2/w3) onto the fused
        FusedMoE parameter layout; entries are
        (param_name, weight_name, expert_id, shard_id)."""
        return FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="w1",
            ckpt_down_proj_name="w2",
            ckpt_up_proj_name="w3",
            num_experts=self.config.num_local_experts,
        )
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint tensors into this model's parameters.

        Each checkpoint weight is routed through one of three paths:
        stacked q/k/v shards, per-expert MoE shards, or a plain 1:1 copy.
        Returns the set of parameter names that were actually loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        expert_params_mapping = self.get_expert_mapping()
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Rotary inverse frequencies are recomputed, never loaded.
            if "rotary_emb.inv_freq" in name:
                continue
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            if spec_layer is not None:
                continue  # skip spec decode layers for main model
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not a stacked q/k/v shard; try the MoE expert mapping.
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(
                        param,
                        loaded_weight,
                        name,
                        shard_id=shard_id,
                        expert_id=expert_id,
                    )
                    break
                else:
                    # Plain (non-stacked, non-expert) weight.
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    # Remapping the name of FP8 kv-scale.
                    name = maybe_remap_kv_scale_name(name, params_dict)
                    if name is None:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class MiniMaxM2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    """MiniMax M2 causal LM: decoder stack plus LM head and logits processor."""

    # Checkpoint q/k/v projections are fused into a single qkv_proj parameter.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        # Propagate the engine's max_model_len onto the HF config so code
        # that only sees self.config reads the effective value.
        if hasattr(vllm_config.model_config, "max_model_len"):
            self.config.max_model_len = vllm_config.model_config.max_model_len
        self.model = MiniMaxM2Model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # Only the last pipeline-parallel rank materializes the LM head.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size, config.hidden_size, quant_config=None
            )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate token embedding lookup to the underlying model."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the decoder; returns hidden states, or the PP handoff tensors
        on non-last pipeline ranks."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits via the LM head."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights via AutoWeightsLoader; returns loaded names."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        """Expose the inner model's MoE expert parameter mapping."""
        return self.model.get_expert_mapping()
def get_spec_layer_idx_from_weight_name(
    config: PretrainedConfig, weight_name: str
) -> int | None:
    """Map a checkpoint weight name to its MTP (speculative-decode) layer index.

    MTP modules occupy layer indices ``num_hidden_layers`` through
    ``num_hidden_layers + num_mtp_modules - 1``. Returns None when the weight
    does not belong to an MTP layer, or the config declares no MTP modules.
    """
    if getattr(config, "num_mtp_modules", 0) > 0:
        base = config.num_hidden_layers
        for offset in range(config.num_mtp_modules):
            candidate = base + offset
            if weight_name.startswith(f"model.layers.{candidate}."):
                return candidate
    return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/minimax_m2.py",
"license": "Apache License 2.0",
"lines": 494,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/reasoning/minimax_m2_reasoning_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
)
from vllm.entrypoints.openai.engine.protocol import (
DeltaMessage,
)
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
)
from vllm.logger import init_logger
from vllm.reasoning.abs_reasoning_parsers import ReasoningParser
from vllm.reasoning.basic_parsers import BaseThinkingReasoningParser
from vllm.tokenizers import TokenizerLike
logger = init_logger(__name__)
class MiniMaxM2ReasoningParser(BaseThinkingReasoningParser):
    """
    Reasoning parser for the MiniMax M2 model.

    MiniMax M2 models emit no <think> start token, only the </think> end
    token: everything before </think> is reasoning and everything after it
    is the actual response.
    """

    @property
    def start_token(self) -> str:
        """The token that starts reasoning content."""
        return "<think>"

    @property
    def end_token(self) -> str:
        """The token that ends reasoning content."""
        return "</think>"

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """
        Split a streaming delta into reasoning and/or content parts.

        Because there is no <think> start token, every token counts as
        reasoning until the </think> end token has been observed.
        """
        end_id = self.end_token_id

        # A delta consisting solely of the end token carries no visible text.
        if len(delta_token_ids) == 1 and delta_token_ids[0] == end_id:
            return None

        # End token already streamed earlier: we are in the content phase.
        if end_id in previous_token_ids:
            return DeltaMessage(content=delta_text)

        # End token absent from this delta too: still inside the reasoning.
        if end_id not in delta_token_ids:
            return DeltaMessage(reasoning=delta_text)

        # End token arrives within this delta: split around the marker.
        marker_pos = delta_text.find(self.end_token)
        reasoning_part = delta_text[:marker_pos]
        content_part = delta_text[marker_pos + len(self.end_token) :]
        return DeltaMessage(
            reasoning=reasoning_part or None,
            content=content_part or None,
        )
class MiniMaxM2AppendThinkReasoningParser(ReasoningParser):
    """
    Reasoning parser for the MiniMax M2 model that re-attaches the implicit
    <think> start token and passes everything through as content.
    """

    def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)
        # Token ids of the think delimiters (None when absent from the vocab).
        self.end_token_id = self.vocab.get("</think>")
        self.start_token_id = self.vocab.get("<think>")

    def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
        """True iff the most recent think delimiter seen is the closing one."""
        delimiters = (self.end_token_id, self.start_token_id)
        for token_id in reversed(input_ids):
            if token_id in delimiters:
                return token_id == self.end_token_id
        return False

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        """All token ids are treated as content."""
        return input_ids

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """Stream everything as content, prefixing <think> on the first delta."""
        prefix = "<think>" if not previous_token_ids else ""
        return DeltaMessage(content=prefix + delta_text)

    def extract_reasoning(
        self, model_output: str, request: ChatCompletionRequest | ResponsesRequest
    ) -> tuple[str | None, str | None]:
        """Non-streaming path: no reasoning; content gets the <think> prefix."""
        return None, "<think>" + model_output
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/reasoning/minimax_m2_reasoning_parser.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/utils/argparse_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Argument parsing utilities for vLLM."""
import json
import sys
import textwrap
from argparse import (
Action,
ArgumentDefaultsHelpFormatter,
ArgumentParser,
ArgumentTypeError,
Namespace,
RawDescriptionHelpFormatter,
_ArgumentGroup,
)
from collections import defaultdict
from typing import Any
import regex as re
import yaml
from vllm.logger import init_logger
logger = init_logger(__name__)
class SortedHelpFormatter(ArgumentDefaultsHelpFormatter, RawDescriptionHelpFormatter):
    """Help formatter that sorts options alphabetically and re-wraps text."""

    def _split_lines(self, text, width):
        """
        1. Sentences split across lines have their single newlines removed.
        2. Paragraphs and explicit newlines are split into separate lines.
        3. Each line is wrapped to the specified width (width of terminal).
        """
        # A lone newline (plus trailing whitespace) glues a wrapped sentence
        # back together; two or more newlines mark a paragraph boundary.
        lone_newline = re.compile(r"(?<!\n)\n(?!\n)\s*")
        paragraph_break = re.compile(r"\n{2,}\s*")
        joined = lone_newline.sub(" ", text)
        paragraphs = paragraph_break.split(joined)
        return [
            wrapped for para in paragraphs for wrapped in textwrap.wrap(para, width)
        ]

    def add_arguments(self, actions):
        """Register actions sorted alphabetically by their option strings."""
        ordered = sorted(actions, key=lambda action: action.option_strings)
        super().add_arguments(ordered)
class FlexibleArgumentParser(ArgumentParser):
    """ArgumentParser that allows both underscore and dash in names."""

    # Actions registered with deprecated=True (class-level so all parser
    # instances share the same registry).
    _deprecated: set[Action] = set()
    # Epilog text explaining the dotted / JSON argument syntax.
    _json_tip: str = (
        "When passing JSON CLI arguments, the following sets of arguments "
        "are equivalent:\n"
        '  --json-arg \'{"key1": "value1", "key2": {"key3": "value2"}}\'\n'
        "  --json-arg.key1 value1 --json-arg.key2.key3 value2\n\n"
        "Additionally, list elements can be passed individually using +:\n"
        '  --json-arg \'{"key4": ["value3", "value4", "value5"]}\'\n'
        "  --json-arg.key4+ value3 --json-arg.key4+='value4,value5'\n\n"
    )
    # Keyword captured from `--help=<keyword>`; used to filter help output.
    _search_keyword: str | None = None

    def __init__(self, *args, **kwargs):
        # Set the default "formatter_class" to SortedHelpFormatter
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = SortedHelpFormatter
        # Pop kwarg "add_json_tip" to control whether to add the JSON tip
        self.add_json_tip = kwargs.pop("add_json_tip", True)
        super().__init__(*args, **kwargs)

    if sys.version_info < (3, 13):
        # Enable the deprecated kwarg for Python 3.12 and below
        def parse_known_args(self, args=None, namespace=None):
            namespace, args = super().parse_known_args(args, namespace)
            # Warn once for each deprecated option set to a non-default value.
            for action in FlexibleArgumentParser._deprecated:
                if (
                    hasattr(namespace, dest := action.dest)
                    and getattr(namespace, dest) != action.default
                ):
                    logger.warning_once("argument '%s' is deprecated", dest)
            return namespace, args

        def add_argument(self, *args, **kwargs):
            # Intercept the 3.13+ "deprecated" kwarg and track it ourselves.
            deprecated = kwargs.pop("deprecated", False)
            action = super().add_argument(*args, **kwargs)
            if deprecated:
                FlexibleArgumentParser._deprecated.add(action)
            return action

        class _FlexibleArgumentGroup(_ArgumentGroup):
            # Argument group that honors the "deprecated" kwarg like above.
            def add_argument(self, *args, **kwargs):
                deprecated = kwargs.pop("deprecated", False)
                action = super().add_argument(*args, **kwargs)
                if deprecated:
                    FlexibleArgumentParser._deprecated.add(action)
                return action

        def add_argument_group(self, *args, **kwargs):
            group = self._FlexibleArgumentGroup(self, *args, **kwargs)
            self._action_groups.append(group)
            return group

    def format_help(self):
        """Render help output, optionally filtered by `--help=<keyword>`."""
        # Only use custom help formatting for bottom level parsers
        if self._subparsers is not None:
            return super().format_help()

        formatter = self._get_formatter()

        # Handle keyword search of the args
        if (search_keyword := self._search_keyword) is not None:
            # Normalise the search keyword
            search_keyword = search_keyword.lower().replace("_", "-")

            # Return full help if searching for 'all'
            if search_keyword == "all":
                self.epilog = self._json_tip
                return super().format_help()

            # Return group help if searching for a group title
            for group in self._action_groups:
                if group.title and group.title.lower() == search_keyword:
                    formatter.start_section(group.title)
                    formatter.add_text(group.description)
                    formatter.add_arguments(group._group_actions)
                    formatter.end_section()
                    formatter.add_text(self._json_tip)
                    return formatter.format_help()

            # Return matched args if searching for an arg name
            matched_actions = []
            for group in self._action_groups:
                for action in group._group_actions:
                    # search option name
                    if any(
                        search_keyword in opt.lower() for opt in action.option_strings
                    ):
                        matched_actions.append(action)
            if matched_actions:
                formatter.start_section(f"Arguments matching '{search_keyword}'")
                formatter.add_arguments(matched_actions)
                formatter.end_section()
                formatter.add_text(self._json_tip)
                return formatter.format_help()

            # No match found
            formatter.add_text(
                f"No group or arguments matching '{search_keyword}'.\n"
                "Use '--help' to see available groups or "
                "'--help=all' to see all available parameters."
            )
            return formatter.format_help()

        # usage
        formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)

        # description
        formatter.add_text(self.description)

        # positionals, optionals and user-defined groups
        formatter.start_section("Config Groups")
        config_groups = ""
        for group in self._action_groups:
            if not group._group_actions:
                continue
            title = group.title
            description = group.description or ""
            config_groups += f"{title: <24}{description}\n"
        formatter.add_text(config_groups)
        formatter.end_section()

        # epilog
        formatter.add_text(self.epilog)

        # determine help from format above
        return formatter.format_help()

    def parse_args(  # type: ignore[override]
        self,
        args: list[str] | None = None,
        namespace: Namespace | None = None,
    ):
        """Parse args after normalizing `vllm serve` usage, config files,
        dash/underscore spellings, `-O` shorthands and dotted JSON args."""
        if args is None:
            args = sys.argv[1:]

        if args and args[0] == "serve":
            # Check for --model in command line arguments first
            try:
                model_idx = next(
                    i for i, arg in enumerate(args) if re.match(r"^--model(=.+|$)", arg)
                )
                logger.warning(
                    "With `vllm serve`, you should provide the model as a "
                    "positional argument or in a config file instead of via "
                    "the `--model` option. "
                    "The `--model` option will be removed in v0.13."
                )
                if args[model_idx] == "--model":
                    model_tag = args[model_idx + 1]
                    rest_start_idx = model_idx + 2
                else:
                    model_tag = args[model_idx].removeprefix("--model=")
                    rest_start_idx = model_idx + 1
                # Move <model> to the front, e,g:
                # [Before]
                # vllm serve -tp 2 --model <model> --enforce-eager --port 8001
                # [After]
                # vllm serve <model> -tp 2 --enforce-eager --port 8001
                args = [
                    "serve",
                    model_tag,
                    *args[1:model_idx],
                    *args[rest_start_idx:],
                ]
            except StopIteration:
                # No --model option present; nothing to rewrite.
                pass

            # Check for --served-model-name without a positional model argument
            if (
                len(args) > 1
                and args[1].startswith("-")
                and not any(re.match(r"^--config(=.+|$)", arg) for arg in args)
                and any(
                    re.match(r"^--served[-_]model[-_]name(=.+|$)", arg) for arg in args
                )
            ):
                raise ValueError(
                    "`model` should be provided as the first positional argument when "
                    "using `vllm serve`. i.e. `vllm serve <model> --<arg> <value>`."
                )

        if "--config" in args:
            args = self._pull_args_from_config(args)

        def repl(match: re.Match) -> str:
            """Replaces underscores with dashes in the matched string."""
            return match.group(0).replace("_", "-")

        # Everything between the first -- and the first .
        pattern = re.compile(r"(?<=--)[^\.]*")

        # Convert underscores to dashes and vice versa in argument names
        processed_args = list[str]()
        for i, arg in enumerate(args):
            if arg.startswith("--help="):
                FlexibleArgumentParser._search_keyword = arg.split("=", 1)[-1].lower()
                processed_args.append("--help")
            elif arg.startswith("--"):
                if "=" in arg:
                    key, value = arg.split("=", 1)
                    key = pattern.sub(repl, key, count=1)
                    processed_args.append(f"{key}={value}")
                else:
                    key = pattern.sub(repl, arg, count=1)
                    processed_args.append(key)
            elif arg.startswith("-O") and arg != "-O":
                # allow -O flag to be used without space, e.g. -O3 or -Odecode
                # also handle -O=<optimization_level> here
                optimization_level = arg[3:] if arg[2] == "=" else arg[2:]
                processed_args += ["--optimization-level", optimization_level]
            elif (
                arg == "-O"
                and i + 1 < len(args)
                and args[i + 1] in {"0", "1", "2", "3"}
            ):
                # Convert -O <n> to --optimization-level <n>
                # (the value itself is appended on the next iteration).
                processed_args.append("--optimization-level")
            else:
                processed_args.append(arg)

        def create_nested_dict(keys: list[str], value: str) -> dict[str, Any]:
            """Creates a nested dictionary from a list of keys and a value.

            For example, `keys = ["a", "b", "c"]` and `value = 1` will create:
            `{"a": {"b": {"c": 1}}}`
            """
            nested_dict: Any = value
            for key in reversed(keys):
                nested_dict = {key: nested_dict}
            return nested_dict

        def recursive_dict_update(
            original: dict[str, Any],
            update: dict[str, Any],
        ) -> set[str]:
            """Recursively updates a dictionary with another dictionary.

            Returns a set of duplicate keys that were overwritten.
            """
            duplicates = set[str]()
            for k, v in update.items():
                if isinstance(v, dict) and isinstance(original.get(k), dict):
                    nested_duplicates = recursive_dict_update(original[k], v)
                    duplicates |= {f"{k}.{d}" for d in nested_duplicates}
                elif isinstance(v, list) and isinstance(original.get(k), list):
                    # Lists are extended (the `+` syntax), not overwritten.
                    original[k] += v
                else:
                    if k in original:
                        duplicates.add(k)
                    original[k] = v
            return duplicates

        delete = set[int]()
        dict_args = defaultdict[str, dict[str, Any]](dict)
        duplicates = set[str]()
        # Track regular arguments (non-dict args) for duplicate detection
        regular_args_seen = set[str]()
        for i, processed_arg in enumerate(processed_args):
            if i in delete:  # skip if value from previous arg
                continue

            if processed_arg.startswith("--") and "." not in processed_arg:
                if "=" in processed_arg:
                    arg_name = processed_arg.split("=", 1)[0]
                else:
                    arg_name = processed_arg
                if arg_name in regular_args_seen:
                    duplicates.add(arg_name)
                else:
                    regular_args_seen.add(arg_name)
                continue

            if processed_arg.startswith("-") and "." in processed_arg:
                if "=" in processed_arg:
                    processed_arg, value_str = processed_arg.split("=", 1)
                    if "." not in processed_arg:
                        # False positive, '.' was only in the value
                        continue
                else:
                    # Value is the following token; mark it consumed.
                    value_str = processed_args[i + 1]
                    delete.add(i + 1)

                if processed_arg.endswith("+"):
                    # `--key+ v1,v2` appends list elements.
                    processed_arg = processed_arg[:-1]
                    value_str = json.dumps(list(value_str.split(",")))

                key, *keys = processed_arg.split(".")
                try:
                    value = json.loads(value_str)
                except json.decoder.JSONDecodeError:
                    # Not valid JSON: keep it as a plain string.
                    value = value_str

                # Merge all values with the same key into a single dict
                arg_dict = create_nested_dict(keys, value)
                arg_duplicates = recursive_dict_update(dict_args[key], arg_dict)
                duplicates |= {f"{key}.{d}" for d in arg_duplicates}
                delete.add(i)
        # Filter out the dict args we set to None
        processed_args = [a for i, a in enumerate(processed_args) if i not in delete]
        if duplicates:
            logger.warning("Found duplicate keys %s", ", ".join(duplicates))
        # Add the dict args back as if they were originally passed as JSON
        for dict_arg, dict_value in dict_args.items():
            processed_args.append(dict_arg)
            processed_args.append(json.dumps(dict_value))

        return super().parse_args(processed_args, namespace)

    def check_port(self, value):
        """argparse `type=` helper: validate a non-privileged TCP port."""
        try:
            value = int(value)
        except ValueError:
            msg = "Port must be an integer"
            raise ArgumentTypeError(msg) from None
        if not (1024 <= value <= 65535):
            raise ArgumentTypeError("Port must be between 1024 and 65535")
        return value

    def _pull_args_from_config(self, args: list[str]) -> list[str]:
        """Method to pull arguments specified in the config file
        into the command-line args variable.

        The arguments in config file will be inserted between
        the argument list.

        example:
        ```yaml
            port: 12323
            tensor-parallel-size: 4
        ```
        ```python
        $: vllm {serve,chat,complete} "facebook/opt-12B" \
            --config config.yaml -tp 2
        $: args = [
            "serve,chat,complete",
            "facebook/opt-12B",
            '--config', 'config.yaml',
            '-tp', '2'
        ]
        $: args = [
            "serve,chat,complete",
            "facebook/opt-12B",
            '--port', '12323',
            '--tensor-parallel-size', '4',
            '-tp', '2'
        ]
        ```

        Please note how the config args are inserted after the sub command.
        this way the order of priorities is maintained when these are args
        parsed by super().
        """
        assert args.count("--config") <= 1, "More than one config file specified!"

        index = args.index("--config")
        if index == len(args) - 1:
            raise ValueError(
                "No config file specified! Please check your command-line arguments."
            )

        file_path = args[index + 1]

        config_args = self.load_config_file(file_path)

        # 0th index might be the sub command {serve,chat,complete,...}
        # optionally followed by model_tag (only for serve)
        # followed by config args
        # followed by rest of cli args.
        # maintaining this order will enforce the precedence
        # of cli > config > defaults
        if args[0].startswith("-"):
            # No sub command (e.g., api_server entry point)
            args = config_args + args[0:index] + args[index + 2 :]
        elif args[0] == "serve":
            model_in_cli = len(args) > 1 and not args[1].startswith("-")
            model_in_config = any(arg == "--model" for arg in config_args)

            if not model_in_cli and not model_in_config:
                raise ValueError(
                    "No model specified! Please specify model either "
                    "as a positional argument or in a config file."
                )

            if model_in_cli:
                # Model specified as positional arg, keep CLI version
                args = (
                    [args[0]]
                    + [args[1]]
                    + config_args
                    + args[2:index]
                    + args[index + 2 :]
                )
            else:
                # No model in CLI, use config if available
                args = [args[0]] + config_args + args[1:index] + args[index + 2 :]
        else:
            args = [args[0]] + config_args + args[1:index] + args[index + 2 :]

        return args

    def load_config_file(self, file_path: str) -> list[str]:
        """Loads a yaml file and returns the key value pairs as a
        flattened list with argparse like pattern.

        Supports both flat configs and nested YAML structures.

        Flat config example:
        ```yaml
            port: 12323
            tensor-parallel-size: 4
        ```
        returns:
            ['--port', '12323', '--tensor-parallel-size', '4']

        Nested config example:
        ```yaml
            compilation-config:
                pass_config:
                    fuse_allreduce_rms: true
            speculative-config:
                model: "nvidia/gpt-oss-120b-Eagle3-v2"
                num_speculative_tokens: 3
        ```
        returns:
            ['--compilation-config', '{"pass_config": {"fuse_allreduce_rms": true}}',
             '--speculative-config', '{"model": "nvidia/gpt-oss-120b-Eagle3-v2", ...}']
        """
        extension: str = file_path.split(".")[-1]
        if extension not in ("yaml", "yml"):
            raise ValueError(
                f"Config file must be of a yaml/yml type. {extension} supplied"
            )

        # Supports both flat configs and nested dicts
        processed_args: list[str] = []

        config: dict[str, Any] = {}
        try:
            with open(file_path) as config_file:
                config = yaml.safe_load(config_file)
        except Exception as ex:
            logger.error(
                "Unable to read the config file at %s. Check path correctness",
                file_path,
            )
            raise ex

        for key, value in config.items():
            if isinstance(value, bool):
                # Booleans become bare flags (only when true).
                if value:
                    processed_args.append("--" + key)
            elif isinstance(value, list):
                # Lists become one flag followed by each element.
                if value:
                    processed_args.append("--" + key)
                    for item in value:
                        processed_args.append(str(item))
            elif isinstance(value, dict):
                # Convert nested dicts to JSON strings so they can be parsed
                # by the existing JSON argument parsing machinery.
                processed_args.append("--" + key)
                processed_args.append(json.dumps(value))
            else:
                processed_args.append("--" + key)
                processed_args.append(str(value))

        return processed_args
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/utils/argparse_utils.py",
"license": "Apache License 2.0",
"lines": 451,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/utils/math_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Math utility functions for vLLM."""
# Approximate value of 1/ln(2), used for log/exp base conversion
# (e.g. converting natural-log/exp computations to base 2).
# Best FP32 approximation: 1.4426950216 (hex 0x3FB8AA3B)
RCP_LN2 = 1.4426950216
def cdiv(a: int, b: int) -> int:
    """Ceiling division: the smallest integer >= a / b, in exact integer math."""
    quotient, remainder = divmod(a, b)
    # divmod floors toward -inf; any nonzero remainder means we round up.
    return quotient + (1 if remainder else 0)
def next_power_of_2(n: int) -> int:
    """The next power of 2 (inclusive): smallest power of two >= n; 1 for n < 1."""
    if n <= 1:
        # Covers n < 1 explicitly; n == 1 is already a power of two.
        return 1
    return 1 << (n - 1).bit_length()
def prev_power_of_2(n: int) -> int:
    """The previous power of 2 (inclusive): largest power of two <= n; 0 if n <= 0."""
    if n <= 0:
        return 0
    return 1 << (n.bit_length() - 1)
def round_up(x: int, y: int) -> int:
    """Round up x to the nearest multiple of y."""
    # Number of whole groups of size y needed to cover x, times y.
    groups = (x + y - 1) // y
    return groups * y
def round_down(x: int, y: int) -> int:
    """Round down x to the nearest multiple of y."""
    # x % y has the sign of y, so subtracting it floors toward the
    # nearest lower multiple -- identical to (x // y) * y.
    return x - (x % y)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/utils/math_utils.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Standard
import os
import threading
from typing import TYPE_CHECKING, Union
import torch
from lmcache.logging import init_logger
from lmcache.v1.config import LMCacheEngineConfig as V1Config
if TYPE_CHECKING:
from vllm.config import ModelConfig
from vllm.multimodal.inputs import PlaceholderRange
from vllm.v1.core.sched.output import NewRequestData
from vllm.v1.request import Request
logger = init_logger(__name__)

# Name under which the LMCache engine instance is registered in vLLM.
ENGINE_NAME = "vllm-instance"

# Thread-safe singleton storage for the lazily loaded LMCache config.
_config_instance: V1Config | None = None
_config_lock = threading.Lock()
def is_false(value: str) -> bool:
    """Check if the given string value is equivalent to 'false' (case-insensitive)."""
    falsy_spellings = {"false", "0", "no", "n", "off"}
    return value.lower() in falsy_spellings
def lmcache_get_or_create_config() -> V1Config:
    """Get the LMCache configuration from the environment variable
    `LMCACHE_CONFIG_FILE`. If the environment variable is not set, this
    function will return the default configuration.

    This function is thread-safe and implements singleton pattern,
    ensuring the configuration is loaded only once.
    """
    global _config_instance

    # Double-checked locking for thread-safe singleton
    if _config_instance is None:
        with _config_lock:
            if _config_instance is None:  # Check again within lock
                LMCacheEngineConfig = V1Config  # type: ignore[assignment]
                if "LMCACHE_CONFIG_FILE" not in os.environ:
                    # No config file given: build config purely from env vars.
                    logger.warning(
                        "No LMCache configuration file is set. Trying to read"
                        " configurations from the environment variables."
                    )
                    logger.warning(
                        "You can set the configuration file through "
                        "the environment variable: LMCACHE_CONFIG_FILE"
                    )
                    _config_instance = LMCacheEngineConfig.from_env()
                else:
                    config_file = os.environ["LMCACHE_CONFIG_FILE"]
                    logger.info("Loading LMCache config file %s", config_file)
                    _config_instance = LMCacheEngineConfig.from_file(config_file)
                # Update config from environment variables
                _config_instance.update_config_from_env()
    return _config_instance
def hex_hash_to_int16(s: str) -> int:
    """
    Fold a hex hash string down to its low 16 bits.
    """
    full_value = int(s, 16)
    return full_value & 0xFFFF
def apply_mm_hashes_to_token_ids(
    token_ids: torch.Tensor,
    mm_hashes: list[str],
    mm_positions: list["PlaceholderRange"],
) -> torch.Tensor:
    """
    Overwrite multimodal placeholder spans of token_ids in-place, filling
    each span with the low 16 bits of that item's hex hash via slice
    assignment (no intermediate copies).
    """
    total = token_ids.size(0)
    for mm_hash, placeholder in zip(mm_hashes, mm_positions):
        begin = placeholder.offset
        if begin >= total:
            # Placeholder starts beyond the (possibly truncated) token ids.
            continue
        stop = min(begin + placeholder.length, total)
        # Same folding as hex_hash_to_int16: low 16 bits of the hex hash.
        token_ids[begin:stop] = int(mm_hash, 16) & 0xFFFF
    return token_ids
def mla_enabled(model_config: "ModelConfig") -> bool:
    """True iff the model config explicitly sets a boolean `use_mla` flag to True."""
    flag = getattr(model_config, "use_mla", None)
    # Require a genuine bool: truthy non-bool values do not count.
    return isinstance(flag, bool) and flag
def create_lmcache_metadata(
    vllm_config=None, model_config=None, parallel_config=None, cache_config=None
):
    """
    Create LMCacheEngineMetadata from vLLM configuration.

    This function extracts common metadata creation logic that was duplicated
    across multiple files.

    Args:
        vllm_config (VllmConfig): vLLM configuration object containing model,
                                  parallel, and cache configs (alternative to
                                  individual config parameters)
        model_config (ModelConfig): Model configuration (alternative to
                                    vllm_config)
        parallel_config (ParallelConfig): Parallel configuration (alternative
                                          to vllm_config)
        cache_config (CacheConfig): Cache configuration (alternative to
                                    vllm_config)

    Returns:
        A (LMCacheEngineMetadata, config) tuple, where config is the
        process-wide singleton from lmcache_get_or_create_config().

    Raises:
        ValueError: if vllm_config is None and any of the three individual
                    configs is missing.
    """
    # Third Party
    # First Party
    # Imported lazily to avoid a hard dependency at module import time.
    from lmcache.config import LMCacheEngineMetadata

    from vllm.utils.torch_utils import get_kv_cache_torch_dtype

    config = lmcache_get_or_create_config()

    # Support both vllm_config object and individual config parameters
    if vllm_config is not None:
        model_cfg = vllm_config.model_config
        parallel_cfg = vllm_config.parallel_config
        cache_cfg = vllm_config.cache_config
    else:
        if model_config is None or parallel_config is None or cache_config is None:
            raise ValueError(
                "Either vllm_config must be provided, or all of "
                "model_config, parallel_config, and cache_config must be provided."
            )
        model_cfg = model_config
        parallel_cfg = parallel_config
        cache_cfg = cache_config

    # Get KV cache dtype
    kv_dtype = get_kv_cache_torch_dtype(cache_cfg.cache_dtype, model_cfg.dtype)

    # Check if MLA is enabled
    use_mla = mla_enabled(model_cfg)

    # Construct KV shape (for memory pool)
    # MLA stores a single latent cache instead of separate K and V planes.
    num_layer = model_cfg.get_num_layers(parallel_cfg)
    chunk_size = config.chunk_size
    num_kv_head = model_cfg.get_num_kv_heads(parallel_cfg)
    head_size = model_cfg.get_head_size()
    kv_shape = (num_layer, 1 if use_mla else 2, chunk_size, num_kv_head, head_size)

    # Create metadata
    metadata = LMCacheEngineMetadata(
        model_cfg.model,
        parallel_cfg.world_size,
        parallel_cfg.rank,
        "vllm",
        kv_dtype,
        kv_shape,
        use_mla,
    )

    return metadata, config
def extract_mm_features(
    request: Union["Request", "NewRequestData"], modify: bool = False
) -> tuple[list[str], list["PlaceholderRange"]]:
    """
    Normalize multimodal information from a request into parallel lists.

    Reads either `request.mm_features` (objects exposing `.identifier` and
    `.mm_position`) or the legacy `request.mm_hashes` / `request.mm_positions`
    fields, and returns two equally sized lists: the multimodal hash
    identifiers and their positions. Returns `([], [])` when the request
    carries no multimodal data.

    Args:
        request: The source object.
        modify: Copy semantics for the legacy path. If True, shallow copies
            are returned so the caller may mutate them without affecting
            `request`; if False, the original sequences are returned as-is
            (zero-copy) and must be treated as read-only.

    Returns:
        tuple[list[str], list[PlaceholderRange]]: (`mm_hashes`, `mm_positions`).
    """
    features = getattr(request, "mm_features", None)
    if features:
        hashes = [feature.identifier for feature in features]
        positions = [feature.mm_position for feature in features]
        return (hashes, positions)

    legacy_hashes = getattr(request, "mm_hashes", None)
    if legacy_hashes:
        legacy_positions = request.mm_positions
        if modify:
            # Shallow copies: safe for the caller to mutate.
            return (legacy_hashes.copy(), legacy_positions.copy())  # type: ignore
        # Zero-copy: caller must treat these as read-only.
        return (legacy_hashes, legacy_positions)  # type: ignore

    return ([], [])
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/utils.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Standard
import os
import uuid
from collections.abc import Generator
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
import torch
from lmcache import utils
from lmcache.config import LMCacheEngineMetadata
from lmcache.logging import init_logger
from lmcache.observability import LMCStatsMonitor
from lmcache.utils import _lmcache_nvtx_annotate
from lmcache.v1.cache_engine import LMCacheEngine, LMCacheEngineBuilder
from lmcache.v1.compute.blend import LMCBlenderBuilder
from lmcache.v1.config import LMCacheEngineConfig, _validate_and_set_config_value
from lmcache.v1.gpu_connector import (
VLLMBufferLayerwiseGPUConnector,
VLLMPagedMemGPUConnectorV2,
VLLMPagedMemLayerwiseGPUConnector,
)
from lmcache.v1.internal_api_server.api_server import InternalAPIServer
from lmcache.v1.lookup_client import LookupClientFactory
from lmcache.v1.lookup_client.lmcache_async_lookup_client import (
LMCacheAsyncLookupServer,
)
from lmcache.v1.offload_server.zmq_server import ZMQOffloadServer
try:
from lmcache.v1.plugin.runtime_plugin_launcher import RuntimePluginLauncher
except ImportError:
# Backwards compatibility for lmcache <= 0.3.10-post1
from lmcache.v1.plugin.plugin_launcher import (
PluginLauncher as RuntimePluginLauncher,
)
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorBase_V1,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.lmcache_integration.utils import (
ENGINE_NAME,
apply_mm_hashes_to_token_ids,
extract_mm_features,
lmcache_get_or_create_config,
mla_enabled,
)
from vllm.distributed.parallel_state import get_tensor_model_parallel_rank, get_tp_group
from vllm.sampling_params import SamplingParams
from vllm.utils.math_utils import cdiv
from vllm.utils.torch_utils import get_kv_cache_torch_dtype
from vllm.v1.attention.backend import AttentionMetadata
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.version import __version__ as VLLM_VERSION
if TYPE_CHECKING:
from vllm.forward_context import ForwardContext
from vllm.multimodal.inputs import PlaceholderRange
from vllm.v1.core.kv_cache_manager import KVCacheManager
from vllm.v1.core.sched.output import NewRequestData
from vllm.v1.request import Request
logger = init_logger(__name__)
@dataclass
class LoadSpec:
    """How many tokens can be loaded from LMCache for one request."""

    # Number of tokens cached in vLLM
    vllm_cached_tokens: int
    # Number of tokens that are cached in LMCache
    lmcache_cached_tokens: int
    # Whether the scheduler allow us to load the tokens
    can_load: bool
@dataclass
class SaveSpec:
    """How much of one request's KV cache should be saved to LMCache."""

    # Skip already saved tokens
    skip_leading_tokens: int
    # Whether the scheduler allow us to save the tokens
    can_save: bool
@dataclass
class DisaggSpec:
    """Transfer coordination info for disaggregated prefill/decode."""

    # Request id
    req_id: str
    # Identity and endpoints of the receiving side
    receiver_id: str
    receiver_host: str
    receiver_init_port: int
    receiver_alloc_port: int
    # Whether the current pass is the last prefill step for this request
    is_last_prefill: bool = False
    # Running count of tokens already transferred for this request
    num_transferred_tokens: int = 0


# Staging area keyed by request id; entries are popped (consumed) in
# `RequestTracker.from_new_request`. Presumably populated by
# `update_state_after_alloc` elsewhere in this file — see the NOTE there.
tmp_disagg_tracker: dict[str, DisaggSpec] = {}
def extract_request_configs(sampling_params: SamplingParams) -> dict | None:
request_configs = None
if (
sampling_params.extra_args is not None
and "kv_transfer_params" in sampling_params.extra_args
):
kv_transfer_params = sampling_params.extra_args.get("kv_transfer_params")
if kv_transfer_params is None:
return None
assert isinstance(kv_transfer_params, dict)
for k, v in kv_transfer_params.items():
if k.startswith("lmcache."):
if request_configs is None:
request_configs = {}
request_configs[k] = v
return request_configs
@dataclass
class RequestTracker:
    """Mutable per-request bookkeeping kept by the scheduler-side connector.

    Tracks the tokens scheduled so far, the blocks allocated so far, and
    how much of the KV cache has already been saved to LMCache.
    """

    # Request id
    req_id: str
    # Total prompt token length
    prompt_len: int
    # The token ids that has been scheduled so far
    token_ids: list[int]
    # The block ids that has been allocated so far
    # NOTE: allocated blocks could be more than the number of tokens
    allocated_block_ids: list[int]
    # The number of tokens that has been saved
    num_saved_tokens: int = 0
    # Disagg spec for the request
    disagg_spec: DisaggSpec | None = None
    # Multimodal hashes and positions
    mm_hashes: list[str] | None = None
    mm_positions: list["PlaceholderRange"] | None = None
    # The configs of the request, includes tags and other configs
    request_configs: dict | None = None
    # Whether the request is in decode phase.
    # NOTE(review): intentionally left without a type annotation so that it
    # stays a shared class attribute rather than becoming a dataclass field
    # (annotating it would add an __init__ parameter). Assigning
    # `self.is_decode_phase = True` in `update` shadows it per instance.
    is_decode_phase = False
    # Whether the request cache should be saved
    skip_save: bool = False

    @_lmcache_nvtx_annotate
    @staticmethod
    def from_new_request(
        lmcache_config: LMCacheEngineConfig,
        new_request: "NewRequestData",
        num_tokens_to_compute: int,
        lmcache_cached_tokens: int,
        skip_save: bool,
    ) -> "RequestTracker":
        """Create the request tracker from a new request.

        Args:
            lmcache_config (LMCacheEngineConfig): the LMCache engine config.
            new_request (NewRequestData): the new request data.
            num_tokens_to_compute (int): the number of tokens that will
                be 'computed', including the `num_computed_tokens` (vLLM's
                local cache hit) and new tokens that will be scheduled.
            lmcache_cached_tokens (int): the number of tokens that are
                cached in LMCache.
            skip_save (bool): whether the request cache should be saved
        """
        # vLLM 0.9.0 update: request.block_ids changed from list[int] to
        # list[list[int]]
        # Need to check the type of request.block_ids
        unfolded_block_ids = []
        if not isinstance(new_request.block_ids[0], list):
            unfolded_block_ids = new_request.block_ids.copy()
        else:
            # According to the vLLM code
            # (https://github.com/vllm-project/vllm/blob/main/vllm/v1/core/
            # sched/scheduler.py#L943),
            # only one KVCacheGroup is supported in connector for now.
            unfolded_block_ids = new_request.block_ids[0].copy()
        # NOTE: Initialized in `update_state_after_alloc`
        disagg_spec = tmp_disagg_tracker.pop(new_request.req_id, None)
        if new_request.sampling_params:
            request_configs = extract_request_configs(new_request.sampling_params)
        else:
            request_configs = None
        # modify=True: get copies so this tracker owns (and may mutate)
        # the lists independently of the request object.
        mm_hashes, mm_positions = extract_mm_features(new_request, modify=True)
        assert new_request.prompt_token_ids is not None
        return RequestTracker(
            req_id=new_request.req_id,
            prompt_len=len(new_request.prompt_token_ids),
            token_ids=new_request.prompt_token_ids[:num_tokens_to_compute].copy(),
            allocated_block_ids=unfolded_block_ids,
            num_saved_tokens=lmcache_cached_tokens,
            disagg_spec=disagg_spec,
            mm_hashes=mm_hashes,
            mm_positions=mm_positions,
            skip_save=skip_save,
            request_configs=request_configs,
        )

    def update(
        self,
        new_token_ids: list[int],
        new_block_ids: tuple[list[int], ...] | None | list[int],
    ) -> None:
        """Update the request tracker when a running request is
        scheduled again

        Args:
            new_token_ids: token ids newly scheduled in this step.
            new_block_ids: newly allocated blocks; may be None, an empty
                sequence, a tuple of per-group lists (only group 0 is
                used), or a flat list of block ids.
        """
        self.token_ids.extend(new_token_ids)
        if new_block_ids is None:
            # https://github.com/vllm-project/vllm/commit/
            # b029de9902aa3ac58806c8c17776c7074175b6db
            new_block_ids = []
        elif len(new_block_ids) == 0:
            new_block_ids = []
        elif isinstance(new_block_ids, tuple):
            new_block_ids = new_block_ids[0]
        elif isinstance(new_block_ids, list):
            pass
        else:
            raise ValueError(
                f"Unsupported new_block_ids type {type(new_block_ids)}: "
                f"should be None[list[int], ...], tuple or list[int]."
            )
        self.allocated_block_ids.extend(new_block_ids)
        # When a request is scheduled again, and the number of new tokens
        # is 1 (excluding chunked prefill), the request is in decode phase.
        if len(new_token_ids) == 1:
            self.is_decode_phase = True
@dataclass
class ReqMeta:
    """Immutable per-request metadata handed from scheduler to worker."""

    # Request id
    req_id: str
    # Request tokens
    token_ids: list[int]  # torch.Tensor
    # Slot mapping
    slot_mapping: torch.Tensor
    # Whether is last prefill or not
    is_last_prefill: bool = False
    # Skip save or not
    save_spec: SaveSpec | None = None
    # load_spec
    load_spec: LoadSpec | None = None
    # disagg spec
    disagg_spec: DisaggSpec | None = None
    # the configs of the request
    request_configs: dict | None = None

    @staticmethod
    def from_request_tracker(
        tracker: RequestTracker,
        block_size: int,
        lmcache_chunk_size: int = 256,
        load_spec: LoadSpec | None = None,
        discard_partial_chunks: bool = True,
        save_decode_cache: bool = False,
    ) -> "ReqMeta | None":
        """Create the request metadata from a request tracker.

        Args:
            tracker (RequestTracker): the request tracker.
            block_size (int): the block size in vLLM.
            lmcache_chunk_size (int): the chunk size for LMCache.
            load_spec (Optional[LoadSpec]): the load spec for KV cache loading.
            discard_partial_chunks (bool): whether to discard partial chunks.
            save_decode_cache (bool): whether to save the cache in decode phase.

        Returns:
            the request metadata if we need to perform load/save
            operations, None otherwise.
        """
        input_token_ids = tracker.token_ids
        input_token_len = len(input_token_ids)
        # The last prefill step is the one where all prompt tokens have
        # been scheduled.
        is_last_prefill = False
        if input_token_len == tracker.prompt_len:
            is_last_prefill = True
        # For save operation: do not save if the following condition is met
        # 1. has already been saved before (num_saved_tokens > 0)
        # 2. number of unsaved tokens is not reached the chunk boundary
        # 3. if save_decode_cache is False and it is in decode phase
        skip_leading_tokens = tracker.num_saved_tokens
        # Next LMCache chunk boundary strictly beyond the saved prefix.
        chunk_boundary = (
            cdiv(tracker.num_saved_tokens + 1, lmcache_chunk_size) * lmcache_chunk_size
        )
        # NOTE(vladnosiv): for disagg, you cannot skip saving, as saving is a
        # transfer. Check if request_configs has lmcache.skip_save set to True
        request_skip = (tracker.request_configs or {}).get("lmcache.skip_save", False)
        skip_save = tracker.disagg_spec is None and (
            tracker.skip_save
            or (tracker.num_saved_tokens > 0 and input_token_len < chunk_boundary)
            or (tracker.is_decode_phase and not save_decode_cache)
            or request_skip
        )
        # Nothing to save and nothing to load -> no metadata needed.
        if skip_save and load_spec is None:
            return None
        # Calculate number of tokens to save based on discard_partial_chunks
        # setting
        # NOTE(vladnosiv): for the input_token_len chunk prefill,
        # we are required to discard partial chunks,
        # as new tokens will be added in the next iteration.
        num_tokens_to_save = (
            (input_token_len // lmcache_chunk_size * lmcache_chunk_size)
            if not is_last_prefill or discard_partial_chunks
            else input_token_len
        )
        # If we need to save, update the number of saved tokens
        if not skip_save:
            tracker.num_saved_tokens = num_tokens_to_save
        save_spec = SaveSpec(skip_leading_tokens, not skip_save)
        # Calculate the token ids and slot mappings for load and save
        token_ids = input_token_ids[:num_tokens_to_save]
        # If the request has multimodal hashes, apply them to the token ids
        # (replaces placeholder token spans so cache keys reflect mm content)
        if tracker.mm_hashes:
            token_ids_tensor = torch.tensor(token_ids)
            assert tracker.mm_positions is not None, (
                "tracker got mm_hashes but no mm_positions"
            )
            apply_mm_hashes_to_token_ids(
                token_ids_tensor, tracker.mm_hashes, tracker.mm_positions
            )
            token_ids = token_ids_tensor.tolist()
        num_blocks = len(tracker.allocated_block_ids)
        if len(token_ids) > num_blocks * block_size:
            logger.error(
                "The number of tokens is more than the number of blocks."
                "Something might be wrong in scheduling logic!"
            )
            logger.error(
                "Num tokens: %d, num blocks: %d, block size: %d",
                len(token_ids),
                num_blocks,
                block_size,
            )
        # slot_mapping[i] = flat KV slot index of token i:
        # block_id * block_size + offset within the block.
        block_ids = torch.tensor(tracker.allocated_block_ids, dtype=torch.long)
        block_offsets = torch.arange(0, block_size, dtype=torch.long)
        slot_mapping = (
            block_offsets.reshape((1, block_size))
            + block_ids.reshape((num_blocks, 1)) * block_size
        )
        slot_mapping = slot_mapping.flatten()[: len(token_ids)]
        assert slot_mapping.dtype == torch.long
        # For load operation: check whether the request is scheduled to load
        if load_spec is not None and load_spec.can_load:
            logger.debug(
                "Scheduled to load %d tokens for request %s",
                load_spec.lmcache_cached_tokens,
                tracker.req_id,
            )
        else:
            # Do not load if not in `can_load` state
            load_spec = None
        return ReqMeta(
            req_id=tracker.req_id,
            token_ids=token_ids,
            slot_mapping=slot_mapping,
            is_last_prefill=is_last_prefill,
            save_spec=save_spec,
            load_spec=load_spec,
            disagg_spec=tracker.disagg_spec,
            request_configs=tracker.request_configs,
        )
def need_gpu_interm_buffer(lmcache_config: "LMCacheEngineConfig"):
    """Return whether an intermediate GPU buffer is needed.

    The buffer is not needed when PD (prefill/decode disaggregation)
    is enabled in the LMCache config.
    """
    if lmcache_config.enable_pd:
        return False
    return True
def _calculate_mtp_layers(vllm_config, model_config):
num_mtp_layers = 0
if vllm_config is not None and vllm_config.speculative_config is not None:
logger.info(
"vllm_config.speculative_config: %s", vllm_config.speculative_config
)
# TODO(baoloongmao): Support other MTP methods
if vllm_config.speculative_config.method == "deepseek_mtp":
num_mtp_layers = getattr(
model_config.hf_config, "num_nextn_predict_layers", 0
)
elif vllm_config.speculative_config.use_eagle():
try:
draft_model_config = vllm_config.speculative_config.draft_model_config
num_mtp_layers = draft_model_config.get_num_layers(
vllm_config.parallel_config
)
logger.info("EAGLE detected %d extra layer(s)", num_mtp_layers)
except Exception:
logger.info(
"EAGLE detected, but failed to get the number of extra layers"
"falling back to 1"
)
num_mtp_layers = 1
return num_mtp_layers
def _init_lmcache_engine(
    lmcache_config: LMCacheEngineConfig,
    vllm_config: "VllmConfig",
) -> LMCacheEngine:
    """Initialize (or fetch the already-built) LMCache engine for this
    process from the given LMCache and vLLM configurations.

    :param lmcache_config: The LMCache configuration.
    :type lmcache_config: LMCacheEngineConfig

    :param vllm_config: The vLLM configuration.
    :type vllm_config: VllmConfig

    :return: The initialized LMCache engine
    :rtype: LMCacheEngine
    """
    # Reuse the engine if one was already built under ENGINE_NAME.
    if curr_engine := LMCacheEngineBuilder.get(ENGINE_NAME):
        return curr_engine
    model_config = vllm_config.model_config
    parallel_config = vllm_config.parallel_config
    cache_config = vllm_config.cache_config
    # BUGFIX: message previously read "configuration is should be passed."
    assert isinstance(lmcache_config, LMCacheEngineConfig), (
        "An LMCache v1 configuration should be passed."
    )
    kv_dtype = get_kv_cache_torch_dtype(cache_config.cache_dtype, model_config.dtype)
    use_mla = mla_enabled(model_config)
    # MLA requires the naive serializer (None falls back to the default,
    # which is accepted as well).
    if use_mla and (
        lmcache_config.remote_serde != "naive"
        and lmcache_config.remote_serde is not None
    ):
        raise ValueError("MLA only works with naive serde mode.")
    # construct kv shape (for mem pool)
    num_layer = model_config.get_num_layers(parallel_config)
    num_mtp_layers = _calculate_mtp_layers(vllm_config, model_config)
    num_layer += num_mtp_layers
    chunk_size = lmcache_config.chunk_size
    num_kv_head = model_config.get_num_kv_heads(parallel_config)
    head_size = model_config.get_head_size()
    # MLA stores a single latent entry per token instead of separate K/V,
    # hence the 1-vs-2 second dimension.
    kv_shape = (num_layer, 1 if use_mla else 2, chunk_size, num_kv_head, head_size)
    logger.info(
        "use mla: %s, kv shape: %s, num_mtp_layers: %s",
        use_mla,
        kv_shape,
        num_mtp_layers,
    )
    # Change current device.
    num_gpus = torch.cuda.device_count()
    local_rank = parallel_config.rank % num_gpus
    torch.cuda.set_device(local_rank)
    device = torch.device(f"cuda:{local_rank}")
    metadata = LMCacheEngineMetadata(
        model_config.model,
        parallel_config.world_size,
        parallel_config.rank,
        "vllm",
        kv_dtype,
        kv_shape,
        use_mla,
    )
    use_gpu = need_gpu_interm_buffer(lmcache_config)
    vllm_gpu_connector: (
        VLLMBufferLayerwiseGPUConnector
        | VLLMPagedMemGPUConnectorV2
        | VLLMPagedMemLayerwiseGPUConnector
    )
    if use_mla and lmcache_config.use_layerwise:
        raise ValueError("layerwise MLA connector is not supported yet")
    # When use_mla is True, num_kv_head is 1
    hidden_dim_size = num_kv_head * head_size
    if lmcache_config.use_layerwise:
        if lmcache_config.enable_blending:
            # Use layerwise connector for blending
            vllm_gpu_connector = VLLMBufferLayerwiseGPUConnector(
                hidden_dim_size,
                num_layer,
                use_gpu=use_gpu,
                chunk_size=chunk_size,
                dtype=kv_dtype,
                device=device,
            )
        else:
            vllm_gpu_connector = VLLMPagedMemLayerwiseGPUConnector(
                hidden_dim_size,
                num_layer,
                use_gpu=use_gpu,
                chunk_size=chunk_size,
                dtype=kv_dtype,
                device=device,
            )
    else:
        vllm_gpu_connector = VLLMPagedMemGPUConnectorV2(
            hidden_dim_size,
            num_layer,
            use_gpu=use_gpu,
            chunk_size=chunk_size,
            dtype=kv_dtype,
            device=device,
            use_mla=use_mla,
        )
    tpg = get_tp_group()
    engine = LMCacheEngineBuilder.get_or_create(
        ENGINE_NAME,
        lmcache_config,
        metadata,
        vllm_gpu_connector,
        tpg.broadcast,
        tpg.broadcast_object,
    )
    return engine
@dataclass
class LMCacheConnectorMetadata(KVConnectorMetadata):
    """Per-step connector metadata passed from scheduler to worker."""

    # Requests with load/save work scheduled for this step
    requests: list[ReqMeta] = field(default_factory=list)
    # Lookup ids issued this step; unpinned in `wait_for_save`
    lookup_requests_in_step: list[str] = field(default_factory=list)

    @_lmcache_nvtx_annotate
    def add_request(self, req_meta: ReqMeta) -> None:
        """Add a request to the metadata.

        Args:
            req_meta (ReqMeta): the request metadata.
        """
        self.requests.append(req_meta)
class LMCacheConnectorV1Impl:
def __init__(
self,
vllm_config: "VllmConfig",
role: KVConnectorRole,
parent: KVConnectorBase_V1,
):
assert vllm_config.kv_transfer_config is not None
self._parent = parent
self._vllm_config = vllm_config
self.kv_role = vllm_config.kv_transfer_config.kv_role
self.worker_count = vllm_config.parallel_config.tensor_parallel_size
config = lmcache_get_or_create_config()
assert isinstance(config, LMCacheEngineConfig), (
"LMCache v1 configuration is should be passed for vLLM v1."
)
# Put the leading with "lmcache." and matched configs from
# vllm extra_config to the config
kv_connector_extra_config = (
vllm_config.kv_transfer_config.kv_connector_extra_config
)
if kv_connector_extra_config:
for key, value in kv_connector_extra_config.items():
if key.startswith("lmcache."):
config_key = key[8:] # Remove "lmcache." prefix
if _validate_and_set_config_value(config, config_key, value):
logger.info(
"Updated config %s from vLLM extra config: %s",
config_key,
value,
)
self.config = config
self.async_loading = config.enable_async_loading
self.layerwise_retrievers: list[Generator[torch.Tensor | None, None, None]] = []
self._stats_monitor = LMCStatsMonitor.GetOrCreate()
if role == KVConnectorRole.SCHEDULER:
# Create lookup client using factory
self.lookup_client = LookupClientFactory.create_lookup_client(
vllm_config, config
)
self._unfinished_requests: dict[str, Request] = {}
self._lookup_requests_in_step: list[str] = []
self.lmcache_engine = None
else:
self.lmcache_engine = _init_lmcache_engine(
config,
vllm_config,
)
self.use_layerwise = config.use_layerwise
self.enable_blending = config.enable_blending
if self.enable_blending:
self.blender = LMCBlenderBuilder.get_or_create(
ENGINE_NAME,
self.lmcache_engine,
self.lmcache_engine.gpu_connector,
config,
)
# Create lookup server using factory
assert self.lmcache_engine is not None
self.lookup_server = LookupClientFactory.create_lookup_server(
self.lmcache_engine, vllm_config
)
self.offload_server = ZMQOffloadServer(
self.lmcache_engine,
vllm_config,
get_tensor_model_parallel_rank(),
)
# In case of MLA, the lookup server is only created on worker 0
if self.async_loading and self.lookup_server is not None:
assert isinstance(self.lookup_server, LMCacheAsyncLookupServer)
self.lmcache_engine.post_init(async_lookup_server=self.lookup_server)
self.kv_caches: dict[str, torch.Tensor] = {}
self._block_size = vllm_config.cache_config.block_size
# request_id -> (vllm cached tokens, lmcache cached tokens)
self.load_specs: dict[str, LoadSpec] = {}
self.kv_cache_manager: KVCacheManager | None = None
# request_id -> full_token_ids
self._request_trackers: dict[str, RequestTracker] = {}
# Whether to discard partial chunks
self._discard_partial_chunks = (
vllm_config.kv_transfer_config.get_from_extra_config(
"discard_partial_chunks", False
)
or not config.save_unfull_chunk
)
self._lmcache_chunk_size = config.chunk_size
self._save_decode_cache = config.save_decode_cache
self.skip_last_n_tokens = vllm_config.kv_transfer_config.get_from_extra_config(
"skip_last_n_tokens", 0
)
self.num_layers = vllm_config.model_config.get_num_layers(
vllm_config.parallel_config
)
self.current_layer = 0
self.force_skip_save = bool(os.environ.get("LMCACHE_FORCE_SKIP_SAVE", False))
self._requests_priority: dict[str, int] = {}
# TODO(baoloongmao): Internal api server & plugin framework support
# dp > 1
if (
vllm_config.parallel_config.data_parallel_size_local == 1
or vllm_config.parallel_config.data_parallel_rank_local == 0
):
# Start internal API server if enabled
# The enabled check is in the InternalAPIServer constructor
self.api_server = InternalAPIServer(self)
self.api_server.start()
# Launch plugins
self.plugin_launcher = RuntimePluginLauncher(
self.config,
role,
self.worker_count,
-1
if self.lmcache_engine is None # scheduler side
else self.lmcache_engine.metadata.worker_id,
)
self.plugin_launcher.launch_plugins()
else:
self.api_server = None # type: ignore[assignment]
self.plugin_launcher = None # type: ignore[assignment]
logger.info(
"LMCache initialized for role %s with version %s, "
"vllm version %s, lmcache cache_engine metadata: %s",
role,
utils.get_version(),
VLLM_VERSION,
getattr(self.lmcache_engine, "metadata", None),
)
def get_inference_info(self) -> dict:
"""Get inference information including vLLM config and related details.
Returns:
dict: Dictionary containing inference information
"""
# Get vLLM config information
vllm_config = self._vllm_config
# Use vLLM config's string representation and add specific configs
inference_info = {
"vllm_version": VLLM_VERSION,
"lmcache_version": utils.get_version(),
"vllm_config": str(vllm_config),
"model_config": {
"model": getattr(vllm_config.model_config, "model", None),
"dtype": str(getattr(vllm_config.model_config, "dtype", None)),
"max_model_len": getattr(
vllm_config.model_config, "max_model_len", None
),
"vocab_size": vllm_config.model_config.get_vocab_size(),
"num_layers": getattr(
vllm_config.model_config, "get_num_layers", lambda _: None
)(vllm_config.parallel_config),
"num_attention_heads": getattr(
vllm_config.model_config, "get_num_attention_heads", lambda _: None
)(vllm_config.parallel_config),
"num_kv_heads": getattr(
vllm_config.model_config, "get_num_kv_heads", lambda _: None
)(vllm_config.parallel_config),
"head_size": getattr(
vllm_config.model_config, "get_head_size", lambda: None
)(),
},
"cache_config": {
"block_size": getattr(vllm_config.cache_config, "block_size", None),
"cache_dtype": str(
getattr(vllm_config.cache_config, "cache_dtype", None)
),
"gpu_memory_utilization": getattr(
vllm_config.cache_config, "gpu_memory_utilization", None
),
},
}
return inference_info
def get_inference_version(self) -> str:
"""Get vLLM version information.
Returns:
str: vLLM version string
"""
return VLLM_VERSION
@_lmcache_nvtx_annotate
def _init_kv_caches_from_forward_context(self, forward_context: "ForwardContext"):
for layer_name in forward_context.no_compile_layers:
attn_layer = forward_context.no_compile_layers[layer_name]
if not hasattr(attn_layer, "kv_cache"):
logger.debug("The layer %s does not have kv_cache, skip it", layer_name)
continue
if layer_name not in self.kv_caches:
self.kv_caches[layer_name] = attn_layer.kv_cache[
forward_context.virtual_engine
]
####################
# Worker side APIs
####################
@_lmcache_nvtx_annotate
def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
logger.info("Registering KV caches")
# TODO(chunxiaozheng): `_init_kv_caches_from_forward_context` is
# not called, we should consider removing it.
assert len(self.kv_caches) == 0 and len(kv_caches) > 0
self.kv_caches = kv_caches
if self.lmcache_engine is not None:
kvcaches = list(self.kv_caches.values())
self.lmcache_engine.post_init(kvcaches=kvcaches)
    @_lmcache_nvtx_annotate
    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Start loading the KV cache from the connector buffer to vLLM's
        paged KV buffer.

        Args:
            forward_context (ForwardContext): the forward context.

        Note:
            The number of elements in kv_caches and layer_names should be
            the same.
        """
        self.current_layer = 0
        if len(self.kv_caches) == 0:
            self._init_kv_caches_from_forward_context(forward_context)
        metadata = self._parent._get_connector_metadata()
        assert isinstance(metadata, LMCacheConnectorMetadata)
        assert len(self.kv_caches) > 0
        kvcaches = list(self.kv_caches.values())
        attn_metadata = forward_context.attn_metadata
        if attn_metadata is None:
            logger.debug("In connector.start_load_kv, but the attn_metadata is None")
            return
        assert self.lmcache_engine is not None
        self.lmcache_engine.post_init(kvcaches=kvcaches)
        self.layerwise_retrievers = []
        # First pass: find the index of the last request that has load work,
        # so only that request's layerwise retriever is created with
        # sync=True. (last_idx is only read in the layerwise branch below,
        # which runs only when at least one request has a load_spec.)
        for idx, request in enumerate(metadata.requests):
            if request.load_spec is None:
                continue
            last_idx = idx
        # Second pass: kick off the actual retrievals.
        for idx, request in enumerate(metadata.requests):
            if request.load_spec is None:
                continue
            tokens = request.token_ids
            # TODO: have a pre-allocated buffer to hold the slot_mappings
            slot_mapping = request.slot_mapping.cuda()
            assert len(tokens) == len(slot_mapping)
            self._stats_monitor.update_interval_vllm_hit_tokens(
                request.load_spec.vllm_cached_tokens
            )
            # Mask out the chunk-aligned prefix that vLLM already has
            # locally; only the remainder is retrieved from LMCache.
            token_mask = torch.ones(len(tokens), dtype=torch.bool)
            masked_token_count = (
                request.load_spec.vllm_cached_tokens
                // self._lmcache_chunk_size
                * self._lmcache_chunk_size
            )
            token_mask[:masked_token_count] = False
            lmcache_cached_tokens = request.load_spec.lmcache_cached_tokens
            if self.use_layerwise:
                sync = idx == last_idx
                # NOTE(Jiayi): Perform blending before layerwise prefix caching
                if self.enable_blending:
                    # TODO(Jiayi): Need to make prefix caching and blending
                    # compatible
                    self.blender.blend(
                        tokens[:lmcache_cached_tokens],
                        token_mask[:lmcache_cached_tokens],
                        kvcaches=kvcaches,
                        slot_mapping=slot_mapping[:lmcache_cached_tokens],
                    )
                else:
                    layerwise_retriever = self.lmcache_engine.retrieve_layer(
                        tokens[:lmcache_cached_tokens],
                        token_mask[:lmcache_cached_tokens],
                        kvcaches=kvcaches,
                        slot_mapping=slot_mapping[:lmcache_cached_tokens],
                        sync=sync,
                    )
                    # NOTE: retrieve for two layers at the first layer
                    next(layerwise_retriever)
                    next(layerwise_retriever)
                    self.layerwise_retrievers.append(layerwise_retriever)
            else:
                # Non-layerwise: retrieve everything in one blocking call.
                ret_token_mask = self.lmcache_engine.retrieve(
                    tokens[:lmcache_cached_tokens],
                    token_mask[:lmcache_cached_tokens],
                    kvcaches=kvcaches,
                    slot_mapping=slot_mapping[:lmcache_cached_tokens],
                    request_configs=request.request_configs,
                    req_id=request.req_id,
                )
                # Check the result
                num_retrieved_tokens = ret_token_mask.sum().item()
                num_expected_tokens = (
                    lmcache_cached_tokens - request.load_spec.vllm_cached_tokens
                )
                if num_retrieved_tokens < num_expected_tokens:
                    logger.error(
                        "The number of retrieved tokens is less than the "
                        "expected number of tokens! This should not happen!"
                    )
                    logger.error(
                        "Num retrieved tokens: %d, num expected tokens: %d",
                        num_retrieved_tokens,
                        num_expected_tokens,
                    )
    @_lmcache_nvtx_annotate
    def wait_for_layer_load(self, layer_name: str) -> None:
        """Blocking until the KV for a specific layer is loaded into vLLM's
        paged buffer.

        This interface will be useful for layer-by-layer pipelining.

        Args:
            layer_name: the name of that layer.
                NOTE(review): the argument is currently unused; the
                retrievers are simply advanced one layer per call and
                `self.current_layer` tracks progress.
        """
        if self.layerwise_retrievers:
            logger.debug("Waiting for layer %s to be loaded", self.current_layer)
            # Wait for the layer to be loaded
            for layerwise_retriever in self.layerwise_retrievers:
                ret_token_mask = next(layerwise_retriever)
                # Only the final layer yields the overall retrieval mask.
                if self.current_layer == self.num_layers - 1:
                    assert ret_token_mask is not None
                    num_retrieved_tokens = ret_token_mask.sum().item()
                    logger.info("Retrieved %s tokens", num_retrieved_tokens)
        return
    @_lmcache_nvtx_annotate
    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: AttentionMetadata,
        **kwargs,
    ) -> None:
        """Start saving the a layer of KV cache from vLLM's paged buffer
        to the connector.

        Only active in layerwise mode; the layerwise storers are created
        on the first layer and advanced by one layer on every call.

        Args:
            layer_name (str): the name of the layer.
            kv_layer (torch.Tensor): the paged KV buffer of the current
                layer in vLLM.
            attn_metadata (AttentionMetadata): the attention metadata.
        """
        assert self.lmcache_engine is not None
        if not self.use_layerwise:
            return
        if self.kv_role == "kv_consumer":
            # Don't do save if the role is kv_consumer
            return
        if self._parent._connector_metadata is None:
            logger.warning(
                "In connector.save_kv_layer, but the connector metadata is None"
            )
            return
        connector_metadata = self._parent._get_connector_metadata()
        assert isinstance(connector_metadata, LMCacheConnectorMetadata)
        assert len(self.kv_caches) > 0
        kvcaches = list(self.kv_caches.values())
        # On the first layer of the pass, set up one storer per request.
        if self.current_layer == 0:
            self.layerwise_storers = []
            is_first = True
            for idx, request in enumerate(connector_metadata.requests):
                save_spec = request.save_spec
                if save_spec is None or not save_spec.can_save:
                    continue
                token_ids = request.token_ids
                assert isinstance(token_ids, list)
                slot_mapping = request.slot_mapping
                assert isinstance(slot_mapping, torch.Tensor)
                assert len(slot_mapping) == len(token_ids)
                # TODO: have a pre-allocated buffer to hold the slot_mappings
                slot_mapping = slot_mapping.cuda()
                # Producers always (re)send from the start of the request.
                if self.kv_role == "kv_producer":
                    skip_leading_tokens = 0
                else:
                    skip_leading_tokens = save_spec.skip_leading_tokens
                if skip_leading_tokens == len(token_ids):
                    continue  # skip this request
                # Align to lmcache chunk size
                skip_leading_tokens = (
                    skip_leading_tokens
                    // self._lmcache_chunk_size
                    * self._lmcache_chunk_size
                )
                store_mask = torch.ones(len(token_ids), dtype=torch.bool)
                store_mask[:skip_leading_tokens] = False
                logger.info(
                    "Storing KV cache for %d out of %d tokens "
                    "(skip_leading_tokens=%d) for request %s",
                    len(token_ids) - skip_leading_tokens,
                    len(token_ids),
                    skip_leading_tokens,
                    request.req_id,
                )
                # TODO (Jiayi): need to make layerwise storing
                # compatible with disagg spec
                layerwise_storer = self.lmcache_engine.store_layer(
                    token_ids,
                    mask=store_mask,
                    kvcaches=kvcaches,
                    slot_mapping=slot_mapping,
                    offset=skip_leading_tokens,
                    sync=is_first,
                )
                self.layerwise_storers.append(layerwise_storer)
                # Only the first storer is created with sync=True.
                if is_first:
                    is_first = False
        # Advance every storer by one layer.
        for layerwise_storer in self.layerwise_storers:
            next(layerwise_storer)
        self.current_layer += 1
    @_lmcache_nvtx_annotate
    def wait_for_save(self):
        """Blocking until the KV cache is saved to the connector buffer.

        Also unpins the lookups issued during this scheduling step. In
        layerwise mode only the final storer step is driven here; in
        non-layerwise mode the full store happens in this call.
        """
        connector_metadata = self._parent._get_connector_metadata()
        assert isinstance(connector_metadata, LMCacheConnectorMetadata)
        self.lmcache_engine.lookup_unpin(  # type: ignore
            connector_metadata.lookup_requests_in_step
        )
        if self.kv_role == "kv_consumer":
            # Don't do save if the role is kv_consumer
            return
        if self.use_layerwise:
            # Drive each layerwise storer to completion (final step).
            for layerwise_storer in self.layerwise_storers:
                next(layerwise_storer)
            return
        assert len(self.kv_caches) > 0
        kvcaches = list(self.kv_caches.values())
        assert self.lmcache_engine is not None
        for request in connector_metadata.requests:
            save_spec = request.save_spec
            # Producers must always transfer, even when can_save is unset.
            if (
                save_spec is None or not save_spec.can_save
            ) and self.kv_role != "kv_producer":
                continue
            token_ids = request.token_ids
            slot_mapping = request.slot_mapping
            assert isinstance(slot_mapping, torch.Tensor)
            assert len(slot_mapping) == len(token_ids)
            assert save_spec is not None
            # TODO: have a pre-allocated buffer to hold the slot_mappings
            slot_mapping = slot_mapping.cuda()
            skip_leading_tokens = save_spec.skip_leading_tokens
            if self.kv_role == "kv_producer":
                assert request.disagg_spec is not None
                # Never skip past what has actually been transferred yet.
                skip_leading_tokens = min(
                    skip_leading_tokens, request.disagg_spec.num_transferred_tokens
                )
            if skip_leading_tokens == len(token_ids):
                continue  # skip this request
            # Align to lmcache chunk size
            skip_leading_tokens = (
                skip_leading_tokens
                // self._lmcache_chunk_size
                * self._lmcache_chunk_size
            )
            store_mask = torch.ones(len(token_ids), dtype=torch.bool)
            store_mask[:skip_leading_tokens] = False
            logger.info(
                "Storing KV cache for %d out of %d tokens "
                "(skip_leading_tokens=%d) for request %s",
                len(token_ids) - skip_leading_tokens,
                len(token_ids),
                skip_leading_tokens,
                request.req_id,
            )
            is_last_prefill = request.is_last_prefill
            if is_last_prefill:
                if request.disagg_spec:
                    request.disagg_spec.is_last_prefill = True
            else:
                # Mid-prefill: only store up to the last full chunk, since
                # more tokens arrive in the next iteration.
                token_len = len(token_ids)
                aligned_token_len = (
                    token_len // self._lmcache_chunk_size * self._lmcache_chunk_size
                )
                token_ids = token_ids[:aligned_token_len]
                store_mask = store_mask[:aligned_token_len]
                slot_mapping = slot_mapping[:aligned_token_len]
            self.lmcache_engine.store(
                token_ids,
                mask=store_mask,
                kvcaches=kvcaches,
                slot_mapping=slot_mapping,
                offset=skip_leading_tokens,
                transfer_spec=request.disagg_spec,
                request_configs=request.request_configs,
            )
            # NOTE(Jiayi): We assume all tokens are saved
            save_spec.skip_leading_tokens = len(token_ids)
            if request.disagg_spec:
                request.disagg_spec.num_transferred_tokens = len(token_ids)
@_lmcache_nvtx_annotate
def get_finished(
self, finished_req_ids: set[str]
) -> tuple[set[str] | None, set[str] | None]:
return None, None
###################
# Scheduler side APIs
####################
    @_lmcache_nvtx_annotate
    def get_num_new_matched_tokens(
        self,
        request: "Request",
        num_computed_tokens: int,
    ) -> int | None:
        """
        Check for external KV cache hit.

        Args:
            request (Request): the request object.
            num_computed_tokens (int): the number of locally
                computed tokens for this request

        Returns:
            the number of tokens that can be loaded from the
            external KV cache beyond what is already computed;
            0 when nothing extra can be loaded; None when the
            lookup result is not available yet.
        """
        # Producers skip lookup unless the lookup client advertises
        # producer-side reuse (duck-typed via attribute presence).
        if self.kv_role == "kv_producer" and not hasattr(
            self.lookup_client, "supports_producer_reuse"
        ):
            return 0
        # Remember the request priority; consumed later in
        # build_connector_meta to decide whether saving is skipped.
        self._requests_priority[request.request_id] = request.priority
        token_ids = request.prompt_token_ids
        # If the request has multimodal hashes, apply them to the token ids
        # so cache keys distinguish identical token sequences that carry
        # different multimodal inputs.
        mm_hashes, mm_positions = extract_mm_features(request)
        if mm_hashes and mm_positions:
            # TODO(Jiayi): Optimize this
            token_ids_tensor = torch.tensor(request.prompt_token_ids)
            apply_mm_hashes_to_token_ids(token_ids_tensor, mm_hashes, mm_positions)
            token_ids = token_ids_tensor.tolist()
        if request.sampling_params:
            request_configs = extract_request_configs(request.sampling_params)
        else:
            request_configs = None
        # Optionally exclude the trailing tokens from the lookup key.
        if self.skip_last_n_tokens > 0:
            assert token_ids is not None
            token_ids = token_ids[: -self.skip_last_n_tokens]
        # With async loading the request id doubles as the lookup handle;
        # otherwise a fresh UUID keeps lookups independent of each other.
        lookup_id = request.request_id if self.async_loading else str(uuid.uuid4())
        self._lookup_requests_in_step.append(lookup_id)
        num_external_hit_tokens = self.lookup_client.lookup(
            token_ids,
            lookup_id=lookup_id,
            request_configs=request_configs,
        )
        # None means the lookup has not produced a result yet.
        if num_external_hit_tokens is None:
            logger.info(
                "Reqid: %s, Total tokens %d, LMCache hit tokens: None.",
                request.request_id,
                request.num_tokens,
            )
            return None
        # When prompt length is divisible by the block size and all
        # blocks are cached, we need to recompute the last token.
        # This will be removed in the future if vLLM's scheduler provides
        # a better support for this case.
        need_to_allocate = num_external_hit_tokens - num_computed_tokens
        # In the full-prompt-hit case, we need to recompute the last token
        if num_external_hit_tokens == request.num_tokens:
            need_to_allocate -= 1
        logger.info(
            "Reqid: %s, Total tokens %d, LMCache hit tokens: %d, need to load: %d",
            request.request_id,
            request.num_tokens,
            num_external_hit_tokens,
            need_to_allocate,
        )
        # Record the hit so update_state_after_alloc / build_connector_meta
        # can turn it into an actual load once blocks are allocated.
        self.load_specs[request.request_id] = LoadSpec(
            vllm_cached_tokens=num_computed_tokens,
            lmcache_cached_tokens=num_external_hit_tokens,
            can_load=False,
        )
        if need_to_allocate <= 0:
            return 0
        return need_to_allocate
    @_lmcache_nvtx_annotate
    def update_state_after_alloc(self, request: "Request", num_external_tokens: int):
        """
        Update connector state after the scheduler allocated blocks.

        Marks the request's pending LoadSpec as loadable once vLLM has
        actually allocated space for the externally cached tokens, and
        captures the disagg transfer spec carried in ``kv_transfer_params``
        when present.

        Args:
            request: the request that was just scheduled.
            num_external_tokens: number of tokens the scheduler allocated
                for loading from the external KV cache.
        """
        # Clear local status in lookup client when a new request is
        # successfully scheduled.
        self.lookup_client.clear_lookup_status(request.request_id)
        kv_transfer_params = (
            request.kv_transfer_params
            if hasattr(request, "kv_transfer_params")
            else None
        )
        if kv_transfer_params is not None and "disagg_spec" in kv_transfer_params:
            req_disagg_spec = kv_transfer_params["disagg_spec"]
            # Receiver identity is host + init port, used to route KV to
            # the correct peer in disaggregated prefill.
            receiver_id = req_disagg_spec["receiver_host"] + str(
                req_disagg_spec["receiver_init_port"]
            )
            disagg_spec = DisaggSpec(
                req_id=req_disagg_spec["req_id"],
                receiver_id=receiver_id,
                receiver_host=req_disagg_spec["receiver_host"],
                receiver_init_port=req_disagg_spec["receiver_init_port"],
                receiver_alloc_port=req_disagg_spec["receiver_alloc_port"],
            )
            tmp_disagg_tracker[request.request_id] = disagg_spec
        self._unfinished_requests[request.request_id] = request
        if request.request_id not in self.load_specs:
            # No KV tokens from external KV cache, return
            return
        if num_external_tokens == 0:
            # No need to load anything
            self.load_specs[request.request_id].can_load = False
            return
        # Only check for non-prompt-hit case
        # (on a full prompt hit, one token was subtracted in
        # get_num_new_matched_tokens, so the equality below would not hold)
        if (
            self.load_specs[request.request_id].lmcache_cached_tokens
            != request.num_tokens
        ):
            assert (
                num_external_tokens > 0
                and num_external_tokens
                == self.load_specs[request.request_id].lmcache_cached_tokens
                - self.load_specs[request.request_id].vllm_cached_tokens
            ), (
                f"Mismatch in number of tokens: {num_external_tokens} vs "
                f"{self.load_specs[request.request_id].lmcache_cached_tokens} -"
                f" {self.load_specs[request.request_id].vllm_cached_tokens}"
                f" for request {request.request_id}"
            )
        self.load_specs[request.request_id].can_load = True
    @_lmcache_nvtx_annotate
    def build_connector_meta(
        self, scheduler_output: SchedulerOutput
    ) -> KVConnectorMetadata:
        """Attach the connector metadata to the request object.

        This function should NOT modify other fields in the scheduler_output
        except the `kv_connector_metadata` field.
        Also, calling this function will reset the state of the connector.

        Args:
            scheduler_output (SchedulerOutput): the scheduler output object.
        """
        # Consumers never save; saving can also be globally disabled.
        force_skip_save = self.kv_role == "kv_consumer" or self.force_skip_save
        meta = LMCacheConnectorMetadata()
        # set and update lookup requests for unpin
        meta.lookup_requests_in_step = self._lookup_requests_in_step
        self._lookup_requests_in_step = []
        # Drop per-request tracking state for requests that finished.
        for finished_req_id in scheduler_output.finished_req_ids:
            self._request_trackers.pop(finished_req_id, None)
            self._unfinished_requests.pop(finished_req_id, None)
        for request in scheduler_output.scheduled_new_reqs:
            # Right now, we only load KV for new requests
            load_spec = self.load_specs.pop(request.req_id, None)
            num_tokens_to_compute = (
                request.num_computed_tokens
                + scheduler_output.num_scheduled_tokens[request.req_id]
            )
            lmcache_cached_tokens = 0
            if load_spec is not None:
                lmcache_cached_tokens = load_spec.lmcache_cached_tokens
            # Requests above the configured priority limit are not saved.
            request_priority = self._requests_priority.pop(request.req_id, 0)
            skip_save = force_skip_save or (
                self.config.priority_limit is not None
                and request_priority > self.config.priority_limit
            )
            request_tracker = RequestTracker.from_new_request(
                self.config,
                request,
                num_tokens_to_compute,
                lmcache_cached_tokens,
                skip_save,
            )
            self._request_trackers[request.req_id] = request_tracker
            req_meta = ReqMeta.from_request_tracker(
                request_tracker,
                self._block_size,
                self._lmcache_chunk_size,
                load_spec=load_spec,
                discard_partial_chunks=self._discard_partial_chunks,
                save_decode_cache=self._save_decode_cache,
            )
            if req_meta is not None:
                meta.add_request(req_meta)
        cached_reqs = scheduler_output.scheduled_cached_reqs
        # NOTE: For backward compatibility with vllm version < 0.9.2,
        # In the latest vllm version, the type of scheduled_cached_reqs has
        # changed from list to object `CachedRequestData`
        if isinstance(cached_reqs, list):
            # Legacy path: each element carries its own token/block deltas.
            for i, req in enumerate(cached_reqs):
                request_tracker = self._request_trackers[req.req_id]
                request_tracker.update(req.new_token_ids, req.new_block_ids)
                req_meta = ReqMeta.from_request_tracker(
                    request_tracker,
                    self._block_size,
                    self._lmcache_chunk_size,
                    load_spec=None,
                    discard_partial_chunks=self._discard_partial_chunks,
                )
                if req_meta is not None:
                    meta.add_request(req_meta)
            return meta
        # Modern path: CachedRequestData exposes parallel arrays indexed by
        # position; the new token ids are recovered from the tracked request
        # object since they are not carried in the scheduler output.
        for i, req_id in enumerate(cached_reqs.req_ids):
            request_tracker = self._request_trackers[req_id]
            num_new_tokens = scheduler_output.num_scheduled_tokens[req_id]
            if cached_request := self._unfinished_requests.get(req_id):
                num_current_tokens = len(request_tracker.token_ids)
                new_token_ids = cached_request.all_token_ids[
                    num_current_tokens : num_current_tokens + num_new_tokens
                ]
            else:
                raise ValueError(
                    f"Request {req_id} is not in _unfinished_requests, "
                    f"but it is scheduled to be cached"
                )
            new_block_ids = cached_reqs.new_block_ids[i]
            request_tracker.update(new_token_ids, new_block_ids)
            req_meta = ReqMeta.from_request_tracker(
                request_tracker,
                self._block_size,
                self._lmcache_chunk_size,
                load_spec=None,
                discard_partial_chunks=self._discard_partial_chunks,
                save_decode_cache=self._save_decode_cache,
            )
            if req_meta is not None:
                meta.add_request(req_meta)
        return meta
@_lmcache_nvtx_annotate
def request_finished(
self,
request: "Request",
block_ids: list[int],
) -> tuple[bool, dict[str, Any] | None]:
params = (
request.kv_transfer_params
if hasattr(request, "kv_transfer_params")
else None
)
return_params = None
# NOTE: Used to stream back the first token
# for disagg prefill
if params is not None and "ret_first_tok" in params:
return_params = {
"first_tok": request._output_token_ids[0],
}
return False, return_params
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/lmcache_integration/vllm_v1_adapter.py",
"license": "Apache License 2.0",
"lines": 1206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/quantization/test_gptq_v2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests whether vllm correctly load and run gptq_v2 format checkpoints.
Run `pytest tests/quantization/test_gptq_v2.py --forked`.
"""
import pytest
import torch
from transformers import AutoTokenizer
from vllm import SamplingParams
from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod
# A dummy small model quantized by GPTQModel, stored in GPTQ v2 format
MODELS = ["XXXXyu/Qwen3-1.7B-w2g64-gptq_v2"]
# Generate multiple sequences for testing, because a 1.7B 2-bit model
# cannot always generate normal texts.
N_SEQ = 5
@pytest.mark.parametrize("model_id", MODELS)
def test_model_load(vllm_runner, model_id, monkeypatch):
    """Check that a gptq_v2-format checkpoint loads with GPTQLinearMethod
    and that the v2 checkpoint format is propagated to the quant method."""
    # `LLM.apply_model` requires pickling a function.
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
    # Only check the default GPTQ linear method (used for 2/3-bit models).
    # 4/8-bit linear methods like Marlin already support gptq_v2.
    linear_method_cls = GPTQLinearMethod
    with vllm_runner(model_id, dtype=torch.float16, max_model_len=512) as llm:

        def check_model(model):
            # `model` is the loaded nn.Module, not the HF model id string.
            # (A previous version named this parameter `model_id`, shadowing
            # the outer fixture, and compared against the module path
            # "model_id.layers.0..." which can never match a real
            # named_modules() name — so the assertions were silently
            # skipped.)
            for name, submodule in model.named_modules():
                # Could check more modules if necessary
                if name == "model.layers.0.self_attn.qkv_proj":
                    assert isinstance(submodule.quant_method, linear_method_cls)
                    config = submodule.quant_method.quant_config
                    assert config.checkpoint_format == "gptq_v2"
                    assert submodule.quant_method.use_v2_format
                    # Just break since currently we only check 1 module
                    break

        # Check if gptq_v2 format is correctly loaded
        llm.apply_model(check_model)
@pytest.mark.parametrize("model_id", MODELS)
def test_model_inference(vllm_runner, model_id):
    """Generate from a gptq_v2 checkpoint and sanity-check that at least
    one of the sampled sequences looks like natural language."""
    # Prepare prompt to test the model's generation result.
    prompt = "What is the meaning of life?"
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,  # If thinking model, set it to false
    )
    sampling_params = SamplingParams(
        n=N_SEQ,
        max_tokens=128,
        temperature=0.7,
        top_p=0.8,
        top_k=20,
        min_p=0,
        presence_penalty=2,
    )

    def looks_like_normal_text(candidate, min_len):
        """True when a single sequence resembles natural language."""
        # Response too short to judge — this sequence does not qualify,
        # but it must not disqualify the others. (The previous version
        # returned False for the WHOLE batch on the first short sequence,
        # contradicting the "at least 1 normal text" intent.)
        if len(candidate) < min_len:
            return False
        # Basic ratio checks: normal text should be mostly letters with
        # reasonable spacing. Some magic numbers, could be adjusted.
        total = len(candidate)
        letter_ratio = sum(c.isalpha() for c in candidate) / total
        space_ratio = sum(c.isspace() for c in candidate) / total
        return 0.5 <= letter_ratio <= 0.9 and 0.01 <= space_ratio <= 0.3

    with vllm_runner(model_id, dtype=torch.float16, max_model_len=512) as llm:
        # Generate a response to verify inference correctness
        output = llm.generate(text, sampling_params)
        # Make sure the output exists
        assert output
        assert output[0][1]
        assert len(output[0][1]) == N_SEQ
        # Apply some simple checks for gibberish output: at least one of
        # the N_SEQ sequences should read as normal text.
        # Print the output sequences if failed.
        assert any(looks_like_normal_text(seq, 5) for seq in output[0][1]), output[0][1]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/quantization/test_gptq_v2.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.