sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
vllm-project/vllm:vllm/transformers_utils/configs/colqwen3.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
ColQwen3 configuration that extends Qwen3VLConfig with embedding projection
fields. This allows ColQwen3 models to be loaded without trust_remote_code
by mapping their custom model_type (colqwen3, ops_colqwen3, etc.) to a
standard config class that vLLM understands.
Supported model_types:
- colqwen3 (TomoroAI/tomoro-colqwen3-embed-8b)
- ops_colqwen3 (OpenSearch-AI/Ops-Colqwen3-4B)
- qwen3_vl_nemotron_embed (nvidia/nemotron-colembed-vl-8b-v2)
"""
from transformers.models.qwen3_vl.configuration_qwen3_vl import Qwen3VLConfig
class ColQwen3Config(Qwen3VLConfig):
    """Qwen3VL configuration extended with ColQwen3 embedding-projection fields.

    Each extra field is optional and defaults to ``None``; which of them a
    given checkpoint populates depends on the ColQwen3 variant.
    """

    # Registered model_type for the base ColQwen3 variant.
    model_type = "colqwen3"

    def __init__(
        self,
        embed_dim: int | None = None,
        dims: int | None = None,
        dim: int | None = None,
        projection_dim: int | None = None,
        colbert_dim: int | None = None,
        pooling: str | None = None,
        **kwargs,
    ):
        # Record every projection-related field first, then hand the
        # remaining keyword arguments off to the Qwen3VL base config.
        self.pooling = pooling
        self.colbert_dim = colbert_dim
        self.projection_dim = projection_dim
        self.dim = dim
        self.dims = dims
        self.embed_dim = embed_dim
        super().__init__(**kwargs)
class OpsColQwen3Config(ColQwen3Config):
    """Configuration for OpenSearch-AI ColQwen3 variants."""

    # Maps the "ops_colqwen3" model_type (e.g. OpenSearch-AI/Ops-Colqwen3-4B)
    # onto the shared ColQwen3 configuration class.
    model_type = "ops_colqwen3"
class Qwen3VLNemotronEmbedConfig(ColQwen3Config):
    """Configuration for NVIDIA Nemotron ColEmbed variants."""

    # Maps the "qwen3_vl_nemotron_embed" model_type
    # (e.g. nvidia/nemotron-colembed-vl-8b-v2) onto the shared config class.
    model_type = "qwen3_vl_nemotron_embed"
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/colqwen3.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/qwen3_5.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Qwen3.5 model configuration"""
from transformers.configuration_utils import PretrainedConfig, layer_type_validation
class Qwen3_5TextConfig(PretrainedConfig):
    """Configuration for the Qwen3.5 text (language-model) backbone.

    The decoder interleaves linear-attention layers with full-attention
    layers: by default every ``full_attention_interval``-th layer (4) uses
    full attention and the rest use linear attention.
    """

    model_type = "qwen3_5_text"
    # Keys dropped from model outputs at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan: module glob -> split direction.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: stage name -> (input names, output names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    # Key under which this config nests inside the composite Qwen3_5Config.
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=248320,
        hidden_size=4096,
        intermediate_size=12288,
        num_hidden_layers=32,
        num_attention_heads=16,
        num_key_value_heads=4,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_parameters=None,
        attention_bias=False,
        attention_dropout=0.0,
        head_dim=256,
        linear_conv_kernel_dim=4,
        linear_key_head_dim=128,
        linear_value_head_dim=128,
        linear_num_key_heads=16,
        linear_num_value_heads=32,
        layer_types=None,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        **kwargs,
    ):
        # Forwarded via kwargs so the base class skips the multimodal-rope
        # keys during rope validation.
        # NOTE(review): presumably consumed by PretrainedConfig's rope
        # parameter validation — confirm against the installed transformers.
        kwargs["ignore_keys_at_rope_validation"] = [
            "mrope_section",
            "mrope_interleaved",
        ]
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim
        self.rope_parameters = rope_parameters
        # Default: only a quarter of head_dim gets rotary embeddings, unless
        # the caller explicitly provided a partial_rotary_factor.
        kwargs.setdefault("partial_rotary_factor", 0.25)
        self.layer_types = layer_types
        if self.layer_types is None:
            # Every interval-th layer (1-indexed) is full attention; all
            # other layers are linear attention.
            interval_pattern = kwargs.get("full_attention_interval", 4)
            self.layer_types = [
                "linear_attention"
                if bool((i + 1) % interval_pattern)
                else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        # linear attention part
        self.linear_conv_kernel_dim = linear_conv_kernel_dim
        self.linear_key_head_dim = linear_key_head_dim
        self.linear_value_head_dim = linear_value_head_dim
        self.linear_num_key_heads = linear_num_key_heads
        self.linear_num_value_heads = linear_num_value_heads
        super().__init__(**kwargs)
        # Set these AFTER super().__init__() because transformers v4's
        # PretrainedConfig.__init__ has these as explicit params with different
        # defaults (e.g. tie_word_embeddings=True) that would overwrite our values.
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
class Qwen3_5VisionConfig(PretrainedConfig):
    """Configuration for the Qwen3.5 vision encoder."""

    model_type = "qwen3_5"
    # Key under which this config nests inside the composite Qwen3_5Config.
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=27,
        hidden_size=1152,
        hidden_act="gelu_pytorch_tanh",
        intermediate_size=4304,
        num_heads=16,
        in_channels=3,
        patch_size=16,
        spatial_merge_size=2,
        temporal_patch_size=2,
        out_hidden_size=3584,
        num_position_embeddings=2304,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Vision-transformer dimensions.
        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.num_heads = num_heads
        # Patch-embedding parameters.
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        # Size of the vision features projected into the language model.
        self.out_hidden_size = out_hidden_size
        self.num_position_embeddings = num_position_embeddings
        self.initializer_range = initializer_range
class Qwen3_5Config(PretrainedConfig):
    """Composite configuration for Qwen3.5 multimodal models.

    Bundles a text config and a vision config together with the special
    token ids that delimit image/video content in the token stream.
    """

    model_type = "qwen3_5"
    sub_configs = {
        "vision_config": Qwen3_5VisionConfig,
        "text_config": Qwen3_5TextConfig,
    }
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=248056,
        video_token_id=248057,
        vision_start_token_id=248053,
        vision_end_token_id=248054,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Each sub-config may arrive as a dict (deserialized JSON), None
        # (use defaults), or an already-constructed config object. The
        # instance case previously fell through both branches and left the
        # attribute unset, causing AttributeError on later access.
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()
        else:
            self.vision_config = vision_config
        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            self.text_config = self.sub_configs["text_config"]()
        else:
            self.text_config = text_config
        # Special token ids that mark multimodal content in the sequence.
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.vision_start_token_id = vision_start_token_id
        self.vision_end_token_id = vision_end_token_id
        super().__init__(**kwargs)
        # Set after super().__init__() to avoid v4 PretrainedConfig overwrite
        self.tie_word_embeddings = tie_word_embeddings
__all__ = ["Qwen3_5Config", "Qwen3_5TextConfig"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/qwen3_5.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/qwen3_5_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The Qwen Team and The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Qwen3.5-MoE model configuration"""
from transformers.configuration_utils import PretrainedConfig, layer_type_validation
class Qwen3_5MoeTextConfig(PretrainedConfig):
    """Configuration for the Qwen3.5-MoE text (language-model) backbone.

    Mixture-of-experts decoder that interleaves linear-attention layers
    with full-attention layers: by default every
    ``full_attention_interval``-th layer (4) uses full attention.
    """

    model_type = "qwen3_5_moe_text"
    # Keys dropped from model outputs at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Tensor-parallel sharding plan: module glob -> split direction.
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.experts.gate_up_proj": "packed_colwise",
        "layers.*.mlp.experts.down_proj": "rowwise",
        "layers.*.mlp.shared_expert.gate_proj": "colwise",
        "layers.*.mlp.shared_expert.up_proj": "colwise",
        "layers.*.mlp.shared_expert.down_proj": "rowwise",
    }
    # Pipeline-parallel plan: stage name -> (input names, output names).
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }
    # Key under which this config nests inside the composite Qwen3_5MoeConfig.
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=248320,
        hidden_size=2048,
        num_hidden_layers=40,
        num_attention_heads=16,
        num_key_value_heads=2,
        hidden_act="silu",
        max_position_embeddings=32768,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_parameters=None,
        attention_bias=False,
        attention_dropout=0.0,
        head_dim=256,
        linear_conv_kernel_dim=4,
        linear_key_head_dim=128,
        linear_value_head_dim=128,
        linear_num_key_heads=16,
        linear_num_value_heads=32,
        moe_intermediate_size=512,
        shared_expert_intermediate_size=512,
        num_experts_per_tok=8,
        num_experts=256,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        layer_types=None,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        **kwargs,
    ):
        # Forwarded via kwargs so the base class skips the multimodal-rope
        # keys during rope validation.
        # NOTE(review): presumably consumed by PretrainedConfig's rope
        # parameter validation — confirm against the installed transformers.
        kwargs["ignore_keys_at_rope_validation"] = [
            "mrope_section",
            "mrope_interleaved",
        ]
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim
        self.rope_parameters = rope_parameters
        # Default: only a quarter of head_dim gets rotary embeddings, unless
        # the caller explicitly provided a partial_rotary_factor.
        kwargs.setdefault("partial_rotary_factor", 0.25)
        self.layer_types = layer_types
        if self.layer_types is None:
            # Every interval-th layer (1-indexed) is full attention; all
            # other layers are linear attention.
            interval_pattern = kwargs.get("full_attention_interval", 4)
            self.layer_types = [
                "linear_attention"
                if bool((i + 1) % interval_pattern)
                else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types, self.num_hidden_layers)
        # linear attention part
        self.linear_conv_kernel_dim = linear_conv_kernel_dim
        self.linear_key_head_dim = linear_key_head_dim
        self.linear_value_head_dim = linear_value_head_dim
        self.linear_num_key_heads = linear_num_key_heads
        self.linear_num_value_heads = linear_num_value_heads
        # Mixture-of-experts routing and sizing parameters.
        self.moe_intermediate_size = moe_intermediate_size
        self.shared_expert_intermediate_size = shared_expert_intermediate_size
        self.num_experts_per_tok = num_experts_per_tok
        self.num_experts = num_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        super().__init__(**kwargs)
        # Set these AFTER super().__init__() because transformers v4's
        # PretrainedConfig.__init__ has these as explicit params with different
        # defaults (e.g. tie_word_embeddings=True) that would overwrite our values.
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.tie_word_embeddings = tie_word_embeddings
class Qwen3_5MoeVisionConfig(PretrainedConfig):
    """Configuration for the Qwen3.5-MoE vision encoder."""

    model_type = "qwen3_5_moe"
    # Key under which this config nests inside the composite Qwen3_5MoeConfig.
    base_config_key = "vision_config"

    def __init__(
        self,
        depth=27,
        hidden_size=1152,
        hidden_act="gelu_pytorch_tanh",
        intermediate_size=4304,
        num_heads=16,
        in_channels=3,
        patch_size=16,
        spatial_merge_size=2,
        temporal_patch_size=2,
        out_hidden_size=3584,
        num_position_embeddings=2304,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Vision-transformer dimensions.
        self.depth = depth
        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.num_heads = num_heads
        # Patch-embedding parameters.
        self.in_channels = in_channels
        self.patch_size = patch_size
        self.spatial_merge_size = spatial_merge_size
        self.temporal_patch_size = temporal_patch_size
        # Size of the vision features projected into the language model.
        self.out_hidden_size = out_hidden_size
        self.num_position_embeddings = num_position_embeddings
        self.initializer_range = initializer_range
class Qwen3_5MoeConfig(PretrainedConfig):
    """Composite configuration for Qwen3.5-MoE multimodal models.

    Bundles a text config and a vision config together with the special
    token ids that delimit image/video content in the token stream.
    """

    model_type = "qwen3_5_moe"
    sub_configs = {
        "vision_config": Qwen3_5MoeVisionConfig,
        "text_config": Qwen3_5MoeTextConfig,
    }
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        image_token_id=248056,
        video_token_id=248057,
        vision_start_token_id=248053,
        vision_end_token_id=248054,
        tie_word_embeddings=False,
        **kwargs,
    ):
        # Each sub-config may arrive as a dict (deserialized JSON), None
        # (use defaults), or an already-constructed config object. The
        # instance case previously fell through both branches and left the
        # attribute unset, causing AttributeError on later access.
        if isinstance(vision_config, dict):
            self.vision_config = self.sub_configs["vision_config"](**vision_config)
        elif vision_config is None:
            self.vision_config = self.sub_configs["vision_config"]()
        else:
            self.vision_config = vision_config
        if isinstance(text_config, dict):
            self.text_config = self.sub_configs["text_config"](**text_config)
        elif text_config is None:
            self.text_config = self.sub_configs["text_config"]()
        else:
            self.text_config = text_config
        # Special token ids that mark multimodal content in the sequence.
        self.image_token_id = image_token_id
        self.video_token_id = video_token_id
        self.vision_start_token_id = vision_start_token_id
        self.vision_end_token_id = vision_end_token_id
        super().__init__(**kwargs)
        # Set after super().__init__() to avoid v4 PretrainedConfig overwrite
        self.tie_word_embeddings = tie_word_embeddings
__all__ = ["Qwen3_5MoeConfig", "Qwen3_5MoeTextConfig"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/qwen3_5_moe.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/data_parallel_pause_resume.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test pause/resume with Data Parallel (DP) via HTTP API.
This example demonstrates coordinated pause/resume across multiple DP ranks.
The pause synchronizes across all DP engines via all-reduce.
Prerequisites:
Start a vLLM server with data parallelism:
$ VLLM_SERVER_DEV_MODE=1 vllm serve facebook/opt-125m \
--enforce-eager \
--data-parallel-size 4 \
--tensor-parallel-size 1
Then run this script:
$ python data_parallel_pause_resume.py
The test verifies pause works by:
1. Starting a streaming generation request
2. Pausing the server mid-generation
3. Sleeping for PAUSE_DURATION seconds
4. Resuming the server
5. Verifying there was a gap in token generation matching the pause duration
"""
import argparse
import threading
import time
import requests
from openai import OpenAI
# Defaults; override with the --base-url / --model CLI flags.
BASE_URL = "http://localhost:8000"
MODEL_NAME = "facebook/opt-125m"
# Seconds the server stays paused; the test expects a token gap of ~this length.
PAUSE_DURATION = 3.0
def pause_generation(base_url: str, mode: str = "keep") -> None:
    """Pause generation via HTTP endpoint."""
    # POST to the dev-mode /pause endpoint; raises on a non-2xx status.
    response = requests.post(f"{base_url}/pause", params={"mode": mode}, timeout=60)
    response.raise_for_status()
    print("Server paused")
def resume_generation(base_url: str) -> None:
    """Resume generation via HTTP endpoint."""
    # POST to the dev-mode /resume endpoint; raises on a non-2xx status.
    response = requests.post(f"{base_url}/resume", timeout=60)
    response.raise_for_status()
    print("Server resumed")
def main():
    """Run the pause/resume verification against a DP-enabled vLLM server."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-url", default=BASE_URL)
    parser.add_argument("--model", default=MODEL_NAME)
    args = parser.parse_args()
    client = OpenAI(
        base_url=f"{args.base_url}/v1",
        api_key="EMPTY",
    )
    prompt = "Write a long story about a dragon. Once upon a time"
    # Wall-clock arrival time of each streamed token.
    token_times: list[float] = []
    # Number of tokens that had arrived when the pause was issued.
    pause_token_idx = 0
    pause_triggered = threading.Event()

    def generator_thread():
        """Stream tokens and record timestamps."""
        stream = client.completions.create(
            model=args.model,
            prompt=prompt,
            max_tokens=50,
            stream=True,
        )
        for chunk in stream:
            if chunk.choices[0].text:
                token_times.append(time.monotonic())
                token_count = len(token_times)
                print(f"Token {token_count}: {chunk.choices[0].text!r}")
                # Signal controller after some tokens
                if token_count >= 5 and not pause_triggered.is_set():
                    pause_triggered.set()

    def controller_thread():
        """Pause and resume the server."""
        nonlocal pause_token_idx
        # Wait for some tokens
        pause_triggered.wait()
        print(f"\nPausing server (keep mode) at token {len(token_times)}...")
        pause_generation(args.base_url, mode="keep")
        pause_token_idx = len(token_times)
        print(f"Sleeping for {PAUSE_DURATION}s...")
        time.sleep(PAUSE_DURATION)
        print("Resuming server...")
        resume_generation(args.base_url)
        print("Resumed!\n")

    # Run both threads
    gen_thread = threading.Thread(target=generator_thread)
    ctrl_thread = threading.Thread(target=controller_thread)
    gen_thread.start()
    ctrl_thread.start()
    gen_thread.join()
    ctrl_thread.join()

    # Check gap at the pause point
    if pause_token_idx < len(token_times):
        # Gap between the last pre-pause token and the first post-resume token.
        pause_gap = token_times[pause_token_idx] - token_times[pause_token_idx - 1]
        print(
            f"\nGap after pause (token {pause_token_idx} -> "
            f"{pause_token_idx + 1}): {pause_gap:.3f}s"
        )
        # Allow 10% tolerance for scheduling/network jitter.
        if pause_gap >= PAUSE_DURATION * 0.9:
            print("Test passed! Pause synchronized across DP ranks.")
        else:
            print(f"Test failed! Expected ~{PAUSE_DURATION}s gap, got {pause_gap:.3f}s")
    else:
        print("Test failed! No tokens were generated after resuming.")


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/data_parallel_pause_resume.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:scripts/autotune_helion_kernels.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Autotune registered Helion kernels for optimal configurations.
Usage:
# Autotune all registered kernels
python scripts/autotune_helion_kernels.py
# Autotune specific kernel
python scripts/autotune_helion_kernels.py --kernels silu_mul_fp8
# Autotune multiple kernels
python scripts/autotune_helion_kernels.py --kernels silu_mul_fp8 rms_norm_fp8
# Force re-autotuning
python scripts/autotune_helion_kernels.py --force
# List available kernels
python scripts/autotune_helion_kernels.py --list
"""
import argparse
import sys
import time
from dataclasses import dataclass
import torch
try:
import helion
from vllm.kernels.helion import (
ConfigManager,
get_kernel_by_name,
get_registered_kernels,
)
from vllm.kernels.helion.utils import get_canonical_gpu_name
from vllm.logger import init_logger
from vllm.utils.import_utils import has_helion
except ImportError as e:
print(f"Error importing vLLM: {e}")
print("Please ensure vLLM is installed and in your Python path")
sys.exit(1)
logger = init_logger("vllm.scripts.autotune_helion_kernels")
@dataclass
class AutotuneResult:
    """Outcome of autotuning a single kernel."""

    status: str  # "success" | "partial" | "error" | "skipped"
    # Counts of config keys that autotuned successfully / unsuccessfully.
    successful: int
    failed: int
    # Mapping of config key -> autotuned helion.Config (successful ones only).
    configs: dict[str, "helion.Config"]
    # Human-readable detail, used for "error" and "skipped" results.
    message: str = ""
def list_kernels() -> None:
    """Print every Helion kernel currently present in the registry."""
    registry = get_registered_kernels()
    if not registry:
        print("No Helion kernels found in registry.")
        return
    print("Available Helion kernels:")
    print("=" * 50)
    for kernel_name in sorted(registry):
        print(f" {kernel_name}")
    print(f"\nTotal: {len(registry)} kernels")
def check_requirements() -> bool:
    """Return True when both a CUDA GPU and the Helion package are available."""
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. Helion autotuning requires GPU.")
        return False
    if has_helion():
        return True
    logger.error("Helion is not installed. Please install Helion package.")
    return False
def autotune_kernel(
    kernel_name: str,
    platform: str,
    config_manager: ConfigManager,
    force: bool = False,
    autotune_effort: str = "quick",
) -> AutotuneResult:
    """Autotune all registered input configs of one kernel for *platform*.

    Args:
        kernel_name: Name of the kernel in the Helion registry.
        platform: Canonical GPU platform name used to namespace saved configs.
        config_manager: Persistent storage for autotuned configs.
        force: When True, re-autotune configs that already exist.
        autotune_effort: Helion effort level, e.g. "quick" or "full".

    Returns:
        An AutotuneResult whose status is "success", "partial", "error",
        or "skipped".
    """
    logger.debug(
        "Starting autotune for kernel '%s' with effort='%s'",
        kernel_name,
        autotune_effort,
    )
    kernel_wrapper = get_kernel_by_name(kernel_name)
    if kernel_wrapper is None:
        error_msg = f"Kernel '{kernel_name}' not found in registry"
        logger.error(error_msg)
        return AutotuneResult(
            status="error",
            message=error_msg,
            successful=0,
            failed=0,
            configs={},
        )
    try:
        # Mapping of config key -> example inputs used to drive autotuning.
        inputs_dict = kernel_wrapper.get_inputs()
    except NotImplementedError:
        error_msg = f"Kernel '{kernel_name}' has no input generator registered"
        logger.error(error_msg)
        return AutotuneResult(
            status="error",
            message=error_msg,
            successful=0,
            failed=0,
            configs={},
        )
    try:
        logger.info(
            "Autotuning kernel '%s' for platform '%s' with %d configs",
            kernel_name,
            platform,
            len(inputs_dict),
        )
        # Unless forced, only autotune config keys with no saved config yet.
        configs_to_autotune = {}
        if not force:
            existing_configs = config_manager.get_platform_configs(
                kernel_name, platform
            )
            for config_key, inputs in inputs_dict.items():
                if config_key in existing_configs:
                    logger.debug(
                        "Config '%s' already exists for platform '%s', skipping",
                        config_key,
                        platform,
                    )
                else:
                    configs_to_autotune[config_key] = inputs
        else:
            logger.debug("Force mode enabled, will re-autotune all configs")
            configs_to_autotune = inputs_dict
        if not configs_to_autotune:
            logger.info(
                "All configs already exist for kernel '%s' on platform '%s'. "
                "Use --force to re-autotune.",
                kernel_name,
                platform,
            )
            return AutotuneResult(
                status="skipped",
                message="All configs already exist",
                successful=0,
                failed=0,
                configs={},
            )
        total_start_time = time.time()
        autotuned_configs = {}
        failed_configs = []
        for config_key, inputs in configs_to_autotune.items():
            logger.info("Autotuning config: %s", config_key)
            logger.debug(
                "Input shapes: %s",
                [getattr(inp, "shape", type(inp).__name__) for inp in inputs],
            )
            try:
                config_start_time = time.time()
                config = kernel_wrapper.run_autotune(inputs, autotune_effort)
                config_duration = time.time() - config_start_time
                # Save immediately for checkpointing
                config_manager.save_configs(kernel_name, platform, {config_key: config})
                autotuned_configs[config_key] = config
                logger.debug("Config details: %s", config)
                logger.info(
                    "✓ Autotuned and saved config '%s' (%.2fs)",
                    config_key,
                    config_duration,
                )
            except (RuntimeError, ValueError, OSError) as e:
                # One failing config does not abort the rest of the kernel.
                logger.exception(
                    "Failed to autotune config '%s': %s",
                    config_key,
                    e,
                )
                failed_configs.append(config_key)
        total_duration = time.time() - total_start_time
        successful = len(autotuned_configs)
        failed = len(failed_configs)
        logger.info(
            "Completed autotuning for kernel '%s': %d successful, %d failed (%.2fs)",
            kernel_name,
            successful,
            failed,
            total_duration,
        )
        status = "success" if failed == 0 else "partial"
        return AutotuneResult(
            status=status,
            successful=successful,
            failed=failed,
            configs=autotuned_configs,
        )
    except (KeyError, RuntimeError, ValueError, OSError) as e:
        # Catch-all for unexpected failures outside the per-config loop.
        error_msg = f"Unexpected error: {e}"
        logger.exception("Failed to autotune kernel '%s': %s", kernel_name, e)
        return AutotuneResult(
            status="error",
            message=error_msg,
            successful=0,
            failed=0,
            configs={},
        )
def summarize_results(results: dict[str, AutotuneResult]) -> bool:
    """Log a per-kernel breakdown plus aggregate totals.

    Returns True when no kernel finished in a "partial" or "error" state.
    """
    logger.info("=" * 50)
    logger.info("Autotuning Results Summary")
    logger.info("=" * 50)

    total_successful = 0
    total_failed = 0
    # Bucket a short human-readable description of each kernel by status.
    buckets: dict[str, list[str]] = {
        "success": [],
        "partial": [],
        "error": [],
        "skipped": [],
    }
    for kernel_name, result in results.items():
        total_successful += result.successful
        total_failed += result.failed
        if result.status == "success":
            buckets["success"].append(f"{kernel_name} ({result.successful} configs)")
            logger.info("✓ %s: %d configs successful", kernel_name, result.successful)
        elif result.status == "partial":
            buckets["partial"].append(
                f"{kernel_name} ({result.successful} ok, {result.failed} failed)"
            )
            logger.warning(
                "⚠ %s: %d successful, %d failed",
                kernel_name,
                result.successful,
                result.failed,
            )
        elif result.status == "error":
            buckets["error"].append(
                f"{kernel_name}: {result.message or 'Unknown error'}"
            )
            logger.error("✗ %s: %s", kernel_name, result.message or "Unknown error")
        elif result.status == "skipped":
            buckets["skipped"].append(f"{kernel_name}: {result.message or 'Skipped'}")
            logger.info("- %s: %s", kernel_name, result.message or "Skipped")

    logger.info("=" * 50)
    logger.info(
        "Summary: %d total configs (%d successful, %d failed)",
        total_successful + total_failed,
        total_successful,
        total_failed,
    )
    logger.info(
        "Kernels: %d success, %d partial, %d error, %d skipped",
        len(buckets["success"]),
        len(buckets["partial"]),
        len(buckets["error"]),
        len(buckets["skipped"]),
    )

    has_failures = bool(buckets["error"] or buckets["partial"])
    if not has_failures:
        if total_successful > 0:
            logger.info("All configs autotuned successfully!")
        else:
            logger.info("No new configs were generated (all may already exist)")
    return not has_failures
def get_kernels_to_autotune(requested_kernels: list[str] | None) -> list[str]:
    """Validate the requested kernel names against the registry.

    Returns every registered kernel when no explicit list was given; exits
    the process on an empty registry, duplicate names, or unknown names.
    """
    all_kernels = get_registered_kernels()
    if not all_kernels:
        logger.error("No Helion kernels found in registry")
        sys.exit(1)
    if not requested_kernels:
        return list(all_kernels.keys())

    unique_names = set(requested_kernels)
    if len(unique_names) != len(requested_kernels):
        duplicates = [k for k in unique_names if requested_kernels.count(k) > 1]
        logger.error("Duplicate kernel names in --kernels flag: %s", duplicates)
        sys.exit(1)

    missing_kernels = [name for name in requested_kernels if name not in all_kernels]
    if missing_kernels:
        logger.error("Kernel(s) not found: %s", missing_kernels)
        logger.error("Available kernels: %s", list(all_kernels.keys()))
        sys.exit(1)
    # All names validated; preserve the caller-supplied order.
    return [name for name in requested_kernels if name in all_kernels]
def main():
    """CLI entry point: parse arguments and autotune the selected kernels."""
    parser = argparse.ArgumentParser(
        description="Autotune Helion kernels",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        # Reuse the module docstring's usage examples in --help output.
        epilog=__doc__.split("Usage:")[1] if "Usage:" in __doc__ else "",
    )
    parser.add_argument(
        "--kernels",
        nargs="+",
        help="Kernel(s) to autotune (default: all kernels)",
    )
    parser.add_argument(
        "--config-dir",
        type=str,
        help="Config directory for config files (default: vLLM helion configs dir)",
    )
    parser.add_argument(
        "--list",
        action="store_true",
        help="List available Helion kernels and exit",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help=(
            "Force re-autotuning even if configs already exist for the "
            "platform and config keys"
        ),
    )
    parser.add_argument(
        "--autotune-effort",
        type=str,
        default="quick",
        help=(
            "Helion autotune effort level: 'quick' (smaller search) or "
            "'full' (full search budget) (default: quick)"
        ),
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable verbose logging",
    )
    args = parser.parse_args()
    import logging

    if args.verbose:
        logging.getLogger("vllm").setLevel(logging.DEBUG)
        logger.debug("Verbose mode enabled")
        logger.debug("Arguments: %s", vars(args))
    else:
        logging.getLogger("vllm").setLevel(logging.INFO)
    # --list short-circuits before any GPU/Helion requirement checks.
    if args.list:
        list_kernels()
        return
    if not check_requirements():
        sys.exit(1)
    platform = get_canonical_gpu_name()
    logger.info("Detected GPU platform: %s", platform)
    config_manager = (
        ConfigManager(args.config_dir) if args.config_dir else ConfigManager()
    )
    try:
        # Fail fast if saved configs cannot be written later.
        config_manager.ensure_base_dir_writable()
    except OSError as e:
        logger.error("Failed to access config directory: %s", e)
        sys.exit(1)
    kernels_to_autotune = get_kernels_to_autotune(args.kernels)
    logger.info(
        "Will autotune %d kernel(s) for platform '%s': %s",
        len(kernels_to_autotune),
        platform,
        kernels_to_autotune,
    )
    results = {}
    for kernel_name in kernels_to_autotune:
        result = autotune_kernel(
            kernel_name, platform, config_manager, args.force, args.autotune_effort
        )
        results[kernel_name] = result
    # Exit code 0 only when no kernel ended in a partial/error state.
    success = summarize_results(results)
    sys.exit(0 if success else 1)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "scripts/autotune_helion_kernels.py",
"license": "Apache License 2.0",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/helion/test_silu_mul_fp8.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import torch.nn.functional as F
from vllm.utils.import_utils import has_helion
if not has_helion():
pytest.skip(
"Helion is not installed. Install with: pip install vllm[helion]",
allow_module_level=True,
)
from vllm.kernels.helion.config_manager import ConfigManager
from vllm.kernels.helion.ops.silu_mul_fp8 import (
pick_silu_mul_fp8_config,
silu_mul_fp8,
silu_mul_fp8_baseline,
)
def skip_if_platform_unsupported():
    """Skip the calling test when no saved silu_mul_fp8 configs exist for
    the current GPU platform (or platform detection itself fails)."""
    try:
        from vllm.kernels.helion.utils import get_canonical_gpu_name

        if not torch.cuda.is_available():
            pytest.skip("CUDA not available")
        platform = get_canonical_gpu_name()
        # Reuse the process-wide singleton when present; otherwise build a
        # throwaway manager just for the lookup.
        try:
            config_manager = ConfigManager.get_instance()
        except RuntimeError:
            config_manager = ConfigManager()
        configs = config_manager.get_platform_configs("silu_mul_fp8", platform)
        if len(configs) == 0:
            pytest.skip("Current GPU platform not supported for silu_mul_fp8 kernel")
    except (ImportError, RuntimeError, KeyError):
        pytest.skip("Error detecting platform support for silu_mul_fp8 kernel")
@pytest.fixture(autouse=True)
def reset_config_manager_singleton():
    """Give every test a fresh ConfigManager singleton and tear it down after."""
    ConfigManager.reset_instance()
    ConfigManager()
    yield
    ConfigManager.reset_instance()
class TestSiluMulFp8ConfigPicker:
    def test_config_picker_exact_match(self):
        """An input matching a config key exactly selects that key."""
        config_keys = [
            "intermediate_2048_numtokens_256",
            "intermediate_4096_numtokens_256",
        ]
        # Width 4096 -> intermediate_size 2048 (gate+up are concatenated).
        input_tensor = torch.randn(32, 4096, dtype=torch.bfloat16, device="cuda")
        scale = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        args = (input_tensor, scale)
        selected_key = pick_silu_mul_fp8_config(args, config_keys)
        assert selected_key == "intermediate_2048_numtokens_256"
    def test_config_picker_closest_match(self):
        """Without an exact match, the nearest intermediate size is chosen."""
        config_keys = [
            "intermediate_2048_numtokens_256",
            "intermediate_4096_numtokens_256",
        ]
        # Use 7000 (intermediate_size=3500) which is closer to 4096 than 2048
        input_tensor = torch.randn(32, 7000, dtype=torch.bfloat16, device="cuda")
        scale = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        args = (input_tensor, scale)
        selected_key = pick_silu_mul_fp8_config(args, config_keys)
        assert selected_key == "intermediate_4096_numtokens_256"
    def test_config_picker_fallback_to_default(self):
        """A lone "default" key is selected when no sized keys exist."""
        config_keys = ["default"]
        input_tensor = torch.randn(32, 4096, dtype=torch.bfloat16, device="cuda")
        scale = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        args = (input_tensor, scale)
        selected_key = pick_silu_mul_fp8_config(args, config_keys)
        assert selected_key == "default"
    def test_config_picker_no_configs(self):
        """An empty key list yields None rather than raising."""
        config_keys: list[str] = []
        input_tensor = torch.randn(32, 4096, dtype=torch.bfloat16, device="cuda")
        scale = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        args = (input_tensor, scale)
        selected_key = pick_silu_mul_fp8_config(args, config_keys)
        assert selected_key is None
    @pytest.mark.parametrize("intermediate_size", [2048, 4096, 5120])
    def test_config_picker_different_sizes(self, intermediate_size):
        """Each supported intermediate size maps to its own config key."""
        config_keys = [
            "intermediate_2048_numtokens_256",
            "intermediate_4096_numtokens_256",
            "intermediate_5120_numtokens_256",
        ]
        # Input width is 2x intermediate_size (gate and up halves).
        input_tensor = torch.randn(
            32, 2 * intermediate_size, dtype=torch.bfloat16, device="cuda"
        )
        scale = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        args = (input_tensor, scale)
        selected_key = pick_silu_mul_fp8_config(args, config_keys)
        expected_key = f"intermediate_{intermediate_size}_numtokens_256"
        assert selected_key == expected_key
def test_config_picker_numtokens_ceiling(self):
    """The smallest numtokens bucket >= the input's num_tokens is chosen."""
    keys = [
        "intermediate_4096_numtokens_8",
        "intermediate_4096_numtokens_32",
        "intermediate_4096_numtokens_128",
        "intermediate_4096_numtokens_256",
    ]
    # 20 tokens: 32 is the smallest tuned bucket that covers it.
    x = torch.randn(20, 8192, dtype=torch.bfloat16, device="cuda")
    s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
    assert pick_silu_mul_fp8_config((x, s), keys) == "intermediate_4096_numtokens_32"
def test_config_picker_numtokens_exact(self):
    """An exact num_tokens match is preferred over a larger bucket."""
    keys = [
        "intermediate_4096_numtokens_8",
        "intermediate_4096_numtokens_32",
        "intermediate_4096_numtokens_128",
    ]
    x = torch.randn(32, 8192, dtype=torch.bfloat16, device="cuda")
    s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
    assert pick_silu_mul_fp8_config((x, s), keys) == "intermediate_4096_numtokens_32"
def test_config_picker_numtokens_fallback_to_largest(self):
    """When num_tokens exceeds every bucket, the largest one is used."""
    keys = [
        "intermediate_4096_numtokens_8",
        "intermediate_4096_numtokens_32",
        "intermediate_4096_numtokens_128",
    ]
    # 512 tokens is beyond all tuned buckets -> fall back to 128.
    x = torch.randn(512, 8192, dtype=torch.bfloat16, device="cuda")
    s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
    assert pick_silu_mul_fp8_config((x, s), keys) == "intermediate_4096_numtokens_128"
def test_config_picker_malformed_key_raises(self):
    """A key that does not match the expected pattern raises ValueError."""
    x = torch.randn(32, 8192, dtype=torch.bfloat16, device="cuda")
    s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
    with pytest.raises(ValueError, match="Malformed config key"):
        pick_silu_mul_fp8_config((x, s), ["intermediate_4096_badformat_256"])
def test_config_picker_default_ignored_when_valid_keys_exist(self):
    """'default' is skipped whenever a real shape-keyed config matches."""
    keys = [
        "default",
        "intermediate_4096_numtokens_32",
        "intermediate_4096_numtokens_128",
    ]
    x = torch.randn(64, 8192, dtype=torch.bfloat16, device="cuda")
    s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
    assert pick_silu_mul_fp8_config((x, s), keys) == "intermediate_4096_numtokens_128"
class TestSiluMulFp8Correctness:
    """Numerical comparison of the Helion kernel against the vLLM baseline."""

    @pytest.mark.parametrize("batch_size", [1, 8, 32, 128])
    @pytest.mark.parametrize("intermediate_size", [2048, 3000, 3500, 4096, 5000])
    @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
    def test_silu_mul_fp8_correctness(self, batch_size, intermediate_size, dtype):
        skip_if_platform_unsupported()
        x = torch.randn(batch_size, 2 * intermediate_size, dtype=dtype, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        expected = silu_mul_fp8_baseline(x, s)
        actual = silu_mul_fp8(x, s)
        assert actual.shape == expected.shape
        assert actual.dtype == torch.float8_e4m3fn
        assert expected.dtype == torch.float8_e4m3fn
        # FP8 E4M3 has limited precision. Values near quantization boundaries
        # can round differently due to intermediate precision differences.
        torch.testing.assert_close(
            actual.to(torch.float32),
            expected.to(torch.float32),
            atol=0.05,
            rtol=0.05,
            msg=f"Mismatch at batch={batch_size}, size={intermediate_size}",
        )

    def test_silu_mul_fp8_shape_inference(self):
        """Output keeps the batch dim and halves the feature dim."""
        skip_if_platform_unsupported()
        rows, cols = 32, 8192
        x = torch.randn(rows, cols, dtype=torch.bfloat16, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        out = silu_mul_fp8(x, s)
        assert out.shape == (rows, cols // 2)
        assert out.dtype == torch.float8_e4m3fn

    def test_silu_mul_fp8_scale_variations(self):
        """Kernel matches the baseline across a range of scale factors."""
        skip_if_platform_unsupported()
        x = torch.randn(16, 4096, dtype=torch.bfloat16, device="cuda")
        for scale_val in (0.1, 0.5, 1.0, 2.0, 10.0):
            s = torch.tensor([scale_val], dtype=torch.float32, device="cuda")
            expected = silu_mul_fp8_baseline(x, s)
            actual = silu_mul_fp8(x, s)
            torch.testing.assert_close(
                actual.to(torch.float32),
                expected.to(torch.float32),
                atol=0.05,
                rtol=0.05,
                msg=f"Mismatch for scale={scale_val}",
            )

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 4096),
            (16, 4096),
            (128, 4096),
            (1024, 4096),
            (1, 8192),
            (16, 8192),
            (128, 8192),
        ],
    )
    def test_silu_mul_fp8_various_shapes(self, shape):
        """Kernel matches the baseline for assorted 2D shapes."""
        skip_if_platform_unsupported()
        x = torch.randn(*shape, dtype=torch.bfloat16, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        expected = silu_mul_fp8_baseline(x, s)
        actual = silu_mul_fp8(x, s)
        assert actual.shape == expected.shape
        torch.testing.assert_close(
            actual.to(torch.float32),
            expected.to(torch.float32),
            atol=0.05,
            rtol=0.05,
            msg=f"Mismatch for shape={shape}",
        )
def silu_mul_fp8_pytorch(input: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
"""Pure PyTorch reference using F.silu.
This matches vLLM's SiluAndMul.forward_native exactly:
F.silu(x[..., :d]) * x[..., d:]
"""
d = input.shape[-1] // 2
result = F.silu(input[..., :d]) * input[..., d:]
return (result.to(torch.float32) / scale).to(torch.float8_e4m3fn)
class TestSiluMulFp8PytorchReference:
    """Compares the Helion kernel against the pure-PyTorch reference.

    Both sides use PyTorch's FP8 conversion (same rounding mode), unlike the
    vLLM C++ baseline which relies on NVIDIA's hardware FP8 conversion with
    different rounding, so a tighter comparison is meaningful here.
    """

    @pytest.mark.parametrize("batch_size", [1, 8, 32, 128, 256])
    @pytest.mark.parametrize("intermediate_size", [1024, 2048, 4096])
    @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
    def test_silu_mul_fp8_vs_pytorch(self, batch_size, intermediate_size, dtype):
        skip_if_platform_unsupported()
        x = torch.randn(batch_size, 2 * intermediate_size, dtype=dtype, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        expected = silu_mul_fp8_pytorch(x, s)
        actual = silu_mul_fp8(x, s)
        assert actual.shape == expected.shape
        assert actual.dtype == torch.float8_e4m3fn
        # Tolerance accounts for FP8 quantization boundary effects
        torch.testing.assert_close(
            actual.to(torch.float32),
            expected.to(torch.float32),
            atol=0.05,
            rtol=0.05,
            msg=(
                f"Mismatch at batch={batch_size}, size={intermediate_size}, "
                f"dtype={dtype}"
            ),
        )

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 2, 4096),  # 3D input
            (2, 4, 2048),  # 3D input
            (1, 1, 1, 8192),  # 4D input
        ],
    )
    def test_silu_mul_fp8_multidim_vs_pytorch(self, shape):
        """Leading dims beyond 2 are flattened and restored correctly."""
        skip_if_platform_unsupported()
        x = torch.randn(*shape, dtype=torch.bfloat16, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        expected = silu_mul_fp8_pytorch(x, s)
        actual = silu_mul_fp8(x, s)
        assert actual.shape == expected.shape
        torch.testing.assert_close(
            actual.to(torch.float32),
            expected.to(torch.float32),
            atol=0.05,
            rtol=0.05,
            msg=f"Mismatch for shape={shape}",
        )
class TestSiluMulFp8Integration:
    """End-to-end checks against the Helion kernel registry."""

    def test_kernel_registration_integration(self):
        """The kernel is registered under its op name with a config picker."""
        from vllm.kernels.helion.register import get_registered_kernels

        kernels = get_registered_kernels()
        assert "silu_mul_fp8" in kernels
        wrapper = kernels["silu_mul_fp8"]
        assert wrapper.op_name == "silu_mul_fp8"
        assert wrapper._config_picker is not None

    def test_fake_impl_functionality(self):
        """The fake impl reproduces the real kernel's output metadata."""
        skip_if_platform_unsupported()
        from vllm.kernels.helion.register import get_registered_kernels

        x = torch.randn(32, 4096, dtype=torch.bfloat16, device="cuda")
        s = torch.tensor([0.5], dtype=torch.float32, device="cuda")
        wrapper = get_registered_kernels()["silu_mul_fp8"]
        out = wrapper._fake_impl(x, s)
        assert out.shape == (32, 2048)
        assert out.dtype == torch.float8_e4m3fn
        assert out.device == x.device
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/helion/test_silu_mul_fp8.py",
"license": "Apache License 2.0",
"lines": 309,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/kernels/helion/ops/silu_mul_fp8.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import regex as re
import torch
from vllm.logger import init_logger
from vllm.utils.import_utils import has_helion
# Fail fast at import time: everything below depends on the optional
# `helion` package, so refusing to import is clearer than a late NameError.
if not has_helion():
    raise ImportError(
        "silu_mul_fp8 Helion kernel requires helion to be installed. "
        "Install it with: pip install helion"
    )
import helion.language as hl
from vllm.kernels.helion.register import register_kernel
logger = init_logger(__name__)
@register_kernel  # type: ignore[misc]
def silu_mul_fp8(input: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    """Fused SiLU-and-mul with FP8 output, written as a Helion kernel.

    Splits the last dimension of ``input`` in half and computes
    ``silu(input[..., :d]) * input[..., d:]``, divides by ``scale``, and
    casts the result to ``float8_e4m3fn``.

    Args:
        input: Tensor whose last dimension has size 2*d; leading dimensions
            are flattened for the kernel and restored on return.
        scale: Single-element float32 tensor; output values are divided by it.

    Returns:
        ``float8_e4m3fn`` tensor with the same leading shape and last dim d.
    """
    original_shape = input.shape
    # Specialize the compiled kernel on the width of the last dimension.
    two_d = hl.specialize(original_shape[-1])
    d = two_d // 2
    output_shape = original_shape[:-1] + (d,)
    # Flatten leading dims so the tiled loop below is plain 2D.
    input_2d = input.view(-1, original_shape[-1])
    m = input_2d.shape[0]
    # TODO(gmagogsfm): Support for more float8 subtypes (e4m3fnuz, e5m2) coming
    out = torch.empty((m, d), device=input.device, dtype=torch.float8_e4m3fn)
    input_part_a = input_2d[:, :d]  # half that gets the SiLU
    input_part_b = input_2d[:, d:]  # half that multiplies the result
    assert scale.numel() == 1, "Scale must be a scalar Tensor"
    for tile_m, tile_n in hl.tile([m, d]):
        a_vals = input_part_a[tile_m, tile_n]
        silu_result = torch.nn.functional.silu(a_vals)
        b_vals = input_part_b[tile_m, tile_n]
        result = silu_result * b_vals
        # Apply the scale in float32 before the lossy FP8 cast.
        result_f32 = result.to(torch.float32)
        scale_val = hl.load(scale, [0])
        inv_scale = 1.0 / scale_val
        result_scaled = result_f32 * inv_scale
        out[tile_m, tile_n] = result_scaled.to(out.dtype)
    return out.view(output_shape)
@silu_mul_fp8.register_input_generator  # type: ignore[misc]
def generate_silu_mul_fp8_inputs() -> dict[str, tuple[Any, ...]]:
    """Build the autotuning input grid: one entry per (num_tokens, size) pair."""
    intermediate_sizes = [2048, 2880, 4096, 8192, 11008, 14336]
    # Use the same num_tokens values as vLLM's default cudagraph capture sizes.
    # See vllm/config/vllm.py _set_cudagraph_sizes() for the canonical formula.
    num_tokens_list = [1, 2, 4] + list(range(8, 256, 8)) + list(range(256, 513, 16))
    inputs: dict[str, tuple[Any, ...]] = {}
    for num_tokens in num_tokens_list:
        for intermediate_size in intermediate_sizes:
            # The activation input is (num_tokens, 2 * intermediate_size):
            # silu_mul splits the last dimension into two halves.
            x = torch.randn(
                num_tokens,
                2 * intermediate_size,
                device="cuda",
                dtype=torch.bfloat16,
            )
            s = torch.tensor([1.0], device="cuda", dtype=torch.float32)
            inputs[f"intermediate_{intermediate_size}_numtokens_{num_tokens}"] = (x, s)
    return inputs
@silu_mul_fp8.register_config_picker # type: ignore[misc]
def pick_silu_mul_fp8_config(
args: tuple[Any, ...], config_keys: list[str]
) -> str | None:
"""Pick the best pre-tuned config for the given input shape.
Selection strategy:
1. Find the closest intermediate_size among available configs
(exact match preferred).
2. Among the num_tokens values tuned for that intermediate_size, pick
the smallest num_tokens >= the input's num_tokens. If the input is
larger than all available num_tokens, fall back to the largest.
Config keys must be "default" or follow the format
"intermediate_{int}_numtokens_{int}".
"""
if not config_keys:
return None
input_tensor, _scale = args
intermediate_size = input_tensor.shape[-1] // 2
num_tokens = input_tensor.view(-1, input_tensor.shape[-1]).shape[0]
configs: dict[int, list[int]] = {}
for key in config_keys:
if key == "default":
continue
match = re.fullmatch(r"intermediate_(\d+)_numtokens_(\d+)", key)
if not match:
raise ValueError(
f"Malformed config key '{key}', "
f"expected format 'intermediate_{{int}}_numtokens_{{int}}'"
)
isize_str, ntokens_str = match.groups()
configs.setdefault(int(isize_str), []).append(int(ntokens_str))
if not configs:
return "default" if "default" in config_keys else None
best_isize = min(configs, key=lambda s: abs(s - intermediate_size))
available_ntokens = sorted(configs[best_isize])
best_ntokens = next(
(n for n in available_ntokens if n >= num_tokens), available_ntokens[-1]
)
return f"intermediate_{best_isize}_numtokens_{best_ntokens}"
def silu_mul_fp8_baseline(input: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    """Reference path backed by vLLM's C++ silu_and_mul_quant custom op."""
    half = input.shape[-1] // 2
    out = torch.empty(
        input.shape[:-1] + (half,), dtype=torch.float8_e4m3fn, device=input.device
    )
    # The op writes its result into `out` in place.
    torch.ops._C.silu_and_mul_quant(out, input, scale)
    return out
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/kernels/helion/ops/silu_mul_fp8.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/mla/flashinfer_mla_sparse.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""FlashInfer MLA Sparse Attention Backend.
This backend uses the FlashInfer TRT-LLM MLA kernel with sparse_mla_top_k
for models like DeepSeek-V3.2 that use index-based sparse attention.
For sparse MLA:
- block_tables shape changes from [batch_size, max_num_blocks] (dense)
to [batch_size, q_len_per_request, sparse_mla_top_k] (sparse)
- The sparse indices represent physical cache slot positions to attend to
- sparse_mla_top_k parameter must be set to the topk value
"""
from dataclasses import dataclass
from typing import TYPE_CHECKING, ClassVar
import numpy as np
import torch
from flashinfer.decode import trtllm_batch_decode_with_kv_cache_mla
from vllm.config import VllmConfig
from vllm.config.cache import CacheDType
from vllm.logger import init_logger
from vllm.model_executor.layers.attention.mla_attention import (
get_mla_dims,
)
from vllm.platforms.interface import DeviceCapability
from vllm.v1.attention.backend import (
AttentionBackend,
AttentionCGSupport,
AttentionLayer,
AttentionMetadata,
AttentionMetadataBuilder,
AttentionType,
CommonAttentionMetadata,
MultipleOf,
SparseMLAAttentionImpl,
)
from vllm.v1.attention.backends.mla.sparse_utils import (
triton_convert_req_index_to_global_index,
)
from vllm.v1.attention.backends.utils import KVCacheLayoutType
from vllm.v1.kv_cache_interface import AttentionSpec
if TYPE_CHECKING:
from vllm.model_executor.models.deepseek_v2 import Indexer
logger = init_logger(__name__)
FLASHINFER_MLA_SPARSE_WORKSPACE_BUFFER_SIZE = 128 * 1024 * 1024
class FlashInferMLASparseBackend(AttentionBackend):
    """FlashInfer MLA backend with sparse attention support.

    This backend uses the FlashInfer TRT-LLM MLA kernel with sparse_mla_top_k
    for models like DeepSeek-V3.2 that use index-based sparse attention.
    """

    accept_output_buffer: bool = True
    supported_dtypes: ClassVar[list[torch.dtype]] = [torch.float16, torch.bfloat16]
    supported_kv_cache_dtypes: ClassVar[list[CacheDType]] = [
        "auto",
        "bfloat16",
    ]

    @staticmethod
    def get_supported_kernel_block_sizes() -> list[int | MultipleOf]:
        # Only these KV-cache block sizes are advertised for this backend.
        return [32, 64]

    @staticmethod
    def get_name() -> str:
        return "FLASHINFER_MLA_SPARSE"

    @staticmethod
    def get_impl_cls() -> type["FlashInferMLASparseImpl"]:
        return FlashInferMLASparseImpl

    @staticmethod
    def get_builder_cls() -> type["FlashInferMLASparseMetadataBuilder"]:
        return FlashInferMLASparseMetadataBuilder

    @classmethod
    def get_supported_head_sizes(cls) -> list[int]:
        # 576 is the combined MLA head size; presumably
        # kv_lora_rank + qk_rope_head_dim — confirm against model dims.
        return [576]

    @classmethod
    def is_mla(cls) -> bool:
        return True

    @classmethod
    def is_sparse(cls) -> bool:
        return True

    @classmethod
    def supports_compute_capability(cls, capability: DeviceCapability) -> bool:
        # FlashInfer sparse MLA targets Blackwell (SM 10.x)
        return capability.major == 10

    @classmethod
    def supports_combination(
        cls,
        head_size: int,
        dtype: torch.dtype,
        kv_cache_dtype: CacheDType | None,
        block_size: int,
        use_mla: bool,
        has_sink: bool,
        use_sparse: bool,
        device_capability: DeviceCapability,
    ) -> str | None:
        """Return None when supported, else a human-readable rejection reason."""
        # FlashInfer MLA sparse kernel requires qk_nope_head_dim == 128
        from vllm.config import get_current_vllm_config

        vllm_config = get_current_vllm_config()
        if vllm_config.model_config is not None:
            hf_text_config = vllm_config.model_config.hf_text_config
            # Default of 1 guarantees the check below fails when the model
            # does not define qk_nope_head_dim at all.
            qk_nope_head_dim = getattr(hf_text_config, "qk_nope_head_dim", 1)
            if qk_nope_head_dim != 128:
                return (
                    f"FlashInfer MLA Sparse kernel requires qk_nope_head_dim == 128, "
                    f"but got {qk_nope_head_dim}"
                )
            # Check for index_topk which indicates sparse model
            if not hasattr(hf_text_config, "index_topk"):
                return "FlashInfer MLA Sparse requires model with index_topk config"
        return None

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,  # assumed to be 1 for MLA
        head_size: int,
        cache_dtype_str: str = "auto",
    ) -> tuple[int, ...]:
        # MLA stores a single latent head, so num_kv_heads is not part of
        # the shape.
        return (num_blocks, block_size, head_size)

    @classmethod
    def get_required_kv_cache_layout(cls) -> "KVCacheLayoutType | None":
        return "HND"
@dataclass
class FlashInferMLASparseMetadata(AttentionMetadata):
    """Attention metadata for FlashInfer MLA Sparse backend."""

    # Number of requests in the batch.
    num_reqs: int
    # Longest query length among the batched requests.
    max_query_len: int
    # Longest total sequence length (context + query) in the batch.
    max_seq_len: int
    # Total number of real (non-padding) tokens in the flattened batch.
    num_actual_tokens: int
    # Query start locations
    query_start_loc: torch.Tensor
    # Physical KV-cache slot for each token.
    slot_mapping: torch.Tensor
    # Per-request table of KV-cache block ids.
    block_table: torch.Tensor
    # For each flattened token, the index of the request it belongs to.
    req_id_per_token: torch.Tensor
    # Sequence lengths for all requests (context + query)
    seq_lens: torch.Tensor
    # Sparse-specific
    # KV-cache block size used to decode sparse indices into slots.
    block_size: int = 64
    # Number of top-k tokens each query attends to (model's index_topk).
    topk_tokens: int = 2048
class FlashInferMLASparseMetadataBuilder(
    AttentionMetadataBuilder[FlashInferMLASparseMetadata]
):
    """Builder for FlashInfer MLA Sparse attention metadata."""

    _cudagraph_support: ClassVar[AttentionCGSupport] = AttentionCGSupport.UNIFORM_BATCH

    def __init__(
        self,
        kv_cache_spec: AttentionSpec,
        layer_names: list[str],
        vllm_config: VllmConfig,
        device: torch.device,
    ) -> None:
        """Cache config handles and pre-allocate the per-token request-id buffer."""
        self.vllm_config = vllm_config
        self.layer_names = layer_names
        self.kv_cache_spec = kv_cache_spec
        self.model_config = vllm_config.model_config
        self.device = device
        self.mla_dims = get_mla_dims(self.model_config)
        # Top-k attended tokens per query, taken from the model config.
        self.topk_tokens = vllm_config.model_config.hf_config.index_topk
        # Persistent buffer sized for the worst-case token count so that
        # build() never reallocates (keeps addresses stable for cudagraphs).
        self.req_id_per_token_buffer = torch.empty(
            (vllm_config.scheduler_config.max_num_batched_tokens,),
            dtype=torch.int32,
            device=device,
        )

    def build(
        self,
        common_prefix_len: int,
        common_attn_metadata: CommonAttentionMetadata,
        fast_build: bool = False,
    ) -> FlashInferMLASparseMetadata:
        """Construct sparse MLA metadata from the batch-common metadata.

        Derives req_id_per_token — the owning request index for every token
        in the flattened batch — by run-length expansion of query_start_loc.
        """
        cm = common_attn_metadata
        num_tokens = cm.num_actual_tokens
        # Build req_id_per_token mapping
        starts = np.asarray(cm.query_start_loc_cpu, dtype=np.int32)
        seg_lengths = np.diff(starts)
        req_id_per_token = np.repeat(
            np.arange(seg_lengths.shape[0], dtype=np.int32), seg_lengths
        )
        # Zero-fill for cudagraphs
        self.req_id_per_token_buffer.fill_(0)
        # Async host->device copy; fill_ above must happen first so stale
        # entries past num_tokens are zero.
        self.req_id_per_token_buffer[: req_id_per_token.shape[0]].copy_(
            torch.from_numpy(req_id_per_token), non_blocking=True
        )
        req_id_per_token_tensor = self.req_id_per_token_buffer[:num_tokens]
        return FlashInferMLASparseMetadata(
            num_reqs=cm.num_reqs,
            max_query_len=cm.max_query_len,
            max_seq_len=cm.max_seq_len,
            num_actual_tokens=cm.num_actual_tokens,
            query_start_loc=cm.query_start_loc,
            slot_mapping=cm.slot_mapping,
            block_table=cm.block_table_tensor,
            req_id_per_token=req_id_per_token_tensor,
            seq_lens=cm.seq_lens,
            block_size=self.kv_cache_spec.block_size,
            topk_tokens=self.topk_tokens,
        )
# Process-wide FlashInfer workspace, allocated on first use and shared by
# all layers.
_fi_sparse_workspace: torch.Tensor | None = None


def _get_workspace_buffer(device: torch.device) -> torch.Tensor:
    """Return the shared workspace tensor, allocating it lazily on `device`."""
    global _fi_sparse_workspace
    if _fi_sparse_workspace is not None:
        return _fi_sparse_workspace
    _fi_sparse_workspace = torch.zeros(
        FLASHINFER_MLA_SPARSE_WORKSPACE_BUFFER_SIZE,
        dtype=torch.uint8,
        device=device,
    )
    return _fi_sparse_workspace
class FlashInferMLASparseImpl(SparseMLAAttentionImpl[FlashInferMLASparseMetadata]):
    """FlashInfer MLA Sparse implementation.

    Uses the TRT-LLM MLA kernel with sparse_mla_top_k parameter for
    sparse attention computation.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: list[float] | None,
        sliding_window: int | None,
        kv_cache_dtype: str,
        logits_soft_cap: float | None,
        attn_type: str,
        kv_sharing_target_layer_name: str | None,
        # MLA Specific Arguments
        topk_indice_buffer: torch.Tensor | None = None,
        indexer: "Indexer | None" = None,
        **mla_args,
    ) -> None:
        """Validate supported features and stash the MLA dimensions.

        Raises:
            NotImplementedError: if alibi_slopes, sliding_window or
                logits_soft_cap is set, or attn_type is not DECODER.
        """
        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "FlashInferMLASparseImpl does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap"
            )
        if attn_type != AttentionType.DECODER:
            raise NotImplementedError(
                "Encoder self-attention and "
                "encoder/decoder cross-attention "
                "are not implemented for "
                "FlashInferMLASparseImpl"
            )
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        self.kv_cache_dtype = kv_cache_dtype
        # MLA-specific dimensions
        self.kv_lora_rank: int = mla_args["kv_lora_rank"]
        self.qk_nope_head_dim: int = mla_args["qk_nope_head_dim"]
        self.qk_rope_head_dim: int = mla_args["qk_rope_head_dim"]
        assert indexer is not None, "Indexer required for sparse MLA"
        # Per-token top-k indices produced by the model's indexer module.
        self.topk_indices_buffer: torch.Tensor | None = indexer.topk_indices_buffer
        # Workspace and scales are resolved lazily on the first forward call.
        self._workspace_buffer: torch.Tensor | None = None
        self.bmm1_scale: float | None = None
        self.bmm2_scale: float | None = None

    def forward_mqa(
        self,
        q: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: FlashInferMLASparseMetadata,
        layer: AttentionLayer,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Run the sparse TRT-LLM MLA decode kernel.

        Args:
            q: Query tensor, or a (nope, rope) pair that is concatenated on
                the last dim.
            kv_c_and_k_pe_cache: Combined latent/rope KV cache.
            attn_metadata: Metadata built by FlashInferMLASparseMetadataBuilder.
            layer: Attention layer providing quantization scales.

        Returns:
            Tuple of (attention output, None) — no LSE is returned.
        """
        if isinstance(q, tuple):
            q = torch.cat(q, dim=-1)
        num_actual_toks = q.shape[0]
        assert self.topk_indices_buffer is not None
        topk_indices = self.topk_indices_buffer[:num_actual_toks]
        # Translate per-request token indices into physical cache slot
        # positions, and get the count of valid (non -1) indices per row.
        topk_indices_physical, seq_lens = triton_convert_req_index_to_global_index(
            attn_metadata.req_id_per_token[:num_actual_toks],
            attn_metadata.block_table,
            topk_indices,
            BLOCK_SIZE=attn_metadata.block_size,
            NUM_TOPK_TOKENS=topk_indices.shape[1],
            return_valid_counts=True,
        )
        if self._workspace_buffer is None:
            self._workspace_buffer = _get_workspace_buffer(q.device)
        # Scales are constant per layer; compute them once on first use.
        if self.bmm1_scale is None:
            self.bmm1_scale = layer._q_scale_float * layer._k_scale_float * self.scale
        if self.bmm2_scale is None:
            self.bmm2_scale = layer._v_scale_float
        o = trtllm_batch_decode_with_kv_cache_mla(
            query=q.unsqueeze(1),
            kv_cache=kv_c_and_k_pe_cache.unsqueeze(1),
            workspace_buffer=self._workspace_buffer,
            qk_nope_head_dim=self.qk_nope_head_dim,
            kv_lora_rank=self.kv_lora_rank,
            qk_rope_head_dim=self.qk_rope_head_dim,
            # Sparse mode: block_tables carries per-token physical slot
            # indices rather than a dense per-request block table.
            block_tables=topk_indices_physical.unsqueeze(1),
            seq_lens=seq_lens,
            max_seq_len=attn_metadata.topk_tokens,
            bmm1_scale=self.bmm1_scale,
            bmm2_scale=self.bmm2_scale,
            sparse_mla_top_k=attn_metadata.topk_tokens,
        )
        return o.view(-1, o.shape[-2], o.shape[-1]), None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/mla/flashinfer_mla_sparse.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/mla/sparse_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Utility functions for sparse MLA backends."""
import torch
from vllm.triton_utils import tl, triton
# Kernel with prefill workspace support and valid count tracking
@triton.jit
def _convert_req_index_to_global_index_kernel(
    req_id_ptr,  # int32 [num_tokens]
    block_table_ptr,  # int32 [num_requests, max_num_blocks_per_req]
    token_indices_ptr,  # int32 [num_tokens, NUM_TOPK_TOKENS]
    out_ptr,  # int32 [num_tokens, NUM_TOPK_TOKENS]
    valid_count_ptr,  # int32 [num_tokens] - output valid count per row
    prefill_request_id_ptr,  # int32 [num_tokens], -1 for decode, >=0 for prefill
    workspace_starts_ptr,  # int32 [num_prefill_reqs+1] or nullptr
    # shapes (compile-time where possible)
    max_num_blocks_per_req: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    BLOCK_N: tl.constexpr,  # tile width along columns
    HAS_PREFILL: tl.constexpr,
    COUNT_VALID: tl.constexpr,  # whether to count valid indices
    # strides (in elements)
    bt_stride0,
    bt_stride1,
    ti_stride0,
    ti_stride1,
    out_stride0,
    out_stride1,
):
    """Map per-request token indices to global cache slots, tile by tile.

    Each program handles one (token row, BLOCK_N-wide column tile) pair;
    invalid or out-of-range indices are written as -1.
    """
    # program_id(0) -> token_id (row)
    # program_id(1) -> tile index along columns
    token_id = tl.program_id(0)
    tile_id = tl.program_id(1)
    # Each program covers BLOCK_N consecutive columns
    indice_id = tile_id * BLOCK_N + tl.arange(0, BLOCK_N)
    # Load request id for this token (no mask: grid is exact)
    req = tl.load(req_id_ptr + token_id)
    # Load token indices for this tile
    ti_ptr = token_indices_ptr + token_id * ti_stride0 + indice_id * ti_stride1
    tok = tl.load(ti_ptr)  # int32
    # Only token == -1 should propagate as -1
    is_invalid_tok = tok < 0
    is_prefill = False
    if HAS_PREFILL:
        prefill_req_id = tl.load(prefill_request_id_ptr + token_id)
        is_prefill = prefill_req_id >= 0
    # Compute block id and in-block offset
    block_id = tok // BLOCK_SIZE
    inblock_off = tok % BLOCK_SIZE
    # Guard block_table access
    valid_block = (block_id < max_num_blocks_per_req) & (block_id >= 0)
    bt_ptr = block_table_ptr + req * bt_stride0 + block_id * bt_stride1
    is_invalid_tok |= ~valid_block
    # Masked load: prefill tokens skip the block table entirely.
    base = tl.load(bt_ptr, mask=valid_block & ~is_prefill, other=0)
    out_val = base * BLOCK_SIZE + inblock_off
    # Override with prefill output if prefill is enabled
    if HAS_PREFILL:
        workspace_start = tl.load(
            workspace_starts_ptr + prefill_req_id, mask=is_prefill, other=0
        )
        # Prefill tokens index into a contiguous workspace region instead
        # of the paged cache.
        prefill_out = workspace_start + tok
        out_val = tl.where(is_prefill, prefill_out, out_val)
    out_val = tl.where(is_invalid_tok, -1, out_val)
    # Store results
    out_ptr_ij = out_ptr + token_id * out_stride0 + indice_id * out_stride1
    tl.store(out_ptr_ij, out_val)
    # Count valid indices in this tile and atomically add to row total
    # (atomic because multiple column tiles contribute to the same row).
    if COUNT_VALID:
        tile_valid_count = tl.sum((~is_invalid_tok).to(tl.int32))
        tl.atomic_add(valid_count_ptr + token_id, tile_valid_count)
def triton_convert_req_index_to_global_index(
    req_id: torch.Tensor,  # int32 [num_tokens]
    block_table: torch.Tensor,  # int32 [num_requests, max_num_blocks_per_req]
    token_indices: torch.Tensor,  # int32 [num_tokens, NUM_TOPK_TOKENS]
    BLOCK_SIZE: int = 64,
    NUM_TOPK_TOKENS: int = 2048,
    BLOCK_N: int = 128,  # tile width along columns
    HAS_PREFILL_WORKSPACE: bool = False,
    prefill_workspace_request_ids: torch.Tensor | None = None,
    prefill_workspace_starts: torch.Tensor | None = None,
    return_valid_counts: bool = False,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """
    out[token_id, indice_id] =
        block_table[req_id[token_id],
                    token_indices[token_id, indice_id] // BLOCK_SIZE] * BLOCK_SIZE
        + token_indices[token_id, indice_id] % BLOCK_SIZE

    Only when token_indices[token_id, indice_id] == -1 do we output -1.
    For safety, we also output -1 if the derived block_id would be
    out-of-bounds.

    When HAS_PREFILL_WORKSPACE is True, prefill tokens are mapped to workspace offsets
    instead of global cache slots. prefill_workspace_request_ids and
    prefill_workspace_starts must be provided.

    prefill_workspace_request_ids: int32 [num_tokens], -1 for decode else
        prefill request index (maps to prefill_workspace_starts)
    prefill_workspace_starts: int32 [num_prefills], 0-indexed workspace
        starts for each prefill request

    When return_valid_counts is True, also returns the count of valid (non -1)
    indices per row, computed during the same kernel pass (no extra overhead).
    """
    assert req_id.dtype == torch.int32
    assert block_table.dtype == torch.int32
    assert token_indices.dtype == torch.int32
    assert token_indices.shape[1] == NUM_TOPK_TOKENS
    assert NUM_TOPK_TOKENS % BLOCK_N == 0, (
        f"NUM_TOPK_TOKENS ({NUM_TOPK_TOKENS}) must be divisible by BLOCK_N ({BLOCK_N})"
    )
    if HAS_PREFILL_WORKSPACE:
        assert prefill_workspace_request_ids is not None
        assert prefill_workspace_starts is not None
        assert prefill_workspace_request_ids.dtype == torch.int32
        assert prefill_workspace_starts.dtype == torch.int32
    num_tokens = req_id.shape[0]
    max_num_blocks_per_req = block_table.shape[1]
    tiles_per_row = NUM_TOPK_TOKENS // BLOCK_N
    # Ensure contiguous tensors on the same device
    req_id_c = req_id.contiguous()
    block_table_c = block_table.contiguous()
    token_indices_c = token_indices.contiguous()
    out = torch.empty_like(token_indices_c)
    # Allocate valid count buffer if needed (must be zero-initialized for atomics)
    valid_counts: torch.Tensor | None = None
    if return_valid_counts:
        valid_counts = torch.zeros(
            num_tokens, dtype=torch.int32, device=token_indices.device
        )
    # Strides in elements
    bt_stride0, bt_stride1 = block_table_c.stride()
    ti_stride0, ti_stride1 = token_indices_c.stride()
    out_stride0, out_stride1 = out.stride()
    # Prepare prefill pointers
    if HAS_PREFILL_WORKSPACE:
        assert prefill_workspace_request_ids is not None  # for mypy
        assert prefill_workspace_starts is not None  # for mypy
        assert prefill_workspace_request_ids.is_contiguous()
        assert prefill_workspace_starts.is_contiguous()
    # Exact 2D grid: tokens × column tiles
    grid = (num_tokens, tiles_per_row)
    # NOTE: positional launch arguments below must stay in the exact order
    # of the kernel's parameter list.
    _convert_req_index_to_global_index_kernel[grid](
        req_id_c,
        block_table_c,
        token_indices_c,
        out,
        valid_counts,
        prefill_workspace_request_ids,
        prefill_workspace_starts,
        # shapes / constexprs
        max_num_blocks_per_req,
        BLOCK_SIZE,
        BLOCK_N,
        HAS_PREFILL_WORKSPACE,
        return_valid_counts,
        # strides
        bt_stride0,
        bt_stride1,
        ti_stride0,
        ti_stride1,
        out_stride0,
        out_stride1,
    )
    if return_valid_counts:
        assert valid_counts is not None
        return out, valid_counts
    return out
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/mla/sparse_utils.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/activation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""MoE activation function enum and utilities."""
from enum import Enum
import torch
import torch.nn.functional as F
class MoEActivation(Enum):
    """Activation functions for MoE layers."""

    # Gated activations (gate * activation(up)) expect input of shape [..., 2*d]
    # and produce output of shape [..., d]
    SILU = "silu"
    GELU = "gelu"
    RELU2 = "relu2"
    SWIGLUOAI = "swigluoai"
    SWIGLUSTEP = "swiglustep"
    # Non-gated activations (no mul with gate) expect input of shape [..., d]
    # and produce output of shape [..., d].
    # NOTE: Non-gated activations require the "_no_mul" suffix to be present.
    SILU_NO_MUL = "silu_no_mul"
    GELU_NO_MUL = "gelu_no_mul"
    RELU2_NO_MUL = "relu2_no_mul"

    @property
    def is_gated(self) -> bool:
        """True when this activation uses the gate*activation(up) pattern.

        Gated variants consume an input whose last dim is twice the output
        size; the "_no_mul" suffix on the value marks the non-gated ones.
        """
        has_no_mul_suffix = self.value.endswith("_no_mul")
        return not has_no_mul_suffix

    @property
    def custom_op_name(self) -> str:
        """Name of the matching CustomOp in
        vllm/model_executor/layers/activation.py."""
        return _CUSTOM_OP_NAMES[self]

    def without_mul(self) -> "MoEActivation":
        """Return the non-gated twin of this activation.

        Falls back to self for members with no _no_mul twin (or members
        that are already non-gated).
        """
        return _WITHOUT_MUL.get(self, self)

    @classmethod
    def from_str(cls, s: str) -> "MoEActivation":
        """Parse a member from its string value (backward compatibility)."""
        match = next((member for member in cls if member.value == s), None)
        if match is not None:
            return match
        valid = [m.value for m in cls]
        raise ValueError(f"Unknown MoE activation: {s!r}. Valid activations: {valid}")
# Module-level lookup tables used by MoEActivation's properties/methods.

# Maps each activation to its CustomOp name in
# vllm/model_executor/layers/activation.py.
# NOTE(review): the *_NO_MUL variants map to the same op names as their
# gated counterparts ("silu_and_mul", "gelu_and_mul", "relu2") — confirm
# this is intentional for how custom_op_name is consumed downstream.
_CUSTOM_OP_NAMES: dict[MoEActivation, str] = {
    MoEActivation.SILU: "silu_and_mul",
    MoEActivation.GELU: "gelu_and_mul",
    MoEActivation.SWIGLUOAI: "swigluoai_and_mul",
    MoEActivation.SWIGLUSTEP: "swiglustep_and_mul",
    MoEActivation.RELU2: "relu2",
    MoEActivation.SILU_NO_MUL: "silu_and_mul",
    MoEActivation.GELU_NO_MUL: "gelu_and_mul",
    MoEActivation.RELU2_NO_MUL: "relu2",
}
# Gated activation -> its non-gated (_no_mul) counterpart. Activations
# absent here (SWIGLUOAI, SWIGLUSTEP, and the _no_mul members themselves)
# map to themselves via MoEActivation.without_mul().
_WITHOUT_MUL: dict[MoEActivation, MoEActivation] = {
    MoEActivation.SILU: MoEActivation.SILU_NO_MUL,
    MoEActivation.GELU: MoEActivation.GELU_NO_MUL,
    MoEActivation.RELU2: MoEActivation.RELU2_NO_MUL,
}
def activation_without_mul(activation: str) -> str:
    """Map an activation name to the name of its non-gated variant.

    Args:
        activation: Activation name, e.g. "silu" or "gelu".

    Returns:
        The non-gated name (e.g. "silu_no_mul"); names without a non-gated
        variant come back unchanged.
    """
    parsed = MoEActivation.from_str(activation)
    return parsed.without_mul().value
def apply_moe_activation(
    activation: MoEActivation,
    output: torch.Tensor,
    input: torch.Tensor,
) -> torch.Tensor:
    """Apply a MoE activation, writing the result into ``output`` in place.

    Args:
        activation: Which activation to apply.
        output: Preallocated 2-D destination tensor. For gated activations
            its last dim is half of ``input``'s; otherwise shapes match.
        input: 2-D source tensor. NOTE: for RELU2_NO_MUL, ``input`` is
            mutated in place (see below).

    Returns:
        ``output`` after being filled.

    Raises:
        ValueError: For activations with no dispatch branch below (this
            includes the gated RELU2).
    """
    assert input.dim() == 2, "Input must be 2D"
    assert output.dim() == 2, "Output must be 2D"
    # Shape contract depends on gating: gated ops halve the last dim.
    if activation.is_gated:
        assert output.size(-1) * 2 == input.size(-1), (
            f"{activation.value} expects 2x ratio: "
            f"{output.size(-1) * 2} vs {input.size(-1)}"
        )
    else:
        assert output.size(-1) == input.size(-1), (
            f"{activation.value} expects equal sizes: "
            f"{output.size(-1)} vs {input.size(-1)}"
        )
    # Activations with gated multiplication (gate × activation(up)),
    # dispatched to custom CUDA/Triton kernels.
    if activation == MoEActivation.SILU:
        torch.ops._C.silu_and_mul(output, input)
    elif activation == MoEActivation.GELU:
        torch.ops._C.gelu_and_mul(output, input)
    elif activation == MoEActivation.SWIGLUOAI:
        torch.ops._C.swigluoai_and_mul(output, input)
    elif activation == MoEActivation.SWIGLUSTEP:
        # Imported lazily inside the branch — presumably to defer the
        # Triton-backed module import until actually needed.
        from vllm.model_executor.layers.activation import swiglustep_and_mul_triton
        swiglustep_and_mul_triton(output, input)
    # Activations without gated multiplication, computed with plain torch.
    elif activation == MoEActivation.SILU_NO_MUL:
        output.copy_(F.silu(input))
    elif activation == MoEActivation.GELU_NO_MUL:
        output.copy_(F.gelu(input))
    elif activation == MoEActivation.RELU2_NO_MUL:
        # relu(x)^2; the relu mutates ``input`` in place, then the square
        # is written into ``output``.
        F.relu(input, inplace=True)
        torch.square(input, out=output)
    else:
        raise ValueError(f"Unsupported FusedMoe activation: {activation}")
    return output
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/activation.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_unquantized_backend_selection.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import patch
import pytest
from tests.kernels.moe.utils import make_dummy_moe_config
from vllm.model_executor.layers.fused_moe.oracle.unquantized import (
UnquantizedMoeBackend,
select_unquantized_moe_backend,
)
from vllm.platforms import current_platform
@pytest.mark.parametrize(
    "platform_method,expected_backend",
    [
        ("is_cuda", UnquantizedMoeBackend.TRITON),  # Default CUDA without FlashInfer
        ("is_rocm", UnquantizedMoeBackend.TRITON),
        ("is_cpu", UnquantizedMoeBackend.CPU),
        ("is_xpu", UnquantizedMoeBackend.XPU),
        ("is_tpu", UnquantizedMoeBackend.TPU),
        ("is_out_of_tree", UnquantizedMoeBackend.OOT),
    ],
)
@patch(
    "vllm.model_executor.layers.fused_moe.oracle.unquantized.has_flashinfer",
    return_value=False,
)
def test_select_default_backend_by_platform(
    # Injected by @patch; unused in the body (patching alone is the point).
    mock_has_flashinfer,
    # NOTE(review): monkeypatch fixture is declared but unused here.
    monkeypatch,
    platform_method,
    expected_backend,
):
    """Each platform maps to its default backend when FlashInfer is
    unavailable (has_flashinfer patched to False)."""
    with patch(
        "vllm.model_executor.layers.fused_moe.oracle.unquantized.current_platform"
    ) as mock_platform:
        # Set all platform checks to False
        mock_platform.is_cuda.return_value = False
        mock_platform.is_rocm.return_value = False
        mock_platform.is_cpu.return_value = False
        mock_platform.is_xpu.return_value = False
        mock_platform.is_tpu.return_value = False
        mock_platform.is_out_of_tree.return_value = False
        # Set only the specified platform to True
        getattr(mock_platform, platform_method).return_value = True
        moe_config = make_dummy_moe_config()
        selected_backend = select_unquantized_moe_backend(
            moe_config=moe_config,
            use_ep=False,
            use_dp=False,
        )
        assert selected_backend == expected_backend
@patch(
    "vllm.model_executor.layers.fused_moe.oracle.unquantized.has_flashinfer",
    return_value=True,
)
@patch(
    "vllm.model_executor.layers.fused_moe.oracle.unquantized.is_supported_config_trtllm_bf16",
    return_value=(True, None),
)
@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="Only supported on NVIDIA platforms."
)
def test_select_cuda_flashinfer_trtllm_backend(
    # Stacked @patch decorators inject mocks bottom-up, so the
    # is_supported_config_trtllm_bf16 mock arrives first. The parameter
    # names previously labeled these two mocks in the wrong order.
    mock_is_supported_trtllm, mock_has_flashinfer, monkeypatch
):
    """Test CUDA backend selection when FlashInfer TRTLLM is available and enabled."""
    with patch(
        "vllm.model_executor.layers.fused_moe.oracle.unquantized.current_platform"
    ) as mock_platform:
        # Set as CUDA platform
        mock_platform.is_cuda.return_value = True
        mock_platform.is_rocm.return_value = False
        mock_platform.is_cpu.return_value = False
        mock_platform.is_xpu.return_value = False
        mock_platform.is_tpu.return_value = False
        mock_platform.is_out_of_tree.return_value = False
        # Enable the FlashInfer FP16/BF16 MoE path via env var.
        monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP16", "1")
        moe_config = make_dummy_moe_config()
        selected_backend = select_unquantized_moe_backend(
            moe_config=moe_config,
            use_ep=True,
            use_dp=False,
        )
        assert selected_backend == UnquantizedMoeBackend.FLASHINFER_TRTLLM
@patch(
    "vllm.model_executor.layers.fused_moe.oracle.unquantized.has_flashinfer",
    return_value=True,
)
@patch(
    "vllm.model_executor.layers.fused_moe.oracle.unquantized.is_supported_config_trtllm_bf16",
    return_value=(False, None),
)
@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="Only supported on NVIDIA platforms."
)
def test_select_cuda_flashinfer_cutlass_backend(
    # Stacked @patch decorators inject mocks bottom-up, so the
    # is_supported_config_trtllm_bf16 mock arrives first. The parameter
    # names previously labeled these two mocks in the wrong order.
    mock_is_supported_trtllm, mock_has_flashinfer, monkeypatch
):
    """Test CUDA backend selection when FlashInfer TRTLLM is not available
    and FlashInfer CUTLASS is available."""
    with patch(
        "vllm.model_executor.layers.fused_moe.oracle.unquantized.current_platform"
    ) as mock_platform:
        # Set as CUDA platform with Hopper capability
        mock_platform.is_cuda.return_value = True
        mock_platform.is_rocm.return_value = False
        mock_platform.is_cpu.return_value = False
        mock_platform.is_xpu.return_value = False
        mock_platform.is_tpu.return_value = False
        mock_platform.is_out_of_tree.return_value = False
        mock_platform.has_device_capability.return_value = True  # SM90+
        # Enable FlashInfer via env var
        monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_FP16", "1")
        moe_config = make_dummy_moe_config()
        selected_backend = select_unquantized_moe_backend(
            moe_config=moe_config,
            use_ep=True,  # CUTLASS requires EP
            use_dp=False,  # CUTLASS doesn't support DP
        )
        assert selected_backend == UnquantizedMoeBackend.FLASHINFER_CUTLASS
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_unquantized_backend_selection.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/funasr.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import math
from collections.abc import Iterable, Mapping, Sequence
from typing import Annotated, Literal, cast
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import (
BatchFeature,
Qwen3Config,
)
from vllm.config import ModelConfig, SpeechToTextConfig, VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.inputs.data import PromptType
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY
from vllm.model_executor.layers.attention.mm_encoder_attention import (
MMEncoderAttention,
)
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.whisper_utils import (
ISO639_1_SUPPORTED_LANGS,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import MultiModalDataItems, MultiModalDataParser
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
)
from vllm.transformers_utils.processor import cached_processor_from_config
from vllm.transformers_utils.processors.funasr_processor import FunASRFeatureExtractor
from vllm.utils.jsontree import json_map_leaves
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import (
MultiModalEmbeddings,
SupportsMultiModal,
SupportsTranscription,
_require_is_multimodal,
)
from .qwen3 import Qwen3Model
from .utils import (
AutoWeightsLoader,
WeightsMapper,
_merge_multimodal_embeddings,
maybe_prefix,
)
# Module-wide logger, namespaced by this module's import path.
logger = init_logger(__name__)
def sequence_mask(lengths, maxlen=None, dtype=torch.float32, device=None):
    """Build a padding mask from per-sequence lengths.

    Args:
        lengths: 1-D integer tensor of valid sequence lengths.
        maxlen: Number of mask columns; defaults to ``lengths.max()``.
        dtype: Output dtype — valid positions become 1, padding becomes 0.
        device: Optional device to move the result to.

    Returns:
        Tensor of shape ``[len(lengths), maxlen]`` in ``dtype``.
    """
    if maxlen is None:
        maxlen = lengths.max()
    positions = torch.arange(0, maxlen, 1).to(lengths.device)
    # Broadcast [maxlen] against [len(lengths), 1]: True where pos < length.
    valid = positions < lengths.unsqueeze(-1)
    valid = valid.detach().type(dtype)
    if device is None:
        return valid
    return valid.to(device)
class LayerNorm(torch.nn.LayerNorm):
    """LayerNorm with eps=1e-12 and a configurable normalization axis.

    For ``dim != -1`` the target axis is swapped into the last position,
    normalized, and swapped back.
    """

    def __init__(self, nout, dim=-1):
        super().__init__(nout, eps=1e-12)
        # Axis to normalize over; -1 keeps standard last-dim behavior.
        self.dim = dim

    def forward(self, x: torch.Tensor):
        if self.dim != -1:
            moved = x.transpose(self.dim, -1)
            normed = super().forward(moved)
            return normed.transpose(self.dim, -1)
        return super().forward(x)
class EncoderLayerSANM(nn.Module):
    """Pre-norm SANM encoder layer: self-attention then feed-forward, each
    preceded by LayerNorm and wrapped in a residual connection.

    The residual around self-attention is applied only when the layer's
    input and output widths match (``in_size == size``).
    """

    def __init__(
        self,
        in_size: int,
        size: int,
        self_attn: nn.Module,
        feed_forward: nn.Module,
        normalize_before=True,
    ):
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(in_size)
        self.norm2 = LayerNorm(size)
        self.in_size = in_size
        self.size = size
        # NOTE(review): stored but never consulted in forward(), which
        # always normalizes before each sublayer — confirm intent.
        self.normalize_before = normalize_before

    def forward(
        self,
        hidden_states: torch.Tensor,
        mask: torch.Tensor | None = None,
        cache=None,
        mask_shfit_chunk=None,
        mask_att_chunk_encoder=None,
    ):
        skip = hidden_states
        attn_out = self.self_attn(
            self.norm1(hidden_states),
            mask,
            mask_shfit_chunk=mask_shfit_chunk,
            mask_att_chunk_encoder=mask_att_chunk_encoder,
        )
        # Residual only when attention preserves the feature width.
        hidden_states = skip + attn_out if self.in_size == self.size else attn_out
        hidden_states = hidden_states + self.feed_forward(self.norm2(hidden_states))
        return hidden_states, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder
class MultiHeadedAttentionSANM(nn.Module):
    """Multi-head self-attention fused with an FSMN memory branch (SANM).

    The attention path is scaled dot-product attention over a single fused
    QKV projection. The FSMN path runs a depthwise Conv1d over the value
    stream; its output is added to the attention output in forward().
    """

    def __init__(
        self,
        n_head: int,
        in_feat: int,
        n_feat: int,
        kernel_size: int,
        sanm_shift: int = 0,
    ):
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.out_proj = ReplicatedLinear(
            input_size=n_feat,
            output_size=n_feat,
            bias=True,
        )
        # Fused projection producing Q, K and V concatenated on dim -1.
        self.linear_q_k_v = ReplicatedLinear(
            input_size=in_feat,
            output_size=n_feat * 3,
            bias=True,
        )
        self.attn = None
        # Depthwise (groups=n_feat) conv implementing the FSMN memory.
        self.fsmn_block = nn.Conv1d(
            n_feat, n_feat, kernel_size, stride=1, padding=0, groups=n_feat, bias=False
        )
        # Padding splits the conv receptive field around the current frame;
        # sanm_shift > 0 shifts it toward more left (past) context.
        left_padding = (kernel_size - 1) // 2
        if sanm_shift > 0:
            left_padding = left_padding + sanm_shift
        right_padding = kernel_size - 1 - left_padding
        self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)

    def forward_fsmn(
        self,
        inputs: torch.Tensor,
        mask: torch.Tensor,
        mask_shfit_chunk: torch.Tensor = None,
    ):
        """FSMN memory branch: depthwise conv over time plus a residual.

        ``inputs`` is the flat value stream [b, t, d]; ``mask`` (if given)
        zeroes padded frames both before and after the convolution.
        """
        b, t, d = inputs.size()
        if mask is not None:
            mask = torch.reshape(mask, (b, -1, 1))
            if mask_shfit_chunk is not None:
                mask = mask * mask_shfit_chunk
            inputs = inputs * mask
        # Conv1d expects [b, d, t]; padding keeps output length equal to t.
        x = inputs.transpose(1, 2)
        x = self.pad_fn(x)
        x = self.fsmn_block(x)
        x = x.transpose(1, 2)
        x += inputs
        if mask is not None:
            x = x * mask
        return x

    def forward_qkv(self, x: torch.Tensor):
        """Project x to per-head Q/K/V of shape [b, h, t, d_k], plus the
        flat (un-headed) V used by the FSMN branch."""
        b, t, d = x.size()
        q_k_v, _ = self.linear_q_k_v(x)
        q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)
        q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(1, 2)
        k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(1, 2)
        v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(1, 2)
        return q_h, k_h, v_h, v

    def forward_attention(
        self,
        value: torch.Tensor,
        scores: torch.Tensor,
        mask: torch.Tensor,
        mask_att_chunk_encoder: torch.Tensor = None,
    ):
        """Masked softmax over scores, weighted value sum, output proj."""
        n_batch = value.size(0)
        if mask is not None:
            if mask_att_chunk_encoder is not None:
                mask = mask * mask_att_chunk_encoder
            mask = mask.unsqueeze(1).eq(0)
            # Masked positions get -inf before softmax and exactly 0 after.
            min_value = -float("inf")
            scores = scores.masked_fill(mask, min_value)
            attn = torch.softmax(scores, dim=-1).masked_fill(mask, 0.0)
        else:
            attn = torch.softmax(scores, dim=-1)
        p_attn = attn
        x = torch.matmul(p_attn, value)
        # Merge heads back: [b, h, t, d_k] -> [b, t, h*d_k].
        x = x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
        out, _ = self.out_proj(x)
        return out

    def forward(
        self,
        hidden_states: torch.Tensor,
        mask: torch.Tensor,
        mask_shfit_chunk: torch.Tensor = None,
        mask_att_chunk_encoder: torch.Tensor = None,
    ):
        """Attention + FSMN memory; both branches share the same V stream."""
        q_h, k_h, v_h, v = self.forward_qkv(hidden_states)
        fsmn_memory = self.forward_fsmn(v, mask, mask_shfit_chunk)
        # Standard 1/sqrt(d_k) query scaling for dot-product attention.
        q_h = q_h * self.d_k ** (-0.5)
        scores = torch.matmul(q_h, k_h.transpose(-2, -1))
        att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)
        return att_outs + fsmn_memory
class SinusoidalPositionEncoder(torch.nn.Module):
    """Adds non-learned sinusoidal position encodings to its input."""

    def __init__(self, d_model=80):
        # NOTE(review): d_model is accepted but unused; the encoding depth
        # is taken from the input's feature dim at forward time — confirm.
        super().__init__()

    def encode(
        self,
        positions: torch.Tensor = None,
        depth: int = None,
        dtype: torch.dtype = torch.float32,
    ):
        """Return [1, len(positions), depth] sin/cos position encodings."""
        batch_size = positions.size(0)
        positions = positions.type(dtype)
        device = positions.device
        # Geometric timescale progression spanning 1 .. 10000.
        log_increment = torch.log(
            torch.tensor([10000], dtype=dtype, device=device)
        ) / (depth / 2 - 1)
        inv_timescales = torch.exp(
            torch.arange(depth / 2, device=device).type(dtype) * (-log_increment)
        )
        inv_timescales = torch.reshape(inv_timescales, [batch_size, -1])
        # First half of the feature dim carries sin, second half cos.
        angles = torch.reshape(positions, [1, -1, 1]) * torch.reshape(
            inv_timescales, [1, 1, -1]
        )
        return torch.cat([torch.sin(angles), torch.cos(angles)], dim=2).type(dtype)

    def forward(self, hidden_states: torch.Tensor):
        _, timesteps, feat_dim = hidden_states.size()
        # Positions are 1-based and shaped [1, T] so the encoding
        # broadcasts across the batch dimension.
        positions = torch.arange(1, timesteps + 1, device=hidden_states.device)[None, :]
        pos_enc = self.encode(positions, feat_dim, hidden_states.dtype)
        return hidden_states + pos_enc.to(hidden_states.device)
class SenseVoiceEncoderSmall(nn.Module):
    """SenseVoice-small audio encoder built from SANM layers.

    Layout: one width-changing layer (``encoders0``: input_size ->
    output_size), ``num_blocks - 1`` main layers (``encoders``) followed by
    ``after_norm``, then ``tp_blocks`` trailing layers (``tp_encoders``)
    followed by ``tp_norm``.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        tp_blocks: int = 0,
        attention_dropout_rate: float = 0.0,
        normalize_before: bool = True,
        kernel_size: int = 11,
        sanm_shift: int = 0,
        **kwargs,
    ):
        super().__init__()
        self._output_size = output_size
        self.embed = SinusoidalPositionEncoder()
        # NOTE(review): stored but not referenced elsewhere in this class.
        self.normalize_before = normalize_before
        positionwise_layer = PositionwiseFeedForward
        positionwise_layer_args = (
            output_size,
            linear_units,
        )
        encoder_selfattn_layer = MultiHeadedAttentionSANM
        # First layer maps input_size -> output_size ...
        encoder_selfattn_layer_args0 = (
            attention_heads,
            input_size,
            output_size,
            kernel_size,
            sanm_shift,
        )
        # ... all remaining layers stay at output_size.
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            output_size,
            kernel_size,
            sanm_shift,
        )
        self.encoders0 = nn.ModuleList(
            [
                EncoderLayerSANM(
                    input_size,
                    output_size,
                    encoder_selfattn_layer(*encoder_selfattn_layer_args0),
                    positionwise_layer(*positionwise_layer_args),
                )
                for i in range(1)
            ]
        )
        self.encoders = nn.ModuleList(
            [
                EncoderLayerSANM(
                    output_size,
                    output_size,
                    encoder_selfattn_layer(*encoder_selfattn_layer_args),
                    positionwise_layer(*positionwise_layer_args),
                )
                for i in range(num_blocks - 1)
            ]
        )
        self.tp_encoders = nn.ModuleList(
            [
                EncoderLayerSANM(
                    output_size,
                    output_size,
                    encoder_selfattn_layer(*encoder_selfattn_layer_args),
                    positionwise_layer(*positionwise_layer_args),
                )
                for i in range(tp_blocks)
            ]
        )
        self.after_norm = LayerNorm(output_size)
        self.tp_norm = LayerNorm(output_size)

    def output_size(self) -> int:
        """Feature width produced by this encoder."""
        return self._output_size

    def forward(
        self,
        xs_pad: torch.Tensor,
        ilens: torch.Tensor,
    ):
        """Encode padded features [B, T, F] given valid lengths ``ilens``.

        Returns a tuple of (encoded features, output lengths).
        """
        maxlen = xs_pad.shape[1]
        # [B, 1, T] padding mask carried through all encoder layers.
        masks = sequence_mask(
            ilens, maxlen=maxlen, dtype=ilens.dtype, device=ilens.device
        )[:, None, :]
        # In-place scale by sqrt(output_size) before position encoding;
        # note this mutates the caller's tensor.
        xs_pad *= self.output_size() ** 0.5
        xs_pad = self.embed(xs_pad)
        for layer_idx, encoder_layer in enumerate(self.encoders0):
            encoder_outs = encoder_layer(xs_pad, masks)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        for layer_idx, encoder_layer in enumerate(self.encoders):
            encoder_outs = encoder_layer(xs_pad, masks)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        xs_pad = self.after_norm(xs_pad)
        # Output lengths = number of unmasked frames per sequence.
        olens = masks.squeeze(1).sum(1).int()
        for layer_idx, encoder_layer in enumerate(self.tp_encoders):
            encoder_outs = encoder_layer(xs_pad, masks)
            xs_pad, masks = encoder_outs[0], encoder_outs[1]
        xs_pad = self.tp_norm(xs_pad)
        return xs_pad, olens
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward block with a ReLU in between."""

    def __init__(self, idim: int, hidden_units: int):
        super().__init__()
        self.w_1 = ColumnParallelLinear(
            input_size=idim,
            output_size=hidden_units,
            bias=True,
        )
        self.w_2 = RowParallelLinear(
            input_size=hidden_units,
            output_size=idim,
            bias=True,
        )
        self.activation = _ACTIVATION_REGISTRY["relu"]

    def forward(self, hidden_states: torch.Tensor):
        # Parallel linears return (tensor, bias/None); keep the tensor only.
        projected, _ = self.w_1(hidden_states)
        activated = self.activation(projected)
        out, _ = self.w_2(activated)
        return out
class EncoderLayer(nn.Module):
    """Pre-norm transformer encoder layer: self-attention + feed-forward,
    each preceded by LayerNorm and wrapped in a residual connection."""

    def __init__(
        self,
        size: int,
        self_attn: nn.Module,
        feed_forward: nn.Module,
    ):
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = LayerNorm(size)
        self.norm2 = LayerNorm(size)

    def forward(self, hidden_states: torch.Tensor):
        # Attention sublayer; cu_seqlens/max_seqlen are not used here.
        hidden_states = hidden_states + self.self_attn(
            self.norm1(hidden_states), None, None
        )
        # Feed-forward sublayer.
        return hidden_states + self.feed_forward(self.norm2(hidden_states))
class FunASRAudioAttention(nn.Module):
    """Multi-head self-attention over audio frames (adaptor blocks).

    Uses the shared MMEncoderAttention kernel; heads are partitioned
    across tensor-parallel ranks.
    """

    def __init__(
        self,
        num_heads: int,
        embed_dim: int,
        prefix: str = "",
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = self.embed_dim // self.num_heads
        tp_size = get_tensor_model_parallel_world_size()
        # Heads are split evenly across tensor-parallel ranks.
        self.num_local_heads = self.num_heads // tp_size
        if (self.head_dim * self.num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: "
                f"{self.embed_dim} and `num_heads`: {self.num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.qkv = QKVParallelLinear(
            hidden_size=self.embed_dim,
            head_size=self.head_dim,
            total_num_heads=self.num_heads,
            total_num_kv_heads=self.num_heads,
            bias=True,
            prefix=f"{prefix}.qkv",
        )
        self.out_proj = RowParallelLinear(
            input_size=self.embed_dim,
            output_size=self.embed_dim,
            bias=True,
            prefix=f"{prefix}.out_proj",
        )
        self.attn = MMEncoderAttention(
            num_heads=self.num_local_heads,
            head_size=self.head_dim,
            scale=self.scaling,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        max_seqlen: torch.Tensor | None,
    ) -> torch.Tensor:
        """Attend over [bs, seq, embed_dim]; returns the same shape."""
        bs, seq_length, _ = hidden_states.size()
        qkv, _ = self.qkv(hidden_states)
        # The fused QKV projection splits into three equal chunks.
        q, k, v = qkv.chunk(3, dim=-1)
        q = q.view(bs, seq_length, -1, self.head_dim)
        k = k.view(bs, seq_length, -1, self.head_dim)
        v = v.view(bs, seq_length, -1, self.head_dim)
        attn_output = self.attn(
            query=q,
            key=k,
            value=v,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        attn_output = attn_output.view(bs, seq_length, -1)
        output, _ = self.out_proj(attn_output)
        return output
class Transformer(nn.Module):
    """Audio adaptor: downsamples encoder frames by ``downsample_rate``
    via frame stacking, projects to the LLM hidden size with a two-layer
    MLP, and optionally refines with a small stack of encoder layers.
    """

    def __init__(
        self,
        downsample_rate=2,
        encoder_dim=1280,
        llm_dim=4096,
        ffn_dim: int = 2048,
        prefix: str = "",
        **kwargs,
    ):
        super().__init__()
        # k consecutive frames are concatenated along the feature dim.
        self.k = downsample_rate
        self.encoder_dim = encoder_dim
        self.llm_dim = llm_dim
        self.linear1 = ColumnParallelLinear(
            input_size=self.encoder_dim * self.k,
            output_size=ffn_dim,
            bias=True,
        )
        self.relu = nn.ReLU()
        self.linear2 = RowParallelLinear(
            input_size=ffn_dim,
            output_size=self.llm_dim,
            bias=True,
        )
        self.blocks = None
        if kwargs.get("n_layer", 2) > 0:
            self.blocks = nn.ModuleList(
                [
                    EncoderLayer(
                        llm_dim,
                        FunASRAudioAttention(
                            kwargs.get("attention_heads", 8),
                            llm_dim,
                            prefix=f"{prefix}.self_attn",
                        ),
                        PositionwiseFeedForward(
                            llm_dim,
                            llm_dim // 4,
                        ),
                    )
                    for _ in range(kwargs.get("n_layer", 2))
                ]
            )

    def forward(self, hidden_states: torch.Tensor, ilens: int = 0):
        """Downsample-and-project ``hidden_states``.

        Args:
            hidden_states: Encoder output of shape [B, T, D].
            ilens: Valid per-sequence input lengths.

        Returns:
            Tuple of (states [B, ceil(T/k), llm_dim], downsampled lengths).
        """
        batch_size, seq_len, dim = hidden_states.size()
        # Right-pad T so it divides evenly into chunks of k frames.
        chunk_num = (seq_len - 1) // self.k + 1
        pad_num = chunk_num * self.k - seq_len
        hidden_states = F.pad(hidden_states, (0, 0, 0, pad_num, 0, 0), value=0.0)
        # Stack each group of k frames into one feature vector.
        # (Previously there was an unused `seq_len = hidden_states.size(1)`
        # reassignment here; removed as dead code.)
        hidden_states = hidden_states.contiguous()
        hidden_states = hidden_states.view(batch_size, chunk_num, dim * self.k)
        hidden_states, _ = self.linear1(hidden_states)
        hidden_states = self.relu(hidden_states)
        hidden_states, _ = self.linear2(hidden_states)
        # Lengths shrink by the same downsampling factor. (The previous
        # `olens = None` assignment was dead — immediately overwritten.)
        olens = (ilens - 1) // self.k + 1
        if self.blocks is not None:
            for block in self.blocks:
                hidden_states = block(hidden_states)
        return hidden_states, olens
class FunASRAudioInputs(TensorSchema):
    """
    Validated audio inputs for FunASR.

    Dimensions:
        - b: Batch size
        - nmb: Number of mel bins
        - t: Time frames (M)
    """

    # Per-item audio feature tensors, or None when no audio is present.
    # NOTE(review): exact feature semantics come from FunASRFeatureExtractor
    # — confirm.
    input_features: Annotated[
        list[torch.Tensor] | None,
        TensorShape("b", "nmb", "t"),
    ]
    # Valid frame counts per audio item, parallel to input_features.
    speech_lengths: Annotated[
        list[torch.Tensor] | None,
        TensorShape("b"),
    ]
class FunASREncoder(nn.Module):
    """SenseVoice audio encoder plus the audio-to-LLM adaptor."""

    def __init__(
        self, *, vllm_config: VllmConfig, prefix: str = "", init_in_fp32: bool = False
    ):
        super().__init__()
        # Encoder hyperparameters come from the HF config's
        # audio_encoder_conf; input_size is fixed at 560 features.
        self.audio_encoder = SenseVoiceEncoderSmall(
            input_size=560, **vllm_config.model_config.hf_config.audio_encoder_conf
        )
        # NOTE(review): the adaptor's prefix is "audio_encoder" rather than
        # "audio_adaptor" — confirm this matches checkpoint layer names.
        self.audio_adaptor = Transformer(
            downsample_rate=1,
            use_low_frame_rate=True,
            ffn_dim=2048,
            llm_dim=1024,
            encoder_dim=512,
            n_layer=2,
            freeze=True,
            prefix=maybe_prefix(prefix, "audio_encoder"),
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights with mapping from HuggingFace format.

        Separate q/k/v projection shards are routed into the fused
        ``self_attn.qkv`` parameter; everything else loads directly.
        Returns the set of (mapped) parameter names that were loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("self_attn.qkv.", "self_attn.q_proj.", "q"),
            ("self_attn.qkv.", "self_attn.k_proj.", "k"),
            ("self_attn.qkv.", "self_attn.v_proj.", "v"),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # for/else: no stacked mapping matched — load directly.
                # Note: the name is recorded even when no matching
                # parameter exists (param is None).
                param = params_dict.get(name)
                if param is not None:
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class FunASRModel(nn.Module):
    """FunASR backbone: SenseVoice audio encoder feeding a Qwen3 decoder."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.encoder = FunASREncoder(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "encoder")
        )
        self.decoder = Qwen3Model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "decoder")
        )

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the decoder; audio content arrives pre-merged via
        ``inputs_embeds``."""
        return self.decoder(
            input_ids=input_ids,
            positions=positions,
            inputs_embeds=inputs_embeds,
        )

    def get_encoder_outputs(
        self,
        speech: torch.Tensor | list[torch.Tensor] | None,
        speech_lengths: torch.Tensor | list[torch.Tensor] | None,
    ) -> torch.Tensor | None:
        """Run the audio encoder + adaptor and return audio embeddings."""
        # NOTE(review): feat_permute is forced False on every call, so the
        # permute branch is currently dead — confirm before removing it.
        self.feat_permute = False
        feats = speech.permute(0, 2, 1) if self.feat_permute else speech
        encoder_out, encoder_out_lens = self.encoder.audio_encoder(
            feats, speech_lengths
        )
        adapted, _ = self.encoder.audio_adaptor(encoder_out, encoder_out_lens)
        return adapted
class FunASRProcessingInfo(BaseProcessingInfo):
    """Static multimodal processing metadata for FunASR."""

    def get_hf_config(self) -> Qwen3Config:
        """Fetch the HF config, typed as Qwen3Config."""
        return self.ctx.get_hf_config(Qwen3Config)

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        """At most one audio clip per prompt."""
        return {"audio": 1}

    def get_feature_extractor(self, **kwargs: object) -> FunASRFeatureExtractor:
        """Return the HF processor's feature extractor, type-checked."""
        extractor = self.get_hf_processor(**kwargs).feature_extractor  # type: ignore
        assert isinstance(extractor, FunASRFeatureExtractor)
        return extractor

    def get_data_parser(self) -> MultiModalDataParser:
        """Parser that resamples audio to the extractor's rate, mono."""
        extractor = self.get_feature_extractor()
        return MultiModalDataParser(
            target_sr=extractor.sampling_rate,
            target_channels=self.get_target_channels(),
        )

    def get_target_channels(self) -> int:
        """Audio is downmixed to a single channel."""
        return 1

    def get_num_audio_tokens(self) -> int:
        """Audio token budget, taken from the HF config."""
        return self.get_hf_config().max_source_positions
class FunASRDummyInputsBuilder(BaseDummyInputsBuilder[FunASRProcessingInfo]):
    """Builds placeholder text and audio used for profiling/warmup."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """One audio placeholder token per requested audio item."""
        return "<|AUDIO|>" * mm_counts.get("audio", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Synthesize maximum-length dummy audio clips."""
        extractor = self.info.get_feature_extractor()
        # Longest supported clip: chunk_length seconds at the target rate.
        clip_samples = extractor.chunk_length * extractor.sampling_rate
        return {
            "audio": self._get_dummy_audios(
                length=clip_samples,
                num_audios=mm_counts.get("audio", 0),
                overrides=mm_options.get("audio"),
            ),
        }
class FunASRMultiModalProcessor(BaseMultiModalProcessor[FunASRProcessingInfo]):
    """Bridges the HF processor: runs audio feature extraction and expands
    the audio placeholder to one token per output frame."""

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Invoke the HF processor, renaming "audios" -> "audio" and
        injecting the extractor's sampling rate."""
        if mm_data:
            feature_extractor = self.info.get_feature_extractor(**mm_kwargs)
            mm_data = dict(audio=mm_data.pop("audios"))
            mm_kwargs = dict(
                **mm_kwargs,
                sampling_rate=feature_extractor.sampling_rate,
            )
        processed_outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )
        # The processor may emit token ids under "labels"; normalize the key.
        if "labels" in processed_outputs:
            processed_outputs["input_ids"] = processed_outputs.pop("labels")
        return processed_outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """All audio fields are batched per audio item."""
        return dict(
            input_features=MultiModalFieldConfig.batched("audio"),
            speech_lengths=MultiModalFieldConfig.batched("audio"),
            fake_token_len=MultiModalFieldConfig.batched("audio"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Replace each audio placeholder token with N copies, where N is
        the per-item ``fake_token_len`` (or the audio-embedding length)."""
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        audio_token_id = processor.audio_token_id
        out_mm_data = out_mm_kwargs.get_data()
        fake_token_len = out_mm_data.get("fake_token_len")
        if fake_token_len is None:
            audio_output_lengths = []
        else:
            assert isinstance(fake_token_len, torch.Tensor)
            audio_output_lengths = fake_token_len.tolist()

        def get_replacement_qwen2_audio(item_idx: int):
            # Prefer the precomputed length; otherwise fall back to the
            # number of rows in the precomputed audio embedding.
            if audio_output_lengths:
                num_features = audio_output_lengths[item_idx]
            else:
                audio_embeds = out_mm_data["audio_embeds"][item_idx]
                assert len(audio_embeds.shape) == 2, "audio_embeds must be a 2D tensor"
                num_features = audio_embeds.shape[0]
            return [audio_token_id] * num_features

        return [
            PromptReplacement(
                modality="audio",
                target=[audio_token_id],
                replacement=get_replacement_qwen2_audio,
            )
        ]
@MULTIMODAL_REGISTRY.register_processor(
FunASRMultiModalProcessor,
info=FunASRProcessingInfo,
dummy_inputs=FunASRDummyInputsBuilder,
)
class FunASRForConditionalGeneration(
nn.Module, SupportsTranscription, SupportsMultiModal
):
packed_modules_mapping = {
"self_attn.qkv_proj": [
"self_attn.q_proj",
"self_attn.k_proj",
"self_attn.v_proj",
],
"encoder_attn.kv_proj": ["encoder_attn.k_proj", "encoder_attn.v_proj"],
}
hf_to_vllm_mapper = WeightsMapper(
orig_to_new_substr={
"linear_q.": "q_proj.",
"linear_k.": "k_proj.",
"linear_v.": "v_proj.",
"linear_out.": "out_proj.",
}
)
supports_transcription_only = True
supports_segment_timestamp = True
supported_languages = ISO639_1_SUPPORTED_LANGS
@classmethod
def validate_language(cls, language: str | None) -> str | None:
if language is None:
# TODO language should be optional and can be guessed.
# For now we default to en. See
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/generation_whisper.py#L1520
logger.warning(
"Defaulting to language='en'. If you wish to transcribe "
"audio in a different language, pass the `language` field "
"in the TranscriptionRequest."
)
language = "en"
return super().validate_language(language)
@classmethod
def get_generation_prompt(
cls,
audio: np.ndarray,
model_config: ModelConfig, # not needed here
stt_config: SpeechToTextConfig,
language: str | None,
task_type: Literal["transcribe", "translate"],
request_prompt: str,
to_language: str | None,
) -> PromptType:
if language is None:
raise ValueError(
"Language must be specified when creating the funasr prompt"
)
funasr_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n语音转写:<|AUDIO|><|im_end|>\n<|im_start|>assistant\n" # noqa: E501
prompt = {
"prompt": funasr_prompt,
"multi_modal_data": {
"audio": (audio, stt_config.sample_rate),
},
}
return cast(PromptType, prompt)
@classmethod
def get_speech_to_text_config(
cls, model_config: ModelConfig, task_type: str
) -> SpeechToTextConfig:
processor = cached_processor_from_config(model_config)
return SpeechToTextConfig(
max_audio_clip_s=processor.feature_extractor.chunk_length,
sample_rate=processor.feature_extractor.sampling_rate,
)
@classmethod
def get_num_audio_tokens(
cls,
audio_duration_s: float,
stt_config: SpeechToTextConfig,
model_config: ModelConfig,
) -> int | None:
processor = cached_processor_from_config(model_config)
hop_length = processor.feature_extractor.hop_length
assert hop_length is not None
return math.ceil(audio_duration_s * stt_config.sample_rate / hop_length)
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
super().__init__()
config = vllm_config.model_config.hf_config
quant_config = vllm_config.quant_config
self.config = config
self.dtype = vllm_config.model_config.dtype
self.model = FunASRModel(
vllm_config=vllm_config,
prefix=maybe_prefix(prefix, "model"),
)
logit_scale = getattr(config, "logit_scale", 1.0)
if config.tie_word_embeddings:
self.lm_head = self.model.decoder.embed_tokens
else:
self.lm_head = ParallelLMHead(
config.vocab_size,
config.hidden_size,
quant_config=quant_config,
prefix=maybe_prefix(prefix, "lm_head"),
)
self.logits_processor = LogitsProcessor(config.vocab_size, scale=logit_scale)
def forward(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
inputs_embeds: torch.Tensor | None = None,
**kwargs,
) -> torch.Tensor:
decoder_outputs = self.model(
input_ids=input_ids,
positions=positions,
inputs_embeds=inputs_embeds,
)
return decoder_outputs
def get_language_model(self) -> torch.nn.Module:
return self.model.decoder
def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
audio_input = self._parse_and_validate_audio_input(**kwargs)
speech = audio_input["input_features"]
speech_lengths = audio_input["speech_lengths"]
enc_output = self.model.get_encoder_outputs(
speech=speech, speech_lengths=speech_lengths
)
return enc_output
def embed_input_ids(
self,
input_ids: torch.Tensor,
multimodal_embeddings: MultiModalEmbeddings | None = None,
*,
is_multimodal: torch.Tensor | None = None,
handle_oov_mm_token: bool = False,
) -> torch.Tensor:
inputs_embeds = self.model.decoder.embed_input_ids(input_ids)
return _merge_multimodal_embeddings(
inputs_embeds=inputs_embeds,
multimodal_embeddings=multimodal_embeddings,
is_multimodal=_require_is_multimodal(is_multimodal),
)
    def _parse_and_validate_audio_input(self, **kwargs: object) -> FunASRAudioInputs:
        """Pull audio features out of the multimodal kwargs, cast to model dtype.

        Missing keys yield ``None`` fields in the returned structure.

        NOTE(review): ``speech_lengths`` is also cast to ``self.dtype`` (a
        float dtype), even though the feature extractor emits int32 lengths —
        presumably the encoder expects that, but confirm this is intentional.
        """
        input_features = kwargs.pop("input_features", None)
        speech_lengths = kwargs.pop("speech_lengths", None)
        if input_features is not None:
            input_features = json_map_leaves(lambda x: x.to(self.dtype), input_features)
        if speech_lengths is not None:
            speech_lengths = json_map_leaves(lambda x: x.to(self.dtype), speech_lengths)
        return FunASRAudioInputs(
            input_features=input_features, speech_lengths=speech_lengths
        )
def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor:
logits = self.logits_processor(self.lm_head, hidden_states)
return logits
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights; returns the set of consumed param names.

        FunASR checkpoints ship k_proj without a bias while the fused
        qkv_proj here expects one, so zero biases are synthesized on the fly
        before handing the stream to the generic loader.
        """
        loader = AutoWeightsLoader(
            self,
        )
        # add fake zeros bias for k_proj to state_dict
        weights = _create_fake_bias_for_k_proj(weights)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
def _create_fake_bias_for_k_proj(
weights: Iterable[tuple[str, torch.Tensor]],
) -> Iterable[tuple[str, torch.Tensor]]:
"""
Create full zeros bias for k_proj weight in self-attn and x-attn layers.
So that the bias for k_proj in qkv_proj can be initialized with zeros.
"""
for name, weight in weights:
if name.endswith(".k_proj.weight"):
bias = torch.zeros(weight.size(0))
bias_name = name.replace("weight", "bias")
yield from [(name, weight), (bias_name, bias)]
else:
yield name, weight
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/funasr.py",
"license": "Apache License 2.0",
"lines": 897,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/processors/funasr_processor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
import torch.nn as nn
import torchaudio.compliance.kaldi as kaldi
from torch.nn.utils.rnn import pad_sequence
from transformers import (
AutoFeatureExtractor,
AutoProcessor,
BatchFeature,
)
from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
from transformers.processing_utils import ProcessorMixin
from transformers.utils import TensorType
from vllm.logger import init_logger
logger = init_logger(__name__)
def apply_cmvn(inputs, cmvn):  # noqa
    """Normalize ``inputs`` in place with CMVN shift/scale statistics.

    ``cmvn`` row 0 holds the additive means and row 1 the multiplicative
    variances; both are truncated to the feature dimension of ``inputs``.
    The (mutated) tensor is returned as float32.
    """
    _, dim = inputs.shape
    shift = cmvn[0:1, :dim].to(inputs.device)
    scale = cmvn[1:2, :dim].to(inputs.device)
    # In-place on purpose: callers rely on the returned (same) tensor.
    inputs += shift
    inputs *= scale
    return inputs.type(torch.float32)
def apply_lfr(inputs, lfr_m, lfr_n):
    """Apply Low Frame Rate stacking: group lfr_m frames every lfr_n frames.

    ``inputs`` is a (T, feat_dim) feature matrix; the output is
    (ceil(T / lfr_n), lfr_m * feat_dim), built zero-copy via as_strided and
    then cloned. Ported from FunASR; the padding arithmetic is exact and
    order-sensitive — do not "simplify" it.
    """
    # Number of output (stacked) frames.
    T = inputs.shape[0]
    T_lfr = int(np.ceil(T / lfr_n))
    # Left-pad by repeating the first frame so each window is centered.
    left_padding = inputs[0].repeat((lfr_m - 1) // 2, 1)
    inputs = torch.vstack((left_padding, inputs))
    T = T + (lfr_m - 1) // 2
    feat_dim = inputs.shape[-1]
    # Each output row starts lfr_n frames after the previous and spans
    # lfr_m consecutive frames flattened into one vector.
    strides = (lfr_n * feat_dim, 1)
    sizes = (T_lfr, lfr_m * feat_dim)
    # Right-pad with the last frame so the final windows are complete.
    last_idx = (T - lfr_m) // lfr_n + 1
    num_padding = lfr_m - (T - last_idx * lfr_n)
    if num_padding > 0:
        # FunASR's closed-form count of trailing frames still needed.
        num_padding = (
            (2 * lfr_m - 2 * T + (T_lfr - 1 + last_idx) * lfr_n)
            / 2
            * (T_lfr - last_idx)
        )
        inputs = torch.vstack([inputs] + [inputs[-1:]] * int(num_padding))
    LFR_outputs = inputs.as_strided(sizes, strides)
    # Clone: as_strided views alias overlapping memory.
    return LFR_outputs.clone().type(torch.float32)
def load_cmvn(cmvn_file):
    """Parse a Kaldi-style CMVN statistics file into a (2, dim) tensor.

    Row 0 holds the additive means (from the ``<AddShift>`` component) and
    row 1 the multiplicative variances (from ``<Rescale>``). Each component
    tag is expected to be followed by a line of the form
    ``<LearnRateCoef> 0 [ v1 v2 ... ]``.
    """
    with open(cmvn_file, encoding="utf-8") as f:
        lines = f.readlines()

    means_list = []
    vars_list = []
    for idx in range(len(lines)):
        tokens = lines[idx].split()
        if tokens[0] == "<AddShift>":
            next_tokens = lines[idx + 1].split()
            if next_tokens[0] == "<LearnRateCoef>":
                # Drop the tag, coefficient and "[", plus the trailing "]".
                means_list = list(next_tokens[3 : len(next_tokens) - 1])
        elif tokens[0] == "<Rescale>":
            next_tokens = lines[idx + 1].split()
            if next_tokens[0] == "<LearnRateCoef>":
                vars_list = list(next_tokens[3 : len(next_tokens) - 1])

    means = np.array(means_list).astype(np.float32)
    vars = np.array(vars_list).astype(np.float32)
    stacked = np.array([means, vars])
    return torch.as_tensor(stacked, dtype=torch.float32)
class WavFrontend(nn.Module):
    """Conventional frontend structure for ASR.

    Converts raw waveforms into Kaldi-style fbank features, optionally
    applying Low Frame Rate (LFR) stacking and CMVN normalization.

    NOTE(review): the default ``cmvn_file="null"`` is a truthy *string*,
    so the ``None`` check below would still attempt ``load_cmvn("null")``
    — presumably callers always pass a real path or ``None``; confirm.
    ``upsacle_samples`` is a (sic) typo kept for checkpoint/config
    compatibility; it scales float waveforms to int16 range.
    """
    def __init__(
        self,
        cmvn_file: str = "null",
        fs: int = 16000,
        window: str = "hamming",
        n_mels: int = 80,
        frame_length: int = 25,
        frame_shift: int = 10,
        filter_length_min: int = -1,
        filter_length_max: int = -1,
        lfr_m: int = 1,
        lfr_n: int = 1,
        dither: float = 1.0,
        snip_edges: bool = True,
        upsacle_samples: bool = True,
        **kwargs,
    ):
        super().__init__()
        self.fs = fs
        self.window = window
        self.n_mels = n_mels
        self.frame_length = frame_length
        self.frame_shift = frame_shift
        self.filter_length_min = filter_length_min
        self.filter_length_max = filter_length_max
        self.lfr_m = lfr_m
        self.lfr_n = lfr_n
        self.cmvn_file = cmvn_file
        self.dither = dither
        self.snip_edges = snip_edges
        self.upsacle_samples = upsacle_samples
        # CMVN stats are loaded eagerly at construction time.
        self.cmvn = None if self.cmvn_file is None else load_cmvn(self.cmvn_file)
    def output_size(self) -> int:
        """Feature dimension after LFR stacking (n_mels * lfr_m)."""
        return self.n_mels * self.lfr_m
    def forward(
        self,
        input: torch.Tensor,
        input_lengths,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Full pipeline per utterance: fbank -> LFR -> CMVN, then batch-pad.

        Returns (padded_features, feature_lengths).
        """
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            # Trim the padded waveform to its true length.
            waveform_length = input_lengths[i]
            waveform = input[i][:waveform_length]
            if self.upsacle_samples:
                # Scale [-1, 1] floats to int16 range as Kaldi expects.
                waveform = waveform * (1 << 15)
            waveform = waveform.unsqueeze(0)
            mat = kaldi.fbank(
                waveform,
                num_mel_bins=self.n_mels,
                # Clamp the window for clips shorter than one frame.
                frame_length=min(self.frame_length, waveform_length / self.fs * 1000),
                frame_shift=self.frame_shift,
                dither=self.dither,
                energy_floor=0.0,
                window_type=self.window,
                sample_frequency=self.fs,
                snip_edges=self.snip_edges,
            )
            if self.lfr_m != 1 or self.lfr_n != 1:
                mat = apply_lfr(mat, self.lfr_m, self.lfr_n)
            if self.cmvn is not None:
                mat = apply_cmvn(mat, self.cmvn)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        if batch_size == 1:
            # Fast path: no padding needed for a single utterance.
            feats_pad = feats[0][None, :, :]
        else:
            feats_pad = pad_sequence(feats, batch_first=True, padding_value=0.0)
        return feats_pad, feats_lens
    def forward_fbank(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Fbank extraction only (no LFR/CMVN); returns padded feats + lens."""
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            waveform_length = input_lengths[i]
            waveform = input[i][:waveform_length]
            waveform = waveform * (1 << 15)
            waveform = waveform.unsqueeze(0)
            mat = kaldi.fbank(
                waveform,
                num_mel_bins=self.n_mels,
                frame_length=self.frame_length,
                frame_shift=self.frame_shift,
                dither=self.dither,
                energy_floor=0.0,
                window_type=self.window,
                sample_frequency=self.fs,
            )
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats, batch_first=True, padding_value=0.0)
        return feats_pad, feats_lens
    def forward_lfr_cmvn(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """LFR + CMVN on precomputed fbank feats; returns padded feats + lens."""
        batch_size = input.size(0)
        feats = []
        feats_lens = []
        for i in range(batch_size):
            mat = input[i, : input_lengths[i], :]
            if self.lfr_m != 1 or self.lfr_n != 1:
                mat = apply_lfr(mat, self.lfr_m, self.lfr_n)
            if self.cmvn is not None:
                mat = apply_cmvn(mat, self.cmvn)
            feat_length = mat.size(0)
            feats.append(mat)
            feats_lens.append(feat_length)
        feats_lens = torch.as_tensor(feats_lens)
        feats_pad = pad_sequence(feats, batch_first=True, padding_value=0.0)
        return feats_pad, feats_lens
class FunASRFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a FunASR feature extractor.
    This feature extractor inherits from [`~feature_extraction_sequence_
    utils.SequenceFeatureExtractor`] which contains most of the main
    methods. Users should refer to this superclass for more information
    regarding those methods.
    This class extracts mel-filter bank features from raw speech using a custom
    numpy implementation of the `Short Time Fourier Transform` which should
    match pytorch's `torch.stft` equivalent.
    Args:
        feature_size (`int`, *optional*, defaults to 80):
            The feature dimension of the extracted features.
        sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio files should be digitalized
            expressed in hertz (Hz).
        hop_length (`int`, *optional*, defaults to 160):
            Length of the overlapping windows for the STFT used to obtain the
            Mel Frequency coefficients.
        chunk_length (`int`, *optional*, defaults to 30):
            The maximum number of chunks of `sampling_rate` samples used to
            trim and pad longer or shorter audio sequences.
        n_fft (`int`, *optional*, defaults to 400):
            Size of the Fourier transform.
        padding_value (`float`, *optional*, defaults to 0.0):
            Padding value used to pad the audio. Should correspond to silences.
        dither (`float`, *optional*, defaults to 0.0):
            Adds dithering. In other words, adds a small Gaussian noise to each frame.
            E.g. use 0.0001 to add dithering with a normal distribution centered
            around 0.0 with standard deviation 0.0001 (assuming [-1,+1] range
            of raw_speech). The value 0.0 means no dithering.
            Dithering has similar effect as `spectrogram(mel_floor=...)`. It reduces
            the high log_mel_fbank values for signals with hard-zero sections,
            when VAD cutoff is present in the signal.
    """
    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        dither=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # WavFrontend kwargs (cmvn_file, lfr_m, lfr_n, ...) from the config.
        self.frontend_conf = kwargs.get("frontend_conf", {})
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # Maximum number of raw samples per clip, and the frame budget.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.dither = dither
    def extract_fbank(
        self, data, data_len=None, data_type: str = "sound", frontend=None, **kwargs
    ):
        """Normalize input into a (batch, N) tensor and run the frontend.

        Accepts a single ndarray/tensor or a list of per-utterance arrays;
        returns (float32 features, int32 lengths).
        NOTE(review): when ``data_len`` is a list the tensor built below is
        ``torch.tensor([data_len])`` — shape (1, batch); confirm downstream
        consumers expect that extra leading dim.
        """
        if isinstance(data, np.ndarray):
            data = torch.from_numpy(data)
            if len(data.shape) < 2:
                data = data[None, :]  # data: [batch, N]
            data_len = [data.shape[1]] if data_len is None else data_len
        elif isinstance(data, torch.Tensor):
            if len(data.shape) < 2:
                data = data[None, :]  # data: [batch, N]
            data_len = [data.shape[1]] if data_len is None else data_len
        elif isinstance(data, (list, tuple)):
            data_list, data_len = [], []
            for data_i in data:
                if isinstance(data_i, np.ndarray):
                    data_i = torch.from_numpy(data_i)
                data_list.append(data_i)
                data_len.append(data_i.shape[0])
            data = pad_sequence(data_list, batch_first=True)
        data, data_len = frontend(data, data_len, **kwargs)
        if isinstance(data_len, (list, tuple)):
            data_len = torch.tensor([data_len])
        return data.to(torch.float32), data_len.to(torch.int32)
    def __call__(
        self,
        raw_speech: np.ndarray | list[float] | list[np.ndarray] | list[list[float]],
        truncation: bool = True,
        pad_to_multiple_of: int | None = None,
        return_tensors: str | TensorType | None = None,
        return_attention_mask: bool | None = None,
        padding: str | None = "max_length",
        max_length: int | None = None,
        sampling_rate: int | None = None,
        do_normalize: bool | None = None,
        device: str | None = "cpu",
        return_token_timestamps: bool | None = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize raw speech: pad/truncate, run WavFrontend, and compute
        the number of placeholder tokens the encoder will produce.

        Returns a BatchFeature with ``input_features``, ``speech_lengths``
        and ``fake_token_len``.
        """
        is_batched = isinstance(raw_speech, (list, tuple)) and (
            isinstance(raw_speech[0], (np.ndarray, tuple, list))
        )
        # Normalize every input shape to a list of (N, 1) float32 arrays,
        # which is what SequenceFeatureExtractor.pad expects.
        if is_batched:
            raw_speech = [
                np.asarray([speech], dtype=np.float32).T for speech in raw_speech
            ]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(
            np.float64
        ):
            raw_speech = raw_speech.astype(np.float32)
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # (N, batch, 1) -> frontend consumes (batch, N).
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        frontend = WavFrontend(**self.frontend_conf, dither=self.dither)
        input_features, speech_lengths = self.extract_fbank(
            input_features[0],
            data_type=kwargs.get("data_type", "sound"),
            frontend=frontend,
            is_final=True,
        )
        # Output-length formula of a conv with kernel 3, stride 2, padding 1,
        # applied twice — presumably the encoder's downsampling convs, then
        # one more stride-2 step for the token count; confirm against model.
        olens = 1 + (speech_lengths - 3 + 2 * 1) // 2
        olens = 1 + (olens - 3 + 2 * 1) // 2
        fake_token_len = (olens - 1) // 2 + 1
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [
                np.asarray(feature, dtype=np.float32) for feature in input_features
            ]
        else:
            padded_inputs["input_features"] = input_features
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        padded_inputs["speech_lengths"] = speech_lengths
        padded_inputs["fake_token_len"] = fake_token_len
        return padded_inputs
class FunASRProcessor(ProcessorMixin):
    r"""
    Constructs a FunASR processor which wraps a FunASR feature extractor and
    a FunASR tokenizer into a single processor.
    [`FunASRProcessor`] offers all the functionalities of
    [`FunASRFeatureExtractor`] and [`Qwen2Tokenizer`]. See the
    [`~FunASRProcessor.__call__`] and [`~FunASRProcessor.decode`] for more
    information.
    Args:
        feature_extractor (`FunASRFeatureExtractor`): An instance of
            [`FunASRFeatureExtractor`].
            The feature extractor is a required input.
        tokenizer (`Qwen2Tokenizer`):
            An instance of [`Qwen2Tokenizer`]. The tokenizer is a required
            input.
    """
    feature_extractor_class = "FunASRFeatureExtractor"
    tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
    def __init__(
        self,
        feature_extractor,
        tokenizer,
        audio_token="<|AUDIO|>",
    ):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
        # Prefer the tokenizer's own audio token if it defines one.
        self.audio_token = (
            tokenizer.audio_token if hasattr(tokenizer, "audio_token") else audio_token
        )
        self.audio_token_id = tokenizer.convert_tokens_to_ids(self.audio_token)
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Delegate decoder prompt construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(
            task=task, language=language, no_timestamps=no_timestamps
        )
    def __call__(self, *args, **kwargs):
        """
        Forwards the `audio` argument to FunASRFeatureExtractor's
        [`~FunASRFeatureExtractor.__call__`] and the `text` argument to
        [`~Qwen2Tokenizer.__call__`]. Please refer to the docstring of the
        above two methods for more information.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if text is None:
            raise ValueError("You need to specify `text` input to process.")
        elif isinstance(text, str):
            text = [text]
        elif not isinstance(text, list) and not isinstance(text[0], str):
            raise ValueError(
                "Invalid input text. Please provide a string, or a list of strings"
            )
        if audio is not None:
            # ensure we have as much audios as audio tokens
            num_audio_tokens = sum(sample.count(self.audio_token) for sample in text)
            num_audios = 1 if type(audio) is np.ndarray else len(audio)
            if num_audio_tokens != num_audios:
                raise ValueError(
                    f"Found {num_audio_tokens} {self.audio_token} token{'s' if num_audio_tokens > 1 else ''} in provided text but received {num_audios} audio{'s' if num_audios > 1 else ''}"  # noqa: E501
                )
            inputs = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, **kwargs
            )
            # Expand each audio placeholder to one token per encoder frame.
            # NOTE(review): `.item()` takes a scalar, so every placeholder is
            # expanded with the SAME length — this assumes a single audio
            # clip per call; confirm multi-audio batches are not expected.
            expanded_text = []
            for sample in text:
                replace_str = []
                while self.audio_token in sample:
                    num_audio_tokens = inputs["fake_token_len"].item()
                    expanded_audio_token = self.audio_token * num_audio_tokens
                    replace_str.append(expanded_audio_token)
                    sample = sample.replace(self.audio_token, "<placeholder>", 1)
                while "<placeholder>" in sample:
                    sample = sample.replace("<placeholder>", replace_str.pop(0), 1)
                expanded_text.append(sample)
            text = expanded_text
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        # NOTE(review): `text is None` is unreachable here (a ValueError was
        # raised above), so this branch is dead code kept for symmetry.
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def get_prompt_ids(self, text: str, return_tensors="np"):
        """Delegate prompt-id construction to the tokenizer."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
# Register with the transformers Auto* factories so that
# `AutoProcessor.from_pretrained` can resolve FunASR checkpoints to these
# classes by name without trust_remote_code.
AutoFeatureExtractor.register("FunASRFeatureExtractor", FunASRFeatureExtractor)
AutoProcessor.register("FunASRProcessor", FunASRProcessor)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/processors/funasr_processor.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/offline_inference/run_one_batch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
from vllm import LLM, EngineArgs
from vllm.config import ProfilerConfig
from vllm.utils.argparse_utils import FlexibleArgumentParser
DEFAULT_MAX_TOKENS = 16
def create_parser() -> FlexibleArgumentParser:
    """Build the CLI parser: engine args plus batch and profiling options."""
    parser = FlexibleArgumentParser()
    EngineArgs.add_cli_args(parser)
    parser.set_defaults(model="meta-llama/Llama-3.2-1B-Instruct")

    batch = parser.add_argument_group("Batch parameters")
    batch.add_argument("--batch-size", type=int, default=1)
    batch.add_argument("--prompt-size", type=int, default=128)
    batch.add_argument("--prompt-prefix", type=str, default="Hello, my name is")

    profiling = parser.add_argument_group("Profiling parameters")
    profiling.add_argument(
        "--profile",
        choices=["none", "prefill", "decode", "both"],
        default="none",
    )
    profiling.add_argument(
        "--profile-dir",
        type=str,
        default="",
        help="Required when --profile is not 'none'.",
    )
    return parser
def _build_prompt(prefix: str, prompt_size: int) -> str:
if prompt_size <= 0:
return ""
if not prefix:
prefix = " "
if len(prefix) >= prompt_size:
return prefix[:prompt_size]
repeat_count = (prompt_size + len(prefix) - 1) // len(prefix)
return (prefix * repeat_count)[:prompt_size]
def _build_profiler_config(
profile: str, profile_dir: str, max_tokens: int
) -> ProfilerConfig | None:
if profile == "none":
return None
if not profile_dir:
raise ValueError("--profile-dir must be set when profiling is enabled.")
if profile == "prefill":
delay_iterations = 0
max_iterations = 1
elif profile == "decode":
delay_iterations = 1
max_iterations = max(1, max_tokens)
else:
delay_iterations = 0
max_iterations = 0
return ProfilerConfig(
profiler="torch",
torch_profiler_dir=profile_dir,
delay_iterations=delay_iterations,
max_iterations=max_iterations,
)
def main(args: dict) -> None:
    """Run one fixed-length batch through the engine and print the outputs."""
    max_tokens = DEFAULT_MAX_TOKENS

    # Pull the script-only options out so the remainder maps onto EngineArgs.
    batch_size = args.pop("batch_size")
    prompt_size = args.pop("prompt_size")
    prompt_prefix = args.pop("prompt_prefix")
    profile = args.pop("profile")
    profile_dir = args.pop("profile_dir")

    profiler_config = _build_profiler_config(profile, profile_dir, max_tokens)
    if profiler_config is not None:
        args["profiler_config"] = profiler_config

    llm = LLM(**args)

    # Force exactly max_tokens decode steps per request.
    sampling_params = llm.get_default_sampling_params()
    sampling_params.max_tokens = max_tokens
    sampling_params.min_tokens = max_tokens
    sampling_params.ignore_eos = True

    prompts = [_build_prompt(prompt_prefix, prompt_size)] * batch_size

    profiling_enabled = profile != "none"
    if profiling_enabled:
        llm.start_profile()
    outputs = llm.generate(prompts, sampling_params)
    if profiling_enabled:
        llm.stop_profile()

    print("-" * 50)
    for output in outputs:
        generated_text = output.outputs[0].text
        print(f"Prompt: {output.prompt!r}\nGenerated text: {generated_text!r}")
        print("-" * 50)
# Script entry point: parse CLI args into a plain dict for main().
if __name__ == "__main__":
    parser = create_parser()
    main(vars(parser.parse_args()))
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/run_one_batch.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from contextlib import nullcontext
from typing import TYPE_CHECKING
import torch
import torch.nn.functional as F
import vllm.envs as envs
from vllm.distributed import (
get_ep_group,
get_pcp_group,
tensor_model_parallel_all_reduce,
)
from vllm.forward_context import (
ForwardContext,
get_forward_context,
is_forward_context_available,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
)
from vllm.model_executor.layers.fused_moe.fused_moe_method_base import (
FusedMoEMethodBase,
)
from vllm.model_executor.layers.fused_moe.router.fused_moe_router import (
FusedMoERouter,
)
from vllm.model_executor.layers.fused_moe.runner.moe_runner import MoERunner
from vllm.platforms import current_platform
from vllm.utils.math_utils import cdiv
from vllm.utils.torch_utils import (
HAS_OPAQUE_TYPE,
ModuleName,
aux_stream,
current_stream,
direct_register_custom_op,
)
from vllm.v1.worker.ubatching import dbo_current_ubatch_id
logger = init_logger(__name__)
def get_layer_from_name(layer_name: str) -> torch.nn.Module:
    """Resolve a layer name to the live FusedMoE module.

    The sentinel ``"from_forward_context"`` means "take the next layer from
    the forward context's ordered MoE-layer list"; in that case this call
    has a side effect — it advances ``forward_context.moe_layer_index`` —
    so each moe_forward op invocation consumes exactly one list entry.
    """
    forward_context: ForwardContext = get_forward_context()
    if layer_name == "from_forward_context":
        all_moe_layers = forward_context.all_moe_layers
        assert all_moe_layers is not None
        moe_layer_index = forward_context.moe_layer_index
        if moe_layer_index >= len(all_moe_layers):
            raise AssertionError(
                "We expected the number of MOE layers in `all_moe_layers` "
                "to be equal to the number of "
                "{vllm.moe_forward, vllm.moe_forward_shared} calls."
            )
        layer_name = all_moe_layers[moe_layer_index]
        forward_context.moe_layer_index += 1
    return forward_context.no_compile_layers[layer_name]
# On torch >= 2.11, layer_name is a hoisted ModuleName opaque object;
# on older versions it remains a plain str.
if TYPE_CHECKING:
    # For static checkers, always describe both possibilities.
    from typing import TypeAlias

    _layer_name_type: TypeAlias = str | ModuleName
else:
    # At runtime pick the single concrete type the installed torch supports.
    _layer_name_type = ModuleName if HAS_OPAQUE_TYPE else str
def _resolve_layer_name(layer_name: str | ModuleName) -> str:
    """Unwrap an opaque ModuleName into its plain string layer name."""
    if isinstance(layer_name, ModuleName):
        return layer_name.value
    return layer_name
def _moe_forward(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    shared_experts_input: torch.Tensor | None,
    layer_name: _layer_name_type,
) -> torch.Tensor:
    """Custom-op body: run the named FusedMoE layer's forward pass."""
    moe_layer = get_layer_from_name(_resolve_layer_name(layer_name))
    # TODO(bnell): this can be removed after MK migration is complete.
    moe_layer.ensure_moe_quant_config_init()
    return moe_layer.runner.forward_impl(
        moe_layer, hidden_states, router_logits, shared_experts_input
    )
def _moe_forward_fake(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    shared_experts_input: torch.Tensor | None,
    layer_name: _layer_name_type,
) -> torch.Tensor:
    """Shape-only fake impl for torch.compile tracing: mirrors hidden_states."""
    return torch.empty_like(hidden_states)
def _moe_forward_shared(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    shared_experts_input: torch.Tensor | None,
    layer_name: _layer_name_type,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Custom-op body for layers with shared experts: returns both outputs."""
    moe_layer = get_layer_from_name(_resolve_layer_name(layer_name))
    # TODO(bnell): this can be removed after MK migration is complete.
    moe_layer.ensure_moe_quant_config_init()
    return moe_layer.runner.forward_impl(
        moe_layer, hidden_states, router_logits, shared_experts_input
    )
def _moe_forward_shared_fake(
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    shared_experts_input: torch.Tensor | None,
    layer_name: _layer_name_type,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Shape-only fake impl for the shared-experts variant.

    The routed-expert output mirrors hidden_states (the transformed size);
    the shared-expert output mirrors shared_experts_input when provided —
    for latent MoE the shared experts keep the original hidden size, not
    the latent size — and falls back to hidden_states otherwise.
    """
    shared_template = (
        shared_experts_input if shared_experts_input is not None else hidden_states
    )
    return torch.empty_like(shared_template), torch.empty_like(hidden_states)
# Register the eager bodies as torch custom ops so they stay opaque to
# torch.compile; the *_fake impls supply output shapes for tracing. The
# needs_fixed_stride_order tag preserves the input strides the kernels
# expect, and hidden_states is declared mutable since forward_impl may
# write into it in-place.
direct_register_custom_op(
    op_name="moe_forward",
    op_func=_moe_forward,
    mutates_args=["hidden_states"],
    fake_impl=_moe_forward_fake,
    tags=(torch.Tag.needs_fixed_stride_order,),
)
direct_register_custom_op(
    op_name="moe_forward_shared",
    op_func=_moe_forward_shared,
    mutates_args=["hidden_states"],
    fake_impl=_moe_forward_shared_fake,
    tags=(torch.Tag.needs_fixed_stride_order,),
)
class DefaultMoERunner(MoERunner):
"""
Default implementation of the MoE runner for executing Mixture of Experts layers.
This class provides a comprehensive implementation for running MoE computations
with support for:
- Expert routing and token dispatching
- Shared experts computation with optional parallel execution using CUDA streams
- Data parallel (DP) chunking for large batch processing
- Tensor model parallel and expert parallel operations
- Various quantization methods and custom operators
- Both monolithic and decomposed expert execution paths
The runner handles the complete MoE forward pass including routing tokens to
experts, executing expert computations, and combining results. It supports
advanced features like overlapped execution of shared experts and optimized
kernels for different parallel execution modes.
Eventually, this class will be split up and specialized for different
configurations, e.g. the presence or absence of shared experts, a gate, etc.
"""
    def __init__(
        self,
        layer: torch.nn.Module,
        moe_config: FusedMoEConfig,
        router: FusedMoERouter,
        routed_input_transform: torch.nn.Module | None,
        gate: torch.nn.Module | None,
        shared_experts: torch.nn.Module | None,
        quant_method: FusedMoEMethodBase,
        reduce_results: bool,
        enable_dbo: bool,
    ):
        """Capture the layer's sub-modules and pick the forward entry point.

        Also decides (via env + platform) whether shared experts run on a
        separate CUDA stream, and whether the custom-op wrapper or the raw
        function is used for dispatch.
        """
        super().__init__()
        self.moe_config = moe_config
        self.router = router
        self.routed_input_transform = routed_input_transform
        self.gate = gate
        self.shared_experts = shared_experts
        self.quant_method = quant_method
        self.reduce_results = reduce_results
        self.enable_dbo = enable_dbo
        # Allow disabling of the separate shared experts stream for
        # debug purposes.
        # TODO: Remove this after more extensive testings with TP/DP
        # and other execution modes
        if envs.VLLM_DISABLE_SHARED_EXPERTS_STREAM:
            logger.debug_once("Disabling MoE shared_experts cuda stream", scope="local")
            self.shared_experts_stream = None
        else:
            # TODO(rob): enable shared expert overlap with non-cuda-alike.
            # aux_stream() returns None on non-cuda-alike platforms.
            self.shared_experts_stream = aux_stream()
            if self.shared_experts_stream is not None:
                logger.debug_once(
                    "Enabled separate cuda stream for MoE shared_experts", scope="local"
                )
        # Needed for string -> FusedMoE layer lookup in custom ops.
        self.layer_name = layer.layer_name
        if current_platform.is_tpu() or current_platform.is_cpu():
            # TODO: Once the OOM issue for the TPU backend is resolved, we
            # will switch to using the moe_forward custom op.
            # Note: CPU doesn't require wrapped forward_impl.
            if self.shared_experts is None:
                self.moe_forward = _moe_forward
            else:
                self.moe_forward = _moe_forward_shared
        else:
            if self.shared_experts is None:
                self.moe_forward = torch.ops.vllm.moe_forward
            else:
                self.moe_forward = torch.ops.vllm.moe_forward_shared
        # Chunked all2all staging tensor (lazily allocated in
        # ensure_dp_chunking_init when DP chunking is enabled).
        self.batched_hidden_states: torch.Tensor | None = None
        self.batched_router_logits: torch.Tensor | None = None
@property
def use_dp_chunking(self) -> bool:
return (
self.moe_config.moe_parallel_config.use_deepep_ll_kernels
or self.moe_config.moe_parallel_config.use_mori_kernels
or self.moe_config.moe_parallel_config.use_fi_all2allv_kernels
) and envs.VLLM_ENABLE_MOE_DP_CHUNK
    def _maybe_setup_shared_experts_stream(
        self,
        hidden_states: torch.Tensor,
        shared_input: torch.Tensor | None,
        has_separate_shared_experts: bool,
        use_chunked_impl: bool,
    ) -> tuple[bool, torch.Tensor | None]:
        """Decide whether shared experts run on a side CUDA stream and, if so,
        prepare their input and synchronize the stream.

        Returns ``(use_stream, shared_experts_input)`` where the second item
        is non-None only when the side stream will be used. Only taken on
        CUDA, for small batches (below the token threshold), and never for
        the chunked implementation.
        """
        use_shared_experts_stream = (
            current_platform.is_cuda()
            and has_separate_shared_experts
            and not use_chunked_impl
            and self.shared_experts_stream is not None
            and (
                hidden_states.shape[0]
                <= envs.VLLM_SHARED_EXPERTS_STREAM_TOKEN_THRESHOLD
            )
        )
        shared_experts_input: torch.Tensor | None = None
        if use_shared_experts_stream:
            assert self.shared_experts_stream is not None
            assert self.moe_config.disable_inplace
            shared_experts_input = (
                shared_input if shared_input is not None else hidden_states
            )
            # Record that the shared_experts_input will be used in the
            # shared_experts_stream to avoid gc issue from
            # deallocation. For more details:
            # https://docs.pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html # noqa: E501
            # NOTE: We don't need shared_output.record_stream(current_stream())
            # because we synch the streams before using shared_output.
            shared_experts_input.record_stream(self.shared_experts_stream)
            # Mark sync start point for the separate shared experts
            # stream here since we want to run in parallel with the
            # router/gate (next op below)
            assert self.shared_experts_stream is not None
            self.shared_experts_stream.wait_stream(current_stream())
        return use_shared_experts_stream, shared_experts_input
def ensure_dp_chunking_init(self):
if not self.use_dp_chunking or self.batched_hidden_states is not None:
return
states_shape: tuple[int, ...]
logits_shape: tuple[int, ...]
moe = self.moe_config
if self.enable_dbo:
states_shape = (2, moe.max_num_tokens, self.moe_config.hidden_dim)
logits_shape = (2, moe.max_num_tokens, self.moe_config.num_logical_experts)
else:
states_shape = (moe.max_num_tokens, self.moe_config.hidden_dim)
logits_shape = (moe.max_num_tokens, self.moe_config.num_logical_experts)
self.batched_hidden_states = torch.zeros(
states_shape, dtype=moe.in_dtype, device=torch.cuda.current_device()
)
self.batched_router_logits = torch.zeros(
logits_shape,
dtype=moe.router_logits_dtype,
device=torch.cuda.current_device(),
)
def must_reduce_shared_expert_outputs(self) -> bool:
"""
The shared_experts are typically computed using the RowParallelLinear
layer. The result of this function is typically used as
the reduce_results argument to the module.
When just tensor-parallel is used, it is not required to reduce
the shared_experts results immediately. Instead we reduce at the
once at the end of the MoE op. (Refer to DeepSeekV2MoE module)
With EP and all2all kernels - this is no longer viable as all
GPU ranks in DP, produce the complete set of hidden_states.
Therefore it is required that we reduce the shared_experts output
early.
"""
assert self.quant_method is not None
return (
self.quant_method.moe_mk is not None
and self.quant_method.moe_mk.output_is_reduced()
)
def maybe_all_reduce_tensor_model_parallel(self, final_hidden_states: torch.Tensor):
"""
Some combine kernels reduce across GPU ranks by default.
"""
if self.must_reduce_shared_expert_outputs():
return final_hidden_states
else:
return tensor_model_parallel_all_reduce(final_hidden_states)
def apply_routed_input_transform(self, hidden_states: torch.Tensor) -> torch.Tensor:
"""Apply transform for routed experts (e.g., latent projection).
This is called by FusedMoE.forward_native. The original hidden_states
is saved separately so shared experts get [S, hidden_size] while
routed experts get the transformed [S, moe_latent_size].
TODO: For latent MoE bandwidth optimization, fc2_latent_proj could be
moved inside SharedFusedMoE to all-reduce on the smaller latent
dimension.
"""
if self.routed_input_transform is not None:
result = self.routed_input_transform(hidden_states)
# ReplicatedLinear returns (output, extra_bias) tuple.
# We only need the output tensor; extra_bias is not used here.
if isinstance(result, tuple):
return result[0]
return result
return hidden_states
def _reduce_output(
self,
states: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
trunc_sizes: list[int],
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
def trunc(x: torch.Tensor, trunc_size: int) -> torch.Tensor:
return x[..., :trunc_size]
def reduce_and_trunc(x: torch.Tensor, trunc_size: int) -> torch.Tensor:
return trunc(self.maybe_all_reduce_tensor_model_parallel(x), trunc_size)
if (
not self.moe_config.is_sequence_parallel
and not self.use_dp_chunking
and self.reduce_results
and (self.moe_config.tp_size > 1 or self.moe_config.ep_size > 1)
):
func = reduce_and_trunc
else:
func = trunc
if isinstance(states, tuple):
return tuple(
[func(s, trunc_size) for s, trunc_size in zip(states, trunc_sizes)]
)
else:
assert len(trunc_sizes) == 1
return func(states, trunc_sizes[0])
def _encode_layer_name(self) -> str | ModuleName:
    """Encode this layer's identity for the fused-MoE custom op.

    Prefers the opaque ModuleName wrapper when available; otherwise falls
    back to the sentinel "from_forward_context" when the forward context
    carries the MoE layer table, or to the raw layer name.
    """
    if HAS_OPAQUE_TYPE:
        return ModuleName(self.layer_name)
    # The forward context can be unavailable (or hold None) in unit tests.
    ctx_has_layers = (
        is_forward_context_available()
        and get_forward_context().all_moe_layers is not None
    )
    return "from_forward_context" if ctx_has_layers else self.layer_name
def forward(
    self,
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """Run the fused-MoE layer on ``hidden_states``.

    Returns the routed-expert output, or a ``(shared_output,
    routed_output)`` tuple when shared experts are configured. Outputs are
    truncated (and possibly reduced) by ``_reduce_output``.
    """
    # For latent MoE: save ORIGINAL hidden_states before transform
    # (shared_experts need original dimension, routed experts use transformed)
    if self.shared_experts is not None:
        original_hidden_states = hidden_states
        original_hidden_dim = hidden_states.shape[-1]
    else:
        original_hidden_states = None
    # Apply transform for routed experts (e.g., latent projection for latent MoE)
    hidden_states = self.apply_routed_input_transform(hidden_states)
    # This is the dimension after transform (for routed expert output slicing)
    transformed_hidden_dim = hidden_states.shape[-1]
    # Zero-pad the last dim up to the kernel's expected hidden size; the
    # padding is sliced back off in _reduce_output via the widths below.
    if self.moe_config.hidden_dim != transformed_hidden_dim:
        hidden_states = F.pad(
            hidden_states,
            (0, self.moe_config.hidden_dim - transformed_hidden_dim),
            mode="constant",
            value=0.0,
        )
    fused_output = self.moe_forward(
        hidden_states,
        router_logits,
        original_hidden_states,
        self._encode_layer_name(),
    )
    # Truncation widths per output: shared experts keep the original hidden
    # dim, routed experts keep the (possibly latent) transformed dim.
    if self.shared_experts is not None:
        orig_hidden_dims = [original_hidden_dim, transformed_hidden_dim]
    else:
        orig_hidden_dims = [transformed_hidden_dim]
    return self._reduce_output(fused_output, orig_hidden_dims)
def forward_impl_chunked(
    self,
    layer: torch.nn.Module,
    full_hidden_states: torch.Tensor,
    full_router_logits: torch.Tensor,
    full_shared_input: torch.Tensor | None,
    has_separate_shared_experts: bool,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """Chunked MoE forward used with DP.

    Tokens are staged through the persistent ``batched_*`` buffers in
    fixed-size chunks. Every rank iterates over the DP-wide maximum number
    of chunks; ranks that run out of local tokens still execute the chunk
    (for collective participation) but discard the result via
    ``skip_result_store``.
    """
    assert self.batched_hidden_states is not None
    assert self.batched_router_logits is not None
    assert self.batched_hidden_states.dtype == full_hidden_states.dtype, (
        f"{self.batched_hidden_states.dtype} == {full_hidden_states.dtype}"
    )
    assert self.batched_router_logits.dtype == full_router_logits.dtype, (
        f"{self.batched_router_logits.dtype} == {full_router_logits.dtype}"
    )
    # Check size compatibility.
    assert self.batched_hidden_states.size(-1) == full_hidden_states.size(-1)
    assert self.batched_router_logits.size(-1) == full_router_logits.size(-1)
    # TODO(bnell): Fix shared_expert_inputs w/chunking.
    # assert shared_input is None, (
    #     "Routed input transform is not currently supported with DP chunking."
    # )
    full_fused_final_hidden_states = torch.empty_like(full_hidden_states)
    # NOTE(review): this buffer is only bound when shared_experts is set;
    # process_chunk relies on that invariant when writing results back.
    if self.shared_experts is not None:
        full_shared_final_hidden_states = torch.empty_like(full_hidden_states)

    def process_chunk(chunk_start, chunk_end, skip_result_store=False):
        # Run one staged chunk [chunk_start, chunk_end) through the experts.
        chunk_size = chunk_end - chunk_start
        hidden_states = full_hidden_states[chunk_start:chunk_end, :]
        router_logits = full_router_logits[chunk_start:chunk_end, :]
        shared_input = (
            full_shared_input[chunk_start:chunk_end, :]
            if full_shared_input is not None
            else None
        )
        assert self.batched_hidden_states is not None
        assert self.batched_router_logits is not None
        # This is only true when DBO has been enabled in the config.
        # Both tensors will have an outer dimension for the ubatch id
        if self.batched_hidden_states.dim() == 3:
            assert self.batched_router_logits.dim() == 3
            batch_buffer_idx = dbo_current_ubatch_id()
            batched_hidden_states = self.batched_hidden_states[batch_buffer_idx, :]
            batched_router_logits = self.batched_router_logits[batch_buffer_idx, :]
        else:
            batched_hidden_states = self.batched_hidden_states
            batched_router_logits = self.batched_router_logits
        assert (
            batched_hidden_states.size(0)  # type: ignore
            >= chunk_size
        )
        assert (
            batched_router_logits.size(0)  # type: ignore
            >= chunk_size
        )
        # Copy the chunk into the persistent staging buffers; the kernels
        # below operate on the staged views, not the caller's tensors.
        staged_hidden_states = batched_hidden_states[:chunk_size, :]  # type: ignore
        staged_router_logits = batched_router_logits[:chunk_size, :]  # type: ignore
        staged_hidden_states.copy_(hidden_states, non_blocking=True)
        staged_router_logits.copy_(router_logits, non_blocking=True)
        shared_input = (
            shared_input if shared_input is not None else staged_hidden_states
        )
        # Matrix multiply.
        if self.quant_method.is_monolithic:
            assert has_separate_shared_experts or self.shared_experts is None
            final_hidden_states = self.quant_method.apply_monolithic(
                layer=layer,
                x=staged_hidden_states,
                router_logits=staged_router_logits,
            )
        else:
            topk_weights, topk_ids = self.router.select_experts(
                hidden_states=staged_hidden_states,
                router_logits=staged_router_logits,
            )
            final_hidden_states = self.quant_method.apply(
                layer=layer,
                x=staged_hidden_states,
                topk_weights=topk_weights,
                topk_ids=topk_ids,
                shared_experts_input=shared_input,
            )
        if has_separate_shared_experts:
            assert not isinstance(final_hidden_states, tuple)
            assert self.shared_experts is not None
            shared_output = self.shared_experts(shared_input)
            final_hidden_states = (
                shared_output,
                final_hidden_states,
            )
        if not skip_result_store:
            if self.shared_experts is None:
                full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
                    final_hidden_states, non_blocking=True
                )
            else:
                full_shared_final_hidden_states[chunk_start:chunk_end, :].copy_(
                    final_hidden_states[0], non_blocking=True
                )
                full_fused_final_hidden_states[chunk_start:chunk_end, :].copy_(
                    final_hidden_states[1], non_blocking=True
                )

    ctx = get_forward_context()
    # flashinfer_cutlass_kernels can handle: optional DP + TP/EP
    max_tokens_across_dispatchers = ctx.dp_metadata.max_tokens_across_dp_cpu
    moe_dp_chunk_size_per_rank = self.moe_config.max_num_tokens
    # If the input to the MoE is sequence parallel then divide by sp_size
    # to find the maximum number of tokens for any individual dispatcher.
    if self.moe_config.is_sequence_parallel:
        max_tokens_across_dispatchers = cdiv(
            max_tokens_across_dispatchers, self.moe_config.sp_size
        )
    num_tokens = full_hidden_states.size(0)
    for chunk_idx, chunk_start_ in enumerate(
        range(0, max_tokens_across_dispatchers, moe_dp_chunk_size_per_rank)
    ):
        chunk_start = chunk_start_
        chunk_end = min(
            chunk_start + moe_dp_chunk_size_per_rank, max_tokens_across_dispatchers
        )
        # clamp start and end
        # (keeps indices valid on ranks with fewer local tokens; the result
        # is discarded for such ranks via skip_result_store below)
        chunk_start = min(chunk_start, num_tokens - 1)
        chunk_end = min(chunk_end, num_tokens)
        with ctx.dp_metadata.chunked_sizes(
            self.moe_config.sp_size, moe_dp_chunk_size_per_rank, chunk_idx
        ):
            process_chunk(
                chunk_start, chunk_end, skip_result_store=chunk_start_ >= num_tokens
            )
    if self.shared_experts is None:
        return full_fused_final_hidden_states
    else:
        return (full_shared_final_hidden_states, full_fused_final_hidden_states)
def forward_impl(
    self,
    layer: torch.nn.Module,
    hidden_states: torch.Tensor,
    router_logits: torch.Tensor,
    shared_input: torch.Tensor | None,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    """Main MoE forward: optional gating, DP dispatch/combine, expert
    matmul, and (optionally overlapped) shared-expert execution.

    Delegates to ``forward_impl_chunked`` when DP chunking is enabled.
    Returns the routed output, or ``(shared_output, routed_output)`` when
    shared experts are present.
    """
    assert self.quant_method is not None
    self.ensure_dp_chunking_init()
    # Shared experts run here (outside the modular kernel) only when the
    # quant method's MK does not own them itself.
    has_separate_shared_experts = (
        not self.quant_method.mk_owns_shared_expert
        and self.shared_experts is not None
    )
    use_chunked_impl = self.use_dp_chunking
    use_shared_experts_stream, shared_experts_input = (
        self._maybe_setup_shared_experts_stream(
            hidden_states,
            shared_input,
            has_separate_shared_experts,
            use_chunked_impl,
        )
    )
    # If router/gate provided, then apply it here.
    # (Note: This code runs only when "overlapped mode" is on to allow
    # parallel execution of shared experts with the FusedMoE via
    # separate cuda stream)
    if self.gate is not None:
        router_logits, _ = self.gate(hidden_states)
    if use_chunked_impl:
        return self.forward_impl_chunked(
            layer,
            hidden_states,
            router_logits,
            shared_input,
            has_separate_shared_experts,
        )
    # NOTE(rob): once we finish migrating all the quant methods to use
    # MKs, we can remove the naive dispatch/combine path from here.
    do_naive_dispatch_combine = (
        self.moe_config.dp_size > 1 and not self.quant_method.supports_internal_mk
    )
    ctx = get_forward_context()
    # dp_metadata can be absent (e.g. in unit tests); fall back to a no-op.
    sp_ctx = (
        ctx.dp_metadata.sp_local_sizes(self.moe_config.sp_size)
        if ctx.dp_metadata
        else nullcontext()
    )
    with sp_ctx:
        extra_tensors = None
        if do_naive_dispatch_combine:
            # Some quant methods prefer to quantize before the all-gather so
            # the collective moves the smaller quantized payload.
            post_quant_allgather = (
                self.quant_method is not None
                and self.moe_config.dp_size > 1
                and self.moe_config.use_ep
                and getattr(self.quant_method, "do_post_quant_allgather", False)
            )
            if post_quant_allgather:
                hidden_states_to_dispatch, extra_tensors = (
                    self.quant_method.prepare_dp_allgather_tensor(
                        layer, hidden_states, router_logits
                    )
                )
            else:
                hidden_states_to_dispatch = hidden_states
            dispatch_res = get_ep_group().dispatch_router_logits(
                hidden_states_to_dispatch,
                router_logits,
                self.moe_config.is_sequence_parallel,
                extra_tensors=extra_tensors,
            )
            if extra_tensors is not None:
                (
                    orig_hidden_states,
                    router_logits,
                    extra_tensors_combined,
                ) = dispatch_res
                hidden_states_combined = (
                    orig_hidden_states,
                    extra_tensors_combined[0],
                )
            else:
                hidden_states_combined, router_logits = dispatch_res
                orig_hidden_states = hidden_states_combined
        else:
            orig_hidden_states = hidden_states
        # Run shared experts before matrix multiply.
        # because matrix multiply maybe modify the hidden_states.
        if has_separate_shared_experts and not use_shared_experts_stream:
            assert self.shared_experts is not None
            shared_input = (
                shared_input if shared_input is not None else hidden_states
            )
            shared_output = self.shared_experts(shared_input)
        # NOTE: Similar with DP, PCP also needs dispatch and combine. For
        # simplicity, AgRsAll2All was added separately for PCP here. Maybe
        # we should modify All2AllManager abstract to better support PCP.
        if self.moe_config.pcp_size > 1:
            hidden_states = get_pcp_group().all_gather(
                hidden_states,
                dim=0,
            )
            router_logits = get_pcp_group().all_gather(
                router_logits,
                dim=0,
            )
        # TODO(bnell): deal with fp4 flashinfer tuple hidden states hack (#30014).
        # Figure out nicer way to do this.
        if do_naive_dispatch_combine:
            x = hidden_states_combined
            x_orig = orig_hidden_states
        else:
            x = hidden_states
            x_orig = hidden_states
        # Matrix multiply.
        if self.quant_method.is_monolithic:
            final_hidden_states = self.quant_method.apply_monolithic(
                layer=layer,
                x=x,
                router_logits=router_logits,
            )
        else:
            topk_weights, topk_ids = self.router.select_experts(
                hidden_states=x_orig,
                router_logits=router_logits,
            )
            final_hidden_states = self.quant_method.apply(
                layer=layer,
                x=x,  # The type signature of this is wrong due to the hack.
                topk_weights=topk_weights,
                topk_ids=topk_ids,
                shared_experts_input=shared_input,
            )
        if has_separate_shared_experts:
            assert self.shared_experts is not None
            if use_shared_experts_stream:
                # Run shared experts in parallel on a separate stream
                # NOTE: We start the separate stream here and mark the
                # sync end point immediately after it is done. This is
                # important to avoid excessive stream allocations by the cuda
                # graph replay later.
                with torch.cuda.stream(self.shared_experts_stream):
                    # Note that hidden_states clone() is necessary here to avoid
                    # conflict with the main stream
                    shared_output = self.shared_experts(shared_experts_input)
                current_stream().wait_stream(self.shared_experts_stream)
            final_hidden_states = (
                shared_output,
                final_hidden_states,
            )

        def combine_output(states: torch.Tensor) -> torch.Tensor:
            # Undo the naive DP dispatch and/or PCP all-gather on the
            # routed output only; shared output is handled by the caller.
            if do_naive_dispatch_combine:
                states = get_ep_group().combine(
                    states, self.moe_config.is_sequence_parallel
                )
            if self.moe_config.pcp_size > 1:
                states = get_pcp_group().reduce_scatter(
                    states,
                    dim=0,
                )
            return states

        if self.shared_experts is not None:
            return (
                final_hidden_states[0],
                combine_output(final_hidden_states[1]),
            )
        else:
            return combine_output(final_hidden_states)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/runner/default_moe_runner.py",
"license": "Apache License 2.0",
"lines": 673,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/runner/moe_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
import torch
class MoERunner(ABC):
"""
Abstract base class for Mixture of Experts (MoE) runners.
This class defines the interface that all MoE runner implementations must follow.
MoE runners are responsible for executing the forward pass of MoE layers, handling
expert routing, and managing tensor parallel operations.
"""
@abstractmethod
def forward(
self,
hidden_states: torch.Tensor,
router_logits: torch.Tensor,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
raise NotImplementedError
@abstractmethod
def must_reduce_shared_expert_outputs(self) -> bool:
raise NotImplementedError
@abstractmethod
def maybe_all_reduce_tensor_model_parallel(
self,
final_hidden_states: torch.Tensor,
):
raise NotImplementedError
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/runner/moe_runner.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tools/pre_commit/check_boolean_context_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Lint: detect `with a() and b():` (boolean op in with-statement context).
Using `and`/`or` to combine context managers is almost always a bug:
with ctx_a() and ctx_b(): # BUG: only ctx_b is entered
with ctx_a() or ctx_b(): # BUG: only ctx_a is entered
The correct way to combine context managers is:
with ctx_a(), ctx_b(): # comma-separated
with (ctx_a(), ctx_b()): # parenthesized (Python 3.10+)
with contextlib.ExitStack() ... # ExitStack
"""
import ast
import sys
def check_file(filepath: str) -> list[str]:
    """Return lint messages for boolean-combined context managers in *filepath*.

    Files that cannot be read, decoded, or parsed yield no violations.
    """
    try:
        with open(filepath, encoding="utf-8") as f:
            tree = ast.parse(f.read(), filename=filepath)
    except (OSError, UnicodeDecodeError, SyntaxError):
        return []
    findings: list[str] = []
    for node in ast.walk(tree):
        if not isinstance(node, (ast.With, ast.AsyncWith)):
            continue
        for item in node.items:
            expr = item.context_expr
            if not isinstance(expr, ast.BoolOp):
                continue
            op = "and" if isinstance(expr.op, ast.And) else "or"
            findings.append(
                f"{filepath}:{expr.lineno}: "
                f"boolean `{op}` used to combine context managers "
                f"in `with` statement — use a comma instead"
            )
    return findings
def main() -> int:
    """Lint each file named on the command line.

    Returns 1 on usage error or when any violation is found, else 0.
    """
    paths = sys.argv[1:]
    if not paths:
        print("Usage: check_boolean_context_manager.py <file> ...", file=sys.stderr)
        return 1
    violations: list[str] = []
    for path in paths:
        violations.extend(check_file(path))
    if not violations:
        return 0
    print(
        "❌ Boolean operator used to combine context managers in `with` "
        "statement.\n"
        " `with a() and b():` only enters `b()` as a context manager.\n"
        " Use `with a(), b():` or `with (a(), b()):` instead.\n"
    )
    for v in violations:
        print(f" {v}")
    return 1
| {
"repo_id": "vllm-project/vllm",
"file_path": "tools/pre_commit/check_boolean_context_manager.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/eplb/eplb_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Utility functions for EPLB (Expert Parallel Load Balancing)."""
import os
from vllm.config import ParallelConfig
from vllm.logger import init_logger
logger = init_logger(__name__)
def override_envs_for_eplb(parallel_config: ParallelConfig) -> None:
    """
    Override environment variables for EPLB when specific conditions are met.

    Sets ``NCCL_MAX_CTAS=8`` when async EPLB is combined with data
    parallelism and the DeepEP low-latency backend, unless the user already
    exported a numeric value.

    Args:
        parallel_config: The parallel configuration object.
    """
    is_data_parallel = parallel_config.data_parallel_size > 1
    is_eplb_enabled = parallel_config.enable_eplb
    async_eplb = parallel_config.eplb_config.use_async
    is_deepep_ll = parallel_config.all2all_backend == "deepep_low_latency"
    # Override NCCL_MAX_CTAS to avoid hangs when using async EPLB with the
    # DeepEP low-latency backend.
    #
    # The hang happens when two ranks interleave kernel launches differently
    # between NCCL collectives (used by async EPLB weight exchange) and DeepEP
    # low-latency (LL) kernels. DeepEP LL uses a cooperative launch and tries
    # to reserve a large fraction of the GPU's SMs; if those SMs are currently
    # occupied by NCCL, the DeepEP LL launch blocks until enough SMs are
    # freed.
    #
    # If rank A enters DeepEP LL in main thread while rank B is still executing
    # NCCL in async thread, rank A can block waiting for SMs, while rank B can
    # block inside NCCL waiting for rank A to participate in the collective.
    # This circular wait causes a deadlock.
    # Limiting NCCL occupancy via NCCL_MAX_CTAS leaves space for the DeepEP
    # cooperative kernel to launch and complete, breaking the deadlock.
    # See: https://github.com/deepseek-ai/DeepEP/issues/496
    if not (is_data_parallel and is_eplb_enabled and is_deepep_ll and async_eplb):
        return
    # Respect an explicit numeric value already set by the user.
    current_value_str = os.getenv("NCCL_MAX_CTAS")
    if current_value_str and current_value_str.isdigit():
        return
    override_value = 8
    os.environ["NCCL_MAX_CTAS"] = str(override_value)
    # Lazy %-style args (vLLM logging convention) instead of an f-string,
    # so formatting only happens when the record is actually emitted.
    logger.info_once(
        "EPLB: Setting NCCL_MAX_CTAS=%d "
        "for expert parallel with EPLB and deepep_low_latency backend",
        override_value,
        scope="global",
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/eplb/eplb_utils.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/quantization/test_gpt_oss.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
End-to-end accuracy test for GPT-OSS model quantization.
Config:
Task: gsm8k_platinum
Filter: flexible-extract
n-shot: 5
Metric: exact_match
Run: pytest tests/models/quantization/test_gpt_oss.py
"""
import importlib
import importlib.metadata
import importlib.util
from dataclasses import dataclass

import huggingface_hub
import lm_eval
import pytest
from packaging import version
# Expected gsm8k_platinum exact-match (flexible-extract, 5-shot) per model.
MODEL_ACCURACIES = {
    # Full quantization: attention linears and MoE linears
    "amd/gpt-oss-20b-WFP8-AFP8-KVFP8": 0.89,
    # MoE linears only quantization
    "amd/gpt-oss-20b-MoE-Quant-W-MXFP4-A-FP8-KV-FP8": 0.89,
    # MoE linears only quantization
    # "amd/gpt-oss-20b-MoE-Quant-W-MXFP4-A-MXFP4-KV-FP8": 0.90,
}
# True when amd-quark >= 0.9.0 is installed; the `and` short-circuits so the
# version lookup cannot raise when the `quark` package is absent.
# NOTE(review): this relies on `importlib.util` being importable — confirm
# the submodule is explicitly imported at the top of the file.
QUARK_MXFP4_AVAILABLE = importlib.util.find_spec("quark") is not None and version.parse(
    importlib.metadata.version("amd-quark")
) >= version.parse("0.9.0")
def has_huggingface_access(repo):
    """Return True if *repo* is visible to us on the Hugging Face Hub.

    NOTE(review): only RepositoryNotFoundError is treated as "no access";
    other Hub/network errors propagate to the caller.
    """
    try:
        huggingface_hub.list_repo_refs(repo)
    except huggingface_hub.errors.RepositoryNotFoundError:
        return False
    return True
# True only when every model repo under test is readable; a generator (not a
# materialized list) lets all() short-circuit on the first inaccessible repo.
HF_HUB_AMD_ORG_ACCESS = all(
    has_huggingface_access(model_name) for model_name in MODEL_ACCURACIES
)
@dataclass
class ModelCase:
    """A model / tensor-parallel pairing for evaluation runs.

    NOTE(review): not referenced elsewhere in this module — confirm whether
    it is still needed.
    """

    model_id: str  # Hugging Face repo id of the model under test
    tp: int  # tensor-parallel size to launch with
@dataclass
class EvaluationConfig:
    """Builds the vLLM ``model_args`` mapping for an lm-eval run."""

    model_name: str  # Hugging Face repo id to evaluate

    def get_model_args(self, tp_size: int):
        """Return lm-eval "vllm" backend arguments for *tp_size* GPUs."""
        model_args = {
            "pretrained": self.model_name,
            "chat_template_args": {"reasoning_effort": "low"},
            "enable_thinking": True,
            "think_end_token": "200008",
            "tensor_parallel_size": tp_size,
        }
        model_args.update(
            {
                "dtype": "auto",
                "gpu_memory_utilization": 0.95,
                "trust_remote_code": False,
                "enable_prefix_caching": False,
                "enforce_eager": False,
            }
        )
        return model_args
@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE, reason="amd-quark>=0.9 is not available")
@pytest.mark.skipif(
    not HF_HUB_AMD_ORG_ACCESS,
    reason="Read access to huggingface.co/amd is required for this test.",
)
@pytest.mark.parametrize("tp_size", [1, 2, 4, 8])
@pytest.mark.parametrize("model_name, expected_accuracy", MODEL_ACCURACIES.items())
def test_gpt_oss_attention_quantization(
    model_name: str, tp_size: int, expected_accuracy: float
):
    """End-to-end gsm8k_platinum accuracy check for quantized GPT-OSS.

    Runs lm-eval's vLLM backend (5-shot, flexible-extract exact match) and
    asserts the measured score is within an absolute 0.02 of the recorded
    reference accuracy for the model.
    """
    model_args = EvaluationConfig(model_name).get_model_args(tp_size)
    extra_run_kwargs = {
        # Generous generation budget for chain-of-thought answers.
        "gen_kwargs": {"max_gen_toks": 8000},
        "apply_chat_template": True,
        "fewshot_as_multiturn": True,
        "num_fewshot": 5,
    }
    lm_eval_out = lm_eval.simple_evaluate(
        model="vllm",
        model_args=model_args,
        tasks="gsm8k_platinum",
        batch_size="auto",
        **extra_run_kwargs,
    )
    measured_accuracy = float(
        lm_eval_out["results"]["gsm8k_platinum"]["exact_match,flexible-extract"]
    )
    # Two-sided tolerance band of +/- rtol around the expected accuracy.
    rtol = 0.02
    assert (
        measured_accuracy - rtol < expected_accuracy
        and measured_accuracy + rtol > expected_accuracy
    ), f"Expected: {expected_accuracy} | Measured: {measured_accuracy}"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/quantization/test_gpt_oss.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/qwen3_5.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The vLLM team.
# Copyright 2025 The Qwen Team.
# Copyright 2025 The HuggingFace Inc. team.
# All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Qwen3.5 Series compatible with HuggingFace weights."""
import typing
from collections.abc import Callable, Iterable
import torch
from einops import rearrange
from torch import nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import (
VllmConfig,
)
from vllm.distributed import (
get_pp_group,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.layernorm import (
GemmaRMSNorm as Qwen3_5RMSNorm,
)
from vllm.model_executor.layers.linear import MergedColumnParallelLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs.qwen3_5 import (
Qwen3_5Config,
Qwen3_5TextConfig,
)
from vllm.transformers_utils.configs.qwen3_5_moe import (
Qwen3_5MoeConfig,
Qwen3_5MoeTextConfig,
)
from .interfaces import (
HasInnerState,
IsHybrid,
MixtureOfExperts,
MultiModalEmbeddings,
SupportsLoRA,
SupportsPP,
_require_is_multimodal,
)
from .qwen2_moe import Qwen2MoeMLP as Qwen3NextMLP
from .qwen3_next import (
Qwen3NextAttention,
Qwen3NextDecoderLayer,
Qwen3NextGatedDeltaNet,
Qwen3NextModel,
Qwen3NextSparseMoeBlock,
QwenNextMixtureOfExperts,
)
from .qwen3_vl import (
Qwen3_VisionTransformer,
Qwen3VLDummyInputsBuilder,
Qwen3VLForConditionalGeneration,
Qwen3VLMultiModalProcessor,
Qwen3VLProcessingInfo,
)
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
_merge_multimodal_embeddings,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)
class Qwen3_5ProcessingInfo(Qwen3VLProcessingInfo):
    """Processing info for dense Qwen3.5; binds the Qwen3_5Config type."""

    def get_hf_config(self):
        """Fetch this model's HF config as a ``Qwen3_5Config``."""
        config = self.ctx.get_hf_config(Qwen3_5Config)
        return config
class Qwen3_5MoeProcessingInfo(Qwen3VLProcessingInfo):
    """Processing info for Qwen3.5-MoE; binds the Qwen3_5MoeConfig type."""

    def get_hf_config(self):
        """Fetch this model's HF config as a ``Qwen3_5MoeConfig``."""
        config = self.ctx.get_hf_config(Qwen3_5MoeConfig)
        return config
class Qwen3_5GatedDeltaNet(Qwen3NextGatedDeltaNet):
    """Qwen3.5 gated DeltaNet (linear-attention) block.

    Unlike the Qwen3-Next parent, the fused qkvz projection is laid out as
    [q, k, v, z] per shard, so no query/key/value reordering is needed, and
    the b/a gates come from a single ``in_proj_ba`` split in half.
    """

    def fix_query_key_value_ordering(
        self,
        mixed_qkvz: torch.Tensor,
        mixed_ba: torch.Tensor,
    ):
        # Qwen3.5 checkpoints already store qkvz in the order consumed by
        # forward(), so the parent's reordering must never be invoked.
        raise NotImplementedError(
            "Qwen3.5 Series dont need to fix query key value ordering"
        )

    def create_qkvz_proj(
        self,
        hidden_size: int,
        key_dim: int,
        value_dim: int,
        quant_config: QuantizationConfig | None,
        prefix: str,
    ) -> MergedColumnParallelLinear:
        # Fused projection emitting [q, k, v, z]: q/k each of width key_dim,
        # v/z each of width value_dim.
        return MergedColumnParallelLinear(
            input_size=hidden_size,
            output_sizes=[key_dim, key_dim, value_dim, value_dim],
            bias=False,
            quant_config=quant_config,
            prefix=prefix,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        output: torch.Tensor,
    ):
        """
        Forward pass with three parts:
        1. Input projection
        2. Core attention (custom op)
        3. Output projection

        Results are written in place into ``output[:num_tokens]``.
        """
        num_tokens = hidden_states.size(0)
        # ============================================================
        # Part 1: Input Projection
        # ============================================================
        mixed_qkvz, _ = self.in_proj_qkvz(hidden_states)
        # Per-TP-rank split widths: q+k+v together, then the z gate.
        qkv_size = (self.key_dim * 2 + self.value_dim) // self.tp_size
        z_size = self.value_dim // self.tp_size
        mixed_qkv, z = mixed_qkvz.split([qkv_size, z_size], dim=-1)
        z = z.reshape(z.size(0), -1, self.head_v_dim)
        ba, _ = self.in_proj_ba(hidden_states)
        b, a = ba.chunk(2, dim=-1)
        b = b.contiguous()
        a = a.contiguous()
        # ============================================================
        # Part 2: Core Attention (Custom Op)
        # ============================================================
        # Note: we should not use torch.empty here like other attention backends,
        # see discussions in https://github.com/vllm-project/vllm/pull/28182
        core_attn_out = torch.zeros(
            (num_tokens, self.num_v_heads // self.tp_size, self.head_v_dim),
            dtype=hidden_states.dtype,
            device=hidden_states.device,
        )
        # The custom op fills core_attn_out in place.
        torch.ops.vllm.gdn_attention_core(
            mixed_qkv,
            b,
            a,
            core_attn_out,
            self.prefix,
        )
        # ============================================================
        # Part 3: Output Projection
        # ============================================================
        z_shape_og = z.shape
        # Reshape input data into 2D tensor
        core_attn_out = core_attn_out.reshape(-1, core_attn_out.shape[-1])
        z = z.reshape(-1, z.shape[-1])
        # z-gated normalization before the output projection.
        core_attn_out = self.norm(core_attn_out, z)
        core_attn_out = core_attn_out.reshape(z_shape_og)
        core_attn_out = rearrange(core_attn_out, "... h d -> ... (h d)")
        output[:num_tokens], _ = self.out_proj(core_attn_out)
class Qwen3_5DecoderLayer(Qwen3NextDecoderLayer):
    """One Qwen3.5 decoder layer: GDN or full attention plus MLP/MoE."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        layer_type: str,
        prefix: str = "",
    ) -> None:
        """Build the layer.

        Args:
            vllm_config: engine-wide configuration bundle.
            layer_type: "linear_attention" or "full_attention".
            prefix: dotted parameter-name prefix used for weight loading.
        """
        # Skip Qwen3NextDecoderLayer.__init__ and build from nn.Module up,
        # since Qwen3.5 composes attention/MLP differently.
        super(Qwen3NextDecoderLayer, self).__init__()
        config = vllm_config.model_config.hf_text_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        speculative_config = vllm_config.speculative_config
        self.layer_type = layer_type
        self.layer_idx = extract_layer_index(prefix)
        if self.layer_type == "linear_attention":
            self.linear_attn = Qwen3_5GatedDeltaNet(
                config,
                model_config=model_config,
                cache_config=cache_config,
                quant_config=quant_config,
                speculative_config=speculative_config,
                prefix=f"{prefix}.linear_attn",
            )
        elif self.layer_type == "full_attention":
            self.self_attn = Qwen3NextAttention(
                config,
                model_config=model_config,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=f"{prefix}.self_attn",
            )
        else:
            raise ValueError(f"Invalid layer_type {self.layer_type}")
        # NOTE: Determine the MLP type based on the model type
        # Qwen3.5 use all layers for MLP / Qwen3.5-MoE use sparse MoE blocks
        if config.model_type == "qwen3_5_moe_text":
            self.mlp = Qwen3NextSparseMoeBlock(
                vllm_config=vllm_config,
                prefix=f"{prefix}.mlp",
            )
        elif config.model_type == "qwen3_5_text":
            self.mlp = Qwen3NextMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        else:
            raise ValueError(f"Invalid model_type {config.model_type}")
        self.input_layernorm = Qwen3_5RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.post_attention_layernorm = Qwen3_5RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        # Optional learnable per-layer residual scales, zero-initialized.
        self.layer_scale = getattr(config, "layer_scale", False)
        if self.layer_scale:
            self.attn_layer_scale = torch.nn.Parameter(
                torch.zeros(
                    1,
                    1,
                    config.hidden_size,
                ),
            )
            self.ffn_layer_scale = torch.nn.Parameter(
                torch.zeros(
                    1,
                    1,
                    config.hidden_size,
                ),
            )
@support_torch_compile(
dynamic_arg_dims={
"input_ids": 0,
# positions is of shape (3, seq_len) if mrope is enabled for qwen2-vl,
# otherwise (seq_len, ).
"positions": -1,
"intermediate_tensors": 0,
"inputs_embeds": 0,
}
)
class Qwen3_5Model(Qwen3NextModel):
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    """Build embeddings, decoder layers, and the final norm.

    Skips Qwen3NextModel.__init__ so layer construction can use
    Qwen3_5DecoderLayer driven by the per-index ``config.layer_types`` list.
    """
    super(Qwen3NextModel, self).__init__()
    config: Qwen3_5TextConfig | Qwen3_5MoeTextConfig = (
        vllm_config.model_config.hf_text_config
    )
    parallel_config = vllm_config.parallel_config
    eplb_config = parallel_config.eplb_config
    self.num_redundant_experts = eplb_config.num_redundant_experts
    self.config = config
    self.vocab_size = config.vocab_size
    self.embed_tokens = VocabParallelEmbedding(
        self.vocab_size,
        config.hidden_size,
    )

    def get_layer(prefix: str):
        # Each layer's kind (linear vs full attention) is selected from
        # config.layer_types by the layer index encoded in the prefix.
        return Qwen3_5DecoderLayer(
            vllm_config,
            layer_type=config.layer_types[extract_layer_index(prefix)],
            prefix=prefix,
        )

    self.start_layer, self.end_layer, self.layers = make_layers(
        config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers"
    )
    self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
        ["hidden_states", "residual"], config.hidden_size
    )
    # The final norm lives only on the last pipeline-parallel rank.
    if get_pp_group().is_last_rank:
        self.norm = Qwen3_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
    else:
        self.norm = PPMissingLayer()
def load_fused_expert_weights(
    self,
    name: str,
    params_dict: dict,
    loaded_weight: torch.Tensor,
    shard_id: str,
    num_experts: int,
) -> bool:
    """Offer each expert's slice of a fused (stacked) weight to the loader.

    The parameter's weight_loader reports per expert whether the slice
    belongs to this rank. Every expert is offered — no short-circuiting —
    so the loader sees the full set.

    Returns:
        True if at least one expert slice was loaded locally.
    """
    param = params_dict[name]
    weight_loader = typing.cast(Callable[..., bool], param.weight_loader)
    any_local = False
    for expert_id in range(num_experts):
        accepted = weight_loader(
            param,
            loaded_weight[expert_id],
            name,
            shard_id,
            expert_id,
            return_success=True,
        )
        if accepted:
            any_local = True
    return any_local
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
# self attention
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
# mlp
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
# GDN
("in_proj_qkvz", "in_proj_qkv", (0, 1, 2)),
("in_proj_qkvz", "in_proj_z", 3),
("in_proj_ba", "in_proj_b", 0),
("in_proj_ba", "in_proj_a", 1),
]
params_dict = dict(self.named_parameters())
loaded_params: set[str] = set()
expert_params_mapping = self.get_expert_mapping()
is_fused_expert = False
fused_expert_params_mapping = [
("experts.w13_weight", "experts.gate_up_proj", 0, "w1"),
("experts.w2_weight", "experts.down_proj", 0, "w2"),
]
num_experts = (
self.config.num_experts if hasattr(self.config, "num_experts") else 0
)
for name, loaded_weight in weights:
if "rotary_emb.inv_freq" in name:
continue
if name.startswith("mtp."):
continue
# Remapping the name of FP8 kv-scale.
if name.endswith("scale"):
name = maybe_remap_kv_scale_name(name, params_dict)
if name is None:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if "experts.gate_up_proj" in name or "experts.down_proj" in name:
is_fused_expert = True
expert_params_mapping = fused_expert_params_mapping
if weight_name not in name:
continue
if "mlp.experts" in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
# Skip layers on other devices.
if is_pp_missing_parameter(name, self):
continue
# name = apply_attn_prefix(name, params_dict)
if name not in params_dict:
continue
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
is_expert_weight = False
for mapping in expert_params_mapping:
param_name, weight_name, expert_id, shard_id = mapping
if weight_name not in name:
continue
is_expert_weight = True
name_mapped = name.replace(weight_name, param_name)
# Skip layers on other devices.
if is_pp_missing_parameter(name_mapped, self):
continue
if is_fused_expert:
# qwen3.5 no need to transpose
# loaded_weight = loaded_weight.transpose(-1, -2)
if "experts.gate_up_proj" in name:
loaded_weight = loaded_weight.chunk(2, dim=-2)
success_w1 = self.load_fused_expert_weights(
name_mapped,
params_dict,
loaded_weight[0],
"w1",
num_experts,
)
success_w3 = self.load_fused_expert_weights(
name_mapped,
params_dict,
loaded_weight[1],
"w3",
num_experts,
)
success = success_w1 and success_w3
else:
# down_proj
success = self.load_fused_expert_weights(
name_mapped,
params_dict,
loaded_weight,
shard_id,
num_experts,
)
if success:
name = name_mapped
break
else:
# Skip loading extra bias for GPTQ models.
if (
name_mapped.endswith(".bias")
or name_mapped.endswith("_bias")
) and name_mapped not in params_dict:
continue
param = params_dict[name_mapped]
weight_loader = param.weight_loader
success = weight_loader(
param,
loaded_weight,
name_mapped,
shard_id=shard_id,
expert_id=expert_id,
return_success=True,
)
if success:
name = name_mapped
break
else:
if is_expert_weight:
# We've checked that this is an expert weight
# However it's not mapped locally to this rank
# So we simply skip it
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, self):
continue
if name not in params_dict:
logger.warning_once(
f"Parameter {name} not found in params_dict, skip loading"
)
continue
param = params_dict[name]
weight_loader = getattr(
param, "weight_loader", default_weight_loader
)
weight_loader(param, loaded_weight)
loaded_params.add(name)
return loaded_params
class Qwen3_5ForCausalLMBase(
    nn.Module,
    HasInnerState,
    SupportsLoRA,
    SupportsPP,
):
    """Shared scaffolding for the dense and MoE Qwen3.5 causal LMs."""

    # Maps fused vLLM parameter names to their checkpoint sub-projections
    # (consumed by LoRA and weight loading).
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": ["gate_proj", "up_proj"],
        # GDN fused projections.
        "in_proj_qkvz": ["in_proj_qkv", "in_proj_z"],
        "in_proj_ba": ["in_proj_b", "in_proj_a"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        config = vllm_config.model_config.hf_text_config
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        scheduler_config = vllm_config.scheduler_config
        # Hybrid GDN layers do not support arbitrary-prefix caching.
        if cache_config.mamba_cache_mode == "all":
            raise NotImplementedError(
                "Qwen3.5 currently does not support 'all' prefix caching, "
                "please use '--mamba-cache-mode=align' instead"
            )
        self.quant_config = vllm_config.quant_config
        super().__init__()
        self.config = config
        self.scheduler_config = scheduler_config
        self.model = Qwen3_5Model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        if get_pp_group().is_last_rank:
            # With tied embeddings the LM head shares the input embedding
            # weight instead of allocating its own.
            if config.tie_word_embeddings:
                self.lm_head = self.model.embed_tokens
            else:
                self.lm_head = ParallelLMHead(
                    config.vocab_size,
                    config.hidden_size,
                    prefix=maybe_prefix(prefix, "lm_head"),
                )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Return token embeddings for ``input_ids``."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ):
        """Run the decoder stack; returns hidden states (or intermediate
        tensors on non-last pipeline-parallel ranks)."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits via the LM head."""
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, skipping MTP ("mtp.") tensors, which
        belong to the separate draft model."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=["mtp."],
        )
        return loader.load_weights(weights)
class Qwen3_5ForCausalLM(Qwen3_5ForCausalLMBase):
    """Dense (non-MoE) Qwen3.5 causal LM; all behavior is inherited."""

    pass
class Qwen3_5MoeForCausalLM(Qwen3_5ForCausalLMBase, QwenNextMixtureOfExperts):
    """MoE Qwen3.5 causal LM with expert-parallel (EPLB) metadata."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # set MoE hyperparameters
        self.set_moe_parameters()

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Delegate to the inner model's checkpoint-to-parameter expert map.
        return self.model.get_expert_mapping()
########################################################
# Qwen3_5-Dense
########################################################
@MULTIMODAL_REGISTRY.register_processor(
    Qwen3VLMultiModalProcessor,
    info=Qwen3_5ProcessingInfo,
    dummy_inputs=Qwen3VLDummyInputsBuilder,
)
class Qwen3_5ForConditionalGeneration(Qwen3VLForConditionalGeneration, IsHybrid):
    """Dense Qwen3.5 vision-language model with a hybrid decoder."""

    # Extend Qwen3-VL's packed-module map with the GDN fused projections.
    packed_modules_mapping = Qwen3VLForConditionalGeneration.packed_modules_mapping | {
        "in_proj_qkvz": ["in_proj_qkv", "in_proj_z"],
        "in_proj_ba": ["in_proj_b", "in_proj_a"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
        # Protocols have no __init__ method, so call nn.Module.__init__
        # directly instead of super().__init__().
        nn.Module.__init__(self)
        config: Qwen3_5Config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        self.video_pruning_rate = multimodal_config.video_pruning_rate
        self.is_multimodal_pruning_enabled = (
            multimodal_config.is_multimodal_pruning_enabled()
        )
        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.visual = Qwen3_VisionTransformer(
                config.vision_config,
                norm_eps=getattr(config, "rms_norm_eps", 1e-6),
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "visual"),
            )
        with self._mark_language_model(vllm_config):
            self.language_model = Qwen3_5ForCausalLM(
                vllm_config=vllm_config, prefix=maybe_prefix(prefix, "language_model")
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        """Embed text tokens, then splice multimodal embeddings into the
        positions flagged by ``is_multimodal``."""
        inputs_embeds = self._embed_text_input_ids(
            input_ids,
            self.language_model.embed_input_ids,
            is_multimodal=is_multimodal,
            handle_oov_mm_token=handle_oov_mm_token,
        )
        if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
            return inputs_embeds
        is_multimodal = _require_is_multimodal(is_multimodal)
        inputs_embeds = _merge_multimodal_embeddings(
            inputs_embeds=inputs_embeds,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
        )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Run forward pass for Qwen3.5.
        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a
                batch.
            positions: Flattened (concatenated) position ids corresponding to a
                batch.
                **NOTE**: If mrope is enabled (default setting for Qwen3VL
                opensource models), the shape will be `(3, seq_len)`,
                otherwise it will be `(seq_len,).
            intermediate_tensors: Intermediate tensors from previous pipeline
                stages.
            inputs_embeds: Pre-computed input embeddings.
            **kwargs: Additional keyword arguments including:
                - pixel_values: Pixel values to be fed to a model.
                    `None` if no images are passed.
                - image_grid_thw: Tensor `(n_images, 3)` of image 3D grid in
                    LLM. `None` if no images are passed.
                - pixel_values_videos: Pixel values of videos to be fed to a
                    model. `None` if no videos are passed.
                - video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in
                    LLM. `None` if no videos are passed.
        """
        # On non-first pipeline ranks the embeddings come from
        # intermediate_tensors, so any provided inputs_embeds are ignored.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights through AutoWeightsLoader with the HF-to-vLLM name
        mapper, skipping MTP ("mtp.") tensors."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=["mtp."],
        )
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, torch.dtype]:
        """Return (conv, ssm) state dtypes for the GDN layers."""
        return MambaStateDtypeCalculator.gated_delta_net_state_dtype(
            vllm_config.model_config.dtype,
            vllm_config.cache_config.mamba_cache_dtype,
            vllm_config.cache_config.mamba_ssm_cache_dtype,
        )

    @classmethod
    def get_mamba_state_shape_from_config(
        cls, vllm_config: "VllmConfig"
    ) -> tuple[tuple[int, int], tuple[int, int]]:
        """Return (conv, ssm) state shapes for the GDN layers, accounting
        for tensor parallelism and speculative tokens."""
        parallel_config = vllm_config.parallel_config
        hf_config = vllm_config.model_config.hf_text_config
        tp_size = parallel_config.tensor_parallel_size
        # Speculative decoding enlarges the state to hold draft tokens.
        num_spec = (
            vllm_config.speculative_config.num_speculative_tokens
            if vllm_config.speculative_config
            else 0
        )
        return MambaStateShapeCalculator.gated_delta_net_state_shape(
            tp_size,
            hf_config.linear_num_key_heads,
            hf_config.linear_num_value_heads,
            hf_config.linear_key_head_dim,
            hf_config.linear_value_head_dim,
            hf_config.linear_conv_kernel_dim,
            num_spec,
        )

    @classmethod
    def get_mamba_state_copy_func(cls) -> tuple[MambaStateCopyFunc, MambaStateCopyFunc]:
        """Return the (conv, ssm) state copy functions for GDN layers."""
        return MambaStateCopyFuncCalculator.gated_delta_net_state_copy_func()
########################################################
# Qwen3_5-MoE
########################################################
class Qwen3_5_MoeMixtureOfExperts(MixtureOfExperts):
    """MixtureOfExperts mixin for Qwen3.5 MoE multimodal models.

    Collects the sparse MoE blocks of ``self.language_model`` and exposes
    the expert-parallelism metadata (logical/physical/redundant expert
    counts) that EPLB needs.
    """

    def update_physical_experts_metadata(
        self,
        num_physical_experts: int,
        num_local_physical_experts: int,
    ) -> None:
        """Refresh expert counts after an EPLB rebalance.

        The per-rank (local) expert count must stay unchanged; only the
        global physical/redundant counts and each layer's expert map are
        updated.
        """
        assert self.num_local_physical_experts == num_local_physical_experts
        self.num_physical_experts = num_physical_experts
        self.num_local_physical_experts = num_local_physical_experts
        self.num_redundant_experts = num_physical_experts - self.num_logical_experts
        for layer in self.language_model.model.layers:
            if isinstance(layer.mlp, Qwen3NextSparseMoeBlock):
                moe = layer.mlp
                moe.n_local_physical_experts = num_local_physical_experts
                moe.n_physical_experts = num_physical_experts
                moe.n_redundant_experts = self.num_redundant_experts
                moe.experts.update_expert_map()

    def set_moe_parameters(self):
        """Scan the language model for MoE blocks and cache EPLB metadata.

        Raises:
            RuntimeError: if the language model contains no sparse MoE
                layer (this mixin must only be applied to MoE variants).
        """
        self.expert_weights = []
        self.moe_layers = []
        example_moe = None
        for layer in self.language_model.model.layers:
            if isinstance(layer, Qwen3_5DecoderLayer) and isinstance(
                layer.mlp, Qwen3NextSparseMoeBlock
            ):
                example_moe = layer.mlp
                self.moe_layers.append(layer.mlp.experts)
        if example_moe is None:
            # Fixed message: the failure is the absence of a *MoE* block,
            # not of Qwen3_5 layers in general.
            raise RuntimeError(
                "No Qwen3_5 MoE layer found in the language_model.model.layers."
            )
        # Set MoE hyperparameters from one representative MoE block
        # (all layers share the same expert configuration).
        self.num_moe_layers = len(self.moe_layers)
        self.num_expert_groups = 1
        self.num_shared_experts = 0
        self.num_logical_experts = example_moe.n_logical_experts
        self.num_physical_experts = example_moe.n_physical_experts
        self.num_local_physical_experts = example_moe.n_local_physical_experts
        self.num_routed_experts = example_moe.n_routed_experts
        self.num_redundant_experts = example_moe.n_redundant_experts
@MULTIMODAL_REGISTRY.register_processor(
    Qwen3VLMultiModalProcessor,
    info=Qwen3_5MoeProcessingInfo,
    dummy_inputs=Qwen3VLDummyInputsBuilder,
)
class Qwen3_5MoeForConditionalGeneration(
    Qwen3_5ForConditionalGeneration, Qwen3_5_MoeMixtureOfExperts
):
    """MoE Qwen3.5 vision-language model with EPLB support.

    NOTE(review): this __init__ duplicates the dense variant's except for
    the causal-LM class and the trailing set_moe_parameters() call — keep
    the two in sync when editing.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "model"):
        # Protocols have no __init__ method, so call nn.Module.__init__
        # directly instead of super().__init__().
        nn.Module.__init__(self)
        config: Qwen3_5MoeConfig = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        self.video_pruning_rate = multimodal_config.video_pruning_rate
        self.is_multimodal_pruning_enabled = (
            multimodal_config.is_multimodal_pruning_enabled()
        )
        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.visual = Qwen3_VisionTransformer(
                config.vision_config,
                norm_eps=getattr(config, "rms_norm_eps", 1e-6),
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "visual"),
            )
        with self._mark_language_model(vllm_config):
            self.language_model = Qwen3_5MoeForCausalLM(
                vllm_config=vllm_config, prefix=maybe_prefix(prefix, "language_model")
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        # set MoE hyperparameters
        self.set_moe_parameters()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/qwen3_5.py",
"license": "Apache License 2.0",
"lines": 774,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/qwen3_5_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only Qwen3_5 MTP model."""
import typing
from collections.abc import Callable, Iterable
import torch
from torch import nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.distributed.parallel_state import get_pp_group
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import ColumnParallelLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.qwen3_5 import Qwen3_5DecoderLayer, Qwen3_5RMSNorm
from vllm.model_executor.models.qwen3_next import QwenNextMixtureOfExperts
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs.qwen3_5 import Qwen3_5TextConfig
from vllm.transformers_utils.configs.qwen3_5_moe import Qwen3_5MoeTextConfig
from .interfaces import (
MultiModalEmbeddings,
SupportsMultiModal,
_require_is_multimodal,
)
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
_merge_multimodal_embeddings,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
maybe_prefix,
)
logger = init_logger(__name__)
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        # positions is of shape (3, seq_len) if mrope is enabled for qwen2-vl,
        # otherwise (seq_len, ).
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
        "hidden_states": 0,
    }
)
class Qwen3_5MultiTokenPredictor(nn.Module):
    """Multi-token prediction (MTP) head for Qwen3.5 speculative decoding.

    Fuses the previous step's hidden state with the next token's embedding
    (both RMS-normed, concatenated, then projected back to hidden size) and
    runs it through a small stack of full-attention decoder layers.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        model_config = vllm_config.model_config
        quant_config = vllm_config.quant_config
        config: Qwen3_5TextConfig | Qwen3_5MoeTextConfig = model_config.hf_text_config
        self.config = config
        self.vocab_size = config.vocab_size
        # MTP layers are numbered after the main model's layers.
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = getattr(config, "mtp_num_hidden_layers", 1)
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
        )
        # Projects concat(embedding, hidden) [2*H] back down to H.
        self.fc = ColumnParallelLinear(
            self.config.hidden_size * 2,
            self.config.hidden_size,
            gather_output=True,
            bias=False,
            return_bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.fc",
        )
        # MTP uses only full-attention layers (no GDN).
        self.layers = torch.nn.ModuleList(
            Qwen3_5DecoderLayer(
                vllm_config,
                layer_type="full_attention",
                prefix=f"{prefix}.layers.{idx}",
            )
            for idx in range(self.num_mtp_layers)
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        self.norm = Qwen3_5RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Separate pre-projection norms for the hidden-state and embedding
        # halves of the fused input.
        self.pre_fc_norm_hidden = Qwen3_5RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.pre_fc_norm_embedding = Qwen3_5RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Return token embeddings for ``input_ids``."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Predict the next draft token's hidden state.

        ``hidden_states`` is the target model's (or previous MTP step's)
        output; ``spec_step_idx`` selects which MTP layer to run when more
        than one is configured.
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is None:
                inputs_embeds = self.embed_input_ids(input_ids)
            assert hidden_states.shape[-1] == inputs_embeds.shape[-1]
            # Normalize both halves, concatenate, and project back to H.
            inputs_embeds = self.pre_fc_norm_embedding(inputs_embeds)
            hidden_states = self.pre_fc_norm_hidden(hidden_states)
            hidden_states = torch.cat([inputs_embeds, hidden_states], dim=-1)
            hidden_states = self.fc(hidden_states)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        # Round-robin over the MTP layers across speculative steps.
        current_step_idx = spec_step_idx % self.num_mtp_layers
        hidden_states, residual = self.layers[current_step_idx](
            positions=positions,
            hidden_states=hidden_states,
            residual=residual,
        )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

    def load_fused_expert_weights(
        self,
        name: str,
        params_dict: dict,
        loaded_weight: torch.Tensor,
        shard_id: str,
        num_experts: int,
    ) -> bool:
        """Load a fused (all-experts-in-one-tensor) checkpoint weight
        expert by expert.

        Returns True if at least one expert slice was accepted by this
        rank's weight loader (other experts may live on other EP ranks).
        """
        param = params_dict[name]
        weight_loader = typing.cast(Callable[..., bool], param.weight_loader)
        loaded_local_expert = False
        for expert_id in range(num_experts):
            curr_expert_weight = loaded_weight[expert_id]
            success = weight_loader(
                param,
                curr_expert_weight,
                name,
                shard_id,
                expert_id,
                return_success=True,
            )
            if success:
                loaded_local_expert = True
        return loaded_local_expert

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Map checkpoint tensors onto vLLM's fused/sharded parameters.

        Mirrors Qwen3_5Model.load_weights: stacked projections, per-expert
        and fused-expert MoE layouts. Returns the set of names loaded on
        this rank.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        expert_params_mapping = FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.num_experts
            if hasattr(self.config, "num_experts")
            else 0,
        )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        is_fused_expert = False
        fused_expert_params_mapping = [
            ("experts.w13_weight", "experts.gate_up_proj", 0, "w1"),
            ("experts.w2_weight", "experts.down_proj", 0, "w2"),
        ]
        num_experts = (
            self.config.num_experts if hasattr(self.config, "num_experts") else 0
        )
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Fused-expert checkpoints are detected lazily while scanning
                # and switch the expert mapping used below.
                if "experts.gate_up_proj" in name or "experts.down_proj" in name:
                    is_fused_expert = True
                    expert_params_mapping = fused_expert_params_mapping
                if weight_name not in name:
                    continue
                if "mlp.experts" in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip layers on other devices.
                if is_pp_missing_parameter(name, self):
                    continue
                if name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                is_expert_weight = False
                for mapping in expert_params_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in name:
                        continue
                    is_expert_weight = True
                    name_mapped = name.replace(weight_name, param_name)
                    # Skip layers on other devices.
                    if is_pp_missing_parameter(name_mapped, self):
                        continue
                    if is_fused_expert:
                        # qwen3.5 no need to transpose
                        # loaded_weight = loaded_weight.transpose(-1, -2)
                        if "experts.gate_up_proj" in name:
                            # Checkpoint fuses gate and up projections; split
                            # into the w1/w3 halves expected by FusedMoE.
                            loaded_weight = loaded_weight.chunk(2, dim=-2)
                            success_w1 = self.load_fused_expert_weights(
                                name_mapped,
                                params_dict,
                                loaded_weight[0],
                                "w1",
                                num_experts,
                            )
                            success_w3 = self.load_fused_expert_weights(
                                name_mapped,
                                params_dict,
                                loaded_weight[1],
                                "w3",
                                num_experts,
                            )
                            success = success_w1 and success_w3
                        else:
                            # down_proj
                            success = self.load_fused_expert_weights(
                                name_mapped,
                                params_dict,
                                loaded_weight,
                                shard_id,
                                num_experts,
                            )
                        if success:
                            name = name_mapped
                            break
                    else:
                        # Skip loading extra bias for GPTQ models.
                        if (
                            name_mapped.endswith(".bias")
                            or name_mapped.endswith("_bias")
                        ) and name_mapped not in params_dict:
                            continue
                        param = params_dict[name_mapped]
                        weight_loader = param.weight_loader
                        success = weight_loader(
                            param,
                            loaded_weight,
                            name_mapped,
                            shard_id=shard_id,
                            expert_id=expert_id,
                            return_success=True,
                        )
                        if success:
                            name = name_mapped
                            break
                else:
                    if is_expert_weight:
                        # We've checked that this is an expert weight
                        # However it's not mapped locally to this rank
                        # So we simply skip it
                        continue
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    if name not in params_dict:
                        logger.warning_once(
                            f"Parameter {name} not found in params_dict, skip loading"
                        )
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        # positions is of shape (3, seq_len) if mrope is enabled for qwen2-vl,
        # otherwise (seq_len, ).
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
        "hidden_states": 0,
    }
)
class Qwen3_5MTP(nn.Module, SupportsMultiModal):
    """Qwen3.5 multi-token-prediction (MTP) draft model for speculative
    decoding; wraps Qwen3_5MultiTokenPredictor with an LM head."""

    # Maps fused vLLM parameter names to their checkpoint sub-projections.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        config = vllm_config.model_config.hf_text_config
        self.vllm_config = vllm_config
        cache_config = vllm_config.cache_config
        # Hybrid GDN layers do not support arbitrary-prefix caching.
        if cache_config.mamba_cache_mode == "all":
            raise NotImplementedError(
                "Qwen3_5MTP currently does not support 'all' prefix caching, "
                "please use '--mamba-cache-mode=align' instead"
            )
        self.quant_config = vllm_config.quant_config
        super().__init__()
        self.config = config
        self.model = Qwen3_5MultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "mtp")
        )
        if get_pp_group().is_last_rank:
            # With tied embeddings the LM head shares the input embedding
            # weight instead of allocating its own.
            if config.tie_word_embeddings:
                self.lm_head = self.model.embed_tokens
            else:
                self.lm_head = ParallelLMHead(
                    config.vocab_size,
                    config.hidden_size,
                    prefix=maybe_prefix(prefix, "lm_head"),
                )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        """Embed text tokens, then splice multimodal embeddings into the
        positions flagged by ``is_multimodal``."""
        inputs_embeds = self._embed_text_input_ids(
            input_ids,
            self.model.embed_input_ids,
            is_multimodal=is_multimodal,
            handle_oov_mm_token=handle_oov_mm_token,
        )
        if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
            return inputs_embeds
        is_multimodal = _require_is_multimodal(is_multimodal)
        inputs_embeds = _merge_multimodal_embeddings(
            inputs_embeds=inputs_embeds,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
        )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ):
        """Run the MTP predictor on the target model's hidden states."""
        hidden_states = self.model(
            input_ids, positions, hidden_states, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        """Project hidden states to vocabulary logits.

        ``spec_step_idx`` is accepted for interface compatibility; the
        same LM head is used for every speculative step.
        """
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load only the MTP-relevant subset of the checkpoint."""

        def remap_weight_names(weights):
            # Keep MTP weights ("mtp." -> "model.") and the shared
            # embedding/lm_head tensors; drop all main-model weights.
            for name, weight in weights:
                if name.startswith("mtp."):
                    name = name.replace("mtp.", "model.")
                elif any(key in name for key in ["embed_tokens", "lm_head"]):
                    # embed_tokens may carry a "language_model." prefix from
                    # the multimodal checkpoint layout; lm_head passes as-is.
                    if "embed_tokens" in name:
                        name = name.replace("language_model.", "")
                else:
                    continue
                yield name, weight

        loader = AutoWeightsLoader(self)
        return loader.load_weights(remap_weight_names(weights))
class Qwen3_5MoeMTP(Qwen3_5MTP, QwenNextMixtureOfExperts):
    """MoE variant of the Qwen3.5 MTP draft model with EPLB metadata."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # Populate expert-parallelism metadata from the MoE layers.
        self.set_moe_parameters()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/qwen3_5_mtp.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/classify/vision_classification_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""Example Python client for multimodal classification API using vLLM API server
NOTE:
start a supported multimodal classification model server with `vllm serve`, e.g.
vllm serve muziyongshixin/Qwen2.5-VL-7B-for-VideoCls \
--runner pooling \
--max-model-len 5000 \
--limit-mm-per-prompt.video 1 \
--hf-overrides '{"text_config": {"architectures": ["Qwen2_5_VLForSequenceClassification"]}}'
"""
import argparse
import pprint
import requests
from vllm.multimodal.utils import encode_image_url, fetch_image
# Example inputs, one per supported request type.
input_text = "This product was excellent and exceeded my expectations"
image_url = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/cat_snow.jpg"
# NOTE: fetches the image over the network at import time to build a
# base64 data URL for the "image base64" example.
image_base64 = {"url": encode_image_url(fetch_image(image_url))}
video_url = "https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4"
def parse_args():
    """Parse command-line options selecting the target vLLM server."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    return parser.parse_args()
def main(args):
    """Send one classification request per supported input type (text,
    image URL, base64 image, video URL) and pretty-print each response."""
    base_url = f"http://{args.host}:{args.port}"
    models_url = base_url + "/v1/models"
    classify_url = base_url + "/classify"

    # Ask the server which model it is serving.
    response = requests.get(models_url)
    model_name = response.json()["data"][0]["id"]

    # (header line, messages payload) for each example request.
    cases = [
        (
            "Text classification output:",
            [
                {
                    "role": "assistant",
                    "content": "Please classify this text request.",
                },
                {
                    "role": "user",
                    "content": input_text,
                },
            ],
        ),
        (
            "Image url classification output:",
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Please classify this image."},
                        {"type": "image_url", "image_url": {"url": image_url}},
                    ],
                }
            ],
        ),
        (
            "Image base64 classification output:",
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Please classify this image."},
                        {"type": "image_url", "image_url": image_base64},
                    ],
                }
            ],
        ),
        (
            "Video url classification output:",
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Please classify this video."},
                        {"type": "video_url", "video_url": {"url": video_url}},
                    ],
                }
            ],
        ),
    ]
    for title, messages in cases:
        print(title)
        response = requests.post(
            classify_url,
            json={"model": model_name, "messages": messages},
        )
        pprint.pprint(response.json())
if __name__ == "__main__":
    # Entry point: parse CLI options, then run all example requests.
    args = parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/classify/vision_classification_online.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/utils/print_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
def print_embeddings(embeds: list[float]):
    """Print an embedding vector, truncated to its first four values."""
    if len(embeds) > 4:
        # Drop the closing bracket of the 4-element repr and mark elision.
        preview = str(embeds[:4])[:-1] + ", ...]"
    else:
        preview = embeds
    print(f"Embeddings: {preview} (size={len(embeds)})")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/utils/print_utils.py",
"license": "Apache License 2.0",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/llm/test_mm_embeds_only.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import weakref
import pytest
from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset
from vllm.distributed import cleanup_dist_env_and_memory
MODEL = "llava-hf/llava-1.5-7b-hf"
PROMPT = "USER: <image>\nDescribe this image briefly.\nASSISTANT:"
TEXT_ONLY_PROMPT = "USER: What is 2 + 2?\nASSISTANT:"
@pytest.fixture(scope="module")
def llm():
    """LLM with enable_mm_embeds=True and all modality limits zeroed out."""
    llm = LLM(
        model=MODEL,
        max_model_len=2048,
        enforce_eager=True,
        gpu_memory_utilization=0.8,
        enable_mm_embeds=True,
        limit_mm_per_prompt={"image": 0},
    )
    # Yield a weak proxy so the fixture itself does not keep the engine
    # alive; the real object is dropped and cleaned up below.
    yield weakref.proxy(llm)
    del llm
    cleanup_dist_env_and_memory()
@pytest.mark.skip_global_cleanup
def test_generate_with_embedding(llm: LLM):
    """Pre-computed embedding produces tokens without hanging."""
    embedding = ImageAsset("stop_sign").image_embeds
    outputs = llm.generate(
        {"prompt": PROMPT, "multi_modal_data": {"image": embedding}},
        # temperature=0.0 keeps the generation deterministic.
        sampling_params=SamplingParams(max_tokens=32, temperature=0.0),
    )
    assert len(outputs) == 1
    assert len(outputs[0].outputs[0].text) > 0
@pytest.mark.skip_global_cleanup
def test_raw_image_rejected(llm: LLM):
    """Raw image input is still rejected when limit=0."""
    raw_image = ImageAsset("stop_sign").pil_image
    # enable_mm_embeds only whitelists embeddings; raw images must still
    # hit the per-prompt modality limit.
    with pytest.raises(ValueError, match=r"At most 0 image\(s\)"):
        llm.generate(
            {"prompt": PROMPT, "multi_modal_data": {"image": raw_image}},
            sampling_params=SamplingParams(max_tokens=16),
        )
@pytest.mark.skip_global_cleanup
def test_text_only_prompt(llm: LLM):
    """Text-only prompts still work under this config."""
    outputs = llm.generate(
        TEXT_ONLY_PROMPT,
        # temperature=0.0 keeps the generation deterministic.
        sampling_params=SamplingParams(max_tokens=16, temperature=0.0),
    )
    assert len(outputs) == 1
    assert len(outputs[0].outputs[0].text) > 0
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/llm/test_mm_embeds_only.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/config/kernel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any, Literal
from pydantic import Field, field_validator
from vllm.config.utils import config
from vllm.utils.hashing import safe_hash
# Closed set of accepted values for KernelConfig.moe_backend; "auto" defers
# the choice to runtime backend selection.
MoEBackend = Literal[
    "auto",
    "triton",
    "deep_gemm",
    "cutlass",
    "flashinfer_trtllm",
    "flashinfer_cutlass",
    "flashinfer_cutedsl",
    "marlin",
    "aiter",
]
@config
class KernelConfig:
    """Configuration for kernel selection and warmup behavior."""

    # NOTE: annotated as bool but defaults to None so "unset" can be detected
    # and resolved later; the wrap validator below skips validation for the
    # None placeholder.
    enable_flashinfer_autotune: bool = Field(default=None)
    """If True, run FlashInfer autotuning during kernel warmup."""

    moe_backend: MoEBackend = "auto"
    """Backend for MoE expert computation kernels. Available options:

    - "auto": Automatically select the best backend based on model and hardware\n
    - "triton": Use Triton-based fused MoE kernels\n
    - "deep_gemm": Use DeepGEMM kernels (FP8 block-quantized only)\n
    - "cutlass": Use vLLM CUTLASS kernels\n
    - "flashinfer_trtllm": Use FlashInfer with TRTLLM-GEN kernels\n
    - "flashinfer_cutlass": Use FlashInfer with CUTLASS kernels\n
    - "flashinfer_cutedsl": Use FlashInfer with CuteDSL kernels (FP4 only)\n
    - "marlin": Use Marlin kernels (weight-only quantization)\n
    - "aiter": Use AMD AITer kernels (ROCm only)"""

    @field_validator("moe_backend", mode="before")
    @classmethod
    def _normalize_moe_backend(cls, value: Any) -> Any:
        # Accept user-friendly spellings like "Deep-GEMM" before Literal
        # validation runs.
        if isinstance(value, str):
            return value.lower().replace("-", "_")
        return value

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = safe_hash(str(factors).encode(), usedforsecurity=False).hexdigest()
        return hash_str

    @field_validator("enable_flashinfer_autotune", mode="wrap")
    @classmethod
    def _skip_none_validation(cls, value: Any, handler: Callable) -> Any:
        """Skip validation if the value is `None` when initialization is delayed."""
        if value is None:
            return value
        return handler(value)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/config/kernel.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/compile/passes/test_split_coalescing.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import vllm
from tests.compile.backend import TestBackend
from vllm.compilation.passes.utility.split_coalescing import SplitCoalescingPass
from vllm.config import CompilationConfig, CompilationMode, PassConfig, VllmConfig
class SplitCoalescingModel(torch.nn.Module):
    """Model with 3 separate split_with_sizes calls on the same input,
    simulating the B200+FP8 graph where CSE fails to merge them."""

    def __init__(self, q_size: int, kv_size: int) -> None:
        super().__init__()
        self.q_size = q_size
        self.kv_size = kv_size

    def forward(self, qkv: torch.Tensor):
        # Three deliberately duplicated splits: each traces to its own
        # aten.split_with_sizes node, which the pass under test must coalesce.
        q, _, _ = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        _, k, _ = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        _, _, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # Distinct offsets keep the three outputs distinguishable downstream.
        return q + 1, k + 2, v + 3
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16])
def test_split_coalescing(dtype):
    """Compile the model with only SplitCoalescingPass and check that the
    three duplicate splits collapse into one while outputs stay numerically
    close to eager."""
    torch.set_default_device("cuda")
    torch.set_default_dtype(dtype)
    torch.manual_seed(0)
    q_size, kv_size = 2048, 512
    vllm_config = VllmConfig(
        compilation_config=CompilationConfig(
            mode=CompilationMode.VLLM_COMPILE,
            pass_config=PassConfig(),
        )
    )
    # The pass reads the active vLLM config, so build and run it in context.
    with vllm.config.set_current_vllm_config(vllm_config):
        coalesce_pass = SplitCoalescingPass(vllm_config)
        backend = TestBackend(coalesce_pass)
        model = SplitCoalescingModel(q_size, kv_size)
        T = 5
        qkv = torch.randn(T, q_size + 2 * kv_size)
        # Mark the token dim dynamic so the graph matches serving conditions.
        torch._dynamo.mark_dynamic(qkv, 0)
        result_eager = model(qkv)
        model_compiled = torch.compile(model, backend=backend)
        result_compiled = model_compiled(qkv)
        ATOL, RTOL = (2e-3, 2e-3)
        for eager, compiled in zip(result_eager, result_compiled):
            torch.testing.assert_close(eager, compiled, atol=ATOL, rtol=RTOL)
        # The pass must have merged the three duplicates into one node.
        assert backend.op_count(torch.ops.aten.split_with_sizes.default) == 1
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/passes/test_split_coalescing.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/compilation/passes/utility/split_coalescing.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Coalesce duplicate ``split_with_sizes`` nodes that operate on the same
input tensor with the same split sizes.
On certain hardware/dtype combinations (e.g. B200 + FP8) the Inductor
graph may contain multiple ``split_with_sizes`` calls on the same tensor
that CSE fails to merge. This pass detects and replaces the duplicates
so that downstream pattern-matching passes (e.g. QK-Norm+RoPE fusion)
see a single split node with all users attached.
See also:
- vLLM #33295 (original issue)
- PyTorch #174472 (upstream CSE gap)
"""
import operator
import torch
from torch import fx
from vllm.logger import init_logger
from ..fx_utils import is_func
from ..vllm_inductor_pass import VllmInductorPass
logger = init_logger(__name__)
class SplitCoalescingPass(VllmInductorPass):
    """Replace duplicate ``split_with_sizes`` nodes with a single canonical
    node when they share the same input tensor and split sizes."""

    @VllmInductorPass.time_and_log
    def __call__(self, graph: fx.Graph) -> None:
        count = 0
        # Map from input tensor node -> list of split nodes seen so far.
        split_nodes: dict[fx.Node, list[fx.Node]] = {}
        for node in graph.nodes:
            if not is_func(node, torch.ops.aten.split_with_sizes.default):
                continue
            # Only coalesce splits consumed exclusively via getitem, so
            # replace_all_uses_with rewires every consumer safely.
            if not all(is_func(user, operator.getitem) for user in node.users):
                continue
            arg_node, split_sizes = node.args[:2]
            if arg_node not in split_nodes:
                # First split on this tensor becomes a candidate canonical node.
                split_nodes[arg_node] = [node]
                continue
            # Find existing node with same split_sizes
            canonical = next(
                (
                    n
                    for n in split_nodes[arg_node]
                    if list(n.args[1]) == list(split_sizes)
                ),
                None,
            )
            if canonical is not None:
                # Duplicate: redirect its getitem users to the canonical node
                # and drop it. NOTE(review): this erases the current node while
                # iterating graph.nodes -- fx's linked-list iteration appears
                # to tolerate erasing the already-yielded node, but confirm
                # against the torch.fx Graph documentation.
                node.replace_all_uses_with(canonical)
                graph.erase_node(node)
                count += 1
            else:
                split_nodes[arg_node].append(node)
        logger.debug("Coalesced %d duplicate split_with_sizes nodes", count)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/compilation/passes/utility/split_coalescing.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/offline_inference/pause_resume.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test for pause/resume with keep mode.
This test uses concurrent tasks to verify the engine truly stops generating
during pause:
1. Generator task: continuously generates and logs time between tokens
2. Controller task: sends pause/resume commands
If the engine properly pauses, we should see a gap in token timestamps
matching the pause duration.
"""
import asyncio
import time
from vllm import SamplingParams
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.v1.engine.async_llm import AsyncLLM
PAUSE_DURATION = 3.0 # seconds
async def main():
    """Run a generator and a pause/resume controller concurrently and verify
    the engine emits no tokens while paused."""
    # Create engine with a small model
    engine_args = AsyncEngineArgs(
        model="facebook/opt-125m",
        enforce_eager=True,
    )
    engine = AsyncLLM.from_engine_args(engine_args)
    prompt = "Write a story about a dragon. Once upon a time"
    sampling_params = SamplingParams(max_tokens=30, ignore_eos=True)
    # Track token arrival times
    token_times: list[tuple[int, float]] = []  # (token_count, timestamp)
    pause_time: float = 0
    resume_time: float = 0
    pause_token_idx: int = 0  # Index in token_times when pause occurred

    async def generator_task():
        """Generate tokens and record timestamps."""
        async for output in engine.generate(
            request_id="test-req",
            prompt=prompt,
            sampling_params=sampling_params,
        ):
            token_count = len(output.outputs[0].token_ids)
            token_times.append((token_count, time.monotonic()))
            print(
                f"Token {token_count} arrived:"
                f"T={token_times[-1][1] - token_times[0][1]:.3f}s"
            )
        # Returns the final RequestOutput (last value of the async iteration).
        return output

    async def controller_task():
        """Pause and resume the engine after some tokens generated."""
        nonlocal pause_time, resume_time, pause_token_idx
        # Wait for some tokens to be generated
        while len(token_times) < 5:
            await asyncio.sleep(0.01)
        print(f"\nPausing engine (keep mode) at token {len(token_times)}")
        pause_time = time.monotonic()
        await engine.pause_generation(mode="keep")
        pause_token_idx = len(token_times)
        print(f"Paused! Sleeping for {PAUSE_DURATION}s...")
        # Sleep while paused - no tokens should be generated during this time
        await asyncio.sleep(PAUSE_DURATION)
        print("Resuming engine...")
        await engine.resume_generation()
        resume_time = time.monotonic()
        print("Resumed!\n")

    # Run both tasks concurrently
    gen_task = asyncio.create_task(generator_task())
    ctrl_task = asyncio.create_task(controller_task())
    final_output, _ = await asyncio.gather(gen_task, ctrl_task)
    # Verify the pause actually stopped generation.
    # The gap after the pause token should be approximately the sleep duration.
    pause_gap = token_times[pause_token_idx][1] - token_times[pause_token_idx - 1][1]
    print(
        f"\nGap after pause (token {pause_token_idx - 1} -> {pause_token_idx}): "
        f"{pause_gap:.3f}s"
    )
    # Allow 10% slack for scheduling jitter around the pause window.
    if pause_gap >= PAUSE_DURATION * 0.9:
        print(f"✓ Test passed! Engine paused for ~{pause_gap:.1f}s")
    else:
        print(
            f"✗ Test failed! Expected ~{PAUSE_DURATION}s gap after pause, "
            f"got {pause_gap:.3f}s"
        )
        raise AssertionError("Engine did not properly pause")
    # Verify request completed
    assert final_output.finished, "Request should have finished"
    assert len(final_output.outputs[0].token_ids) == 30, "Should have all tokens"
    engine.shutdown()
if __name__ == "__main__":
    # Entry point: run the full pause/resume demonstration.
    asyncio.run(main())
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/pause_resume.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/cuda/scripts/check_device_count_respects_env.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Check that device_count respects CUDA_VISIBLE_DEVICES after platform import."""
import os
import sys
# Start from a clean slate: remove any visibility overrides inherited from the
# parent process before anything CUDA-related is imported.
for key in ["CUDA_VISIBLE_DEVICES", "HIP_VISIBLE_DEVICES", "ROCR_VISIBLE_DEVICES"]:
    os.environ.pop(key, None)
# Import order is the point of this script: torch and the vLLM platform layer
# are imported BEFORE the env var is set, then the var must still be honored.
import torch  # noqa: E402
from vllm.platforms import current_platform  # noqa: F401, E402
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
count = torch.cuda.device_count()
if count == 0:
    sys.exit(0)  # Skip: no GPUs available
assert count == 1, f"device_count()={count}, expected 1"
print("OK")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/cuda/scripts/check_device_count_respects_env.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/cuda/scripts/check_platform_no_cuda_init.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Check that vllm.platforms import does not initialize CUDA."""
import os
# Clear any GPU visibility overrides so CUDA initialization state is pristine.
for key in ["CUDA_VISIBLE_DEVICES", "HIP_VISIBLE_DEVICES", "ROCR_VISIBLE_DEVICES"]:
    os.environ.pop(key, None)
import torch  # noqa: E402
# Sanity check: merely importing torch must not initialize CUDA either.
assert not torch.cuda.is_initialized(), "CUDA initialized before import"
from vllm.platforms import current_platform  # noqa: E402
# The actual assertion under test: the platform import stays lazy.
assert not torch.cuda.is_initialized(), (
    f"CUDA was initialized during vllm.platforms import on {current_platform}"
)
print("OK")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/cuda/scripts/check_platform_no_cuda_init.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/cuda/test_platform_no_cuda_init.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Test that platform imports do not prematurely initialize CUDA.
This is critical for Ray-based multi-GPU setups where workers need to
set CUDA_VISIBLE_DEVICES after importing vLLM but before CUDA is initialized.
If CUDA is initialized during import, device_count() gets locked and ignores
subsequent env var changes.
"""
import subprocess
import sys
from pathlib import Path
import pytest
SCRIPTS_DIR = Path(__file__).parent / "scripts"
def run_script(script_name: str) -> subprocess.CompletedProcess:
    """Run a test script in a subprocess with clean CUDA state."""
    # A fresh interpreter guarantees no CUDA state leaks in from this process.
    command = [sys.executable, str(SCRIPTS_DIR / script_name)]
    return subprocess.run(command, capture_output=True, text=True)
def test_platform_import_does_not_init_cuda():
    """Test that importing vllm.platforms does not initialize CUDA."""
    outcome = run_script("check_platform_no_cuda_init.py")
    if outcome.returncode == 0:
        return
    pytest.fail(f"Platform import initialized CUDA:\n{outcome.stderr}")
def test_device_count_respects_env_after_platform_import():
    """Test that device_count respects CUDA_VISIBLE_DEVICES after import."""
    outcome = run_script("check_device_count_respects_env.py")
    if outcome.returncode == 0:
        return
    pytest.fail(
        f"device_count does not respect env var after import:\n{outcome.stderr}"
    )
if __name__ == "__main__":
    # Allow running this file directly, without invoking pytest externally.
    pytest.main([__file__, "-v"])
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/cuda/test_platform_no_cuda_init.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/renderers/inputs/test_preprocess.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm.renderers.inputs.preprocess import prompt_to_seq
def test_empty_input():
    # Empty containers pass through unchanged -- no wrapping is applied.
    assert prompt_to_seq([]) == []
    assert prompt_to_seq([[]]) == [[]]
    assert prompt_to_seq([[], []]) == [[], []]
def test_text_input():
    # A bare string is wrapped; a list of strings passes through.
    assert prompt_to_seq("foo") == ["foo"]
    assert prompt_to_seq(["foo"]) == ["foo"]
    assert prompt_to_seq(["foo", "bar"]) == ["foo", "bar"]
def test_token_input():
    # A flat token list is wrapped; a list of token lists passes through.
    assert prompt_to_seq([1, 2]) == [[1, 2]]
    assert prompt_to_seq([[1, 2]]) == [[1, 2]]
    assert prompt_to_seq([[1, 2], [3, 4]]) == [[1, 2], [3, 4]]
def test_text_token_input():
    # Mixed sequences of token lists and strings pass through unchanged.
    assert prompt_to_seq([[1, 2], "foo"]) == [[1, 2], "foo"]
    assert prompt_to_seq(["foo", [1, 2]]) == ["foo", [1, 2]]
def test_bytes_input():
    # bytes behave like str: a lone value is wrapped, sequences pass through.
    assert prompt_to_seq(b"foo") == [b"foo"]
    assert prompt_to_seq([b"foo"]) == [b"foo"]
    assert prompt_to_seq([b"foo", b"bar"]) == [b"foo", b"bar"]
def test_dict_input():
    # A single dict prompt is wrapped; lists of dict prompts pass through.
    assert prompt_to_seq({"prompt": "foo"}) == [{"prompt": "foo"}]
    assert prompt_to_seq([{"prompt": "foo"}]) == [{"prompt": "foo"}]
    assert prompt_to_seq([{"prompt": "foo"}, {"prompt_token_ids": [1, 2]}]) == [
        {"prompt": "foo"},
        {"prompt_token_ids": [1, 2]},
    ]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/renderers/inputs/test_preprocess.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/renderers/inputs/preprocess.py | """
Schemas and utilities for preprocessing inputs.
"""
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
from typing import TYPE_CHECKING, NamedTuple, TypeAlias, TypedDict, overload
from vllm.inputs import (
EmbedsPrompt,
ExplicitEncoderDecoderPrompt,
ProcessorInputs,
PromptType,
SingletonPrompt,
TextPrompt,
TokensPrompt,
)
from vllm.utils import length_from_prompt_token_ids_or_embeds
from vllm.utils.collection_utils import is_list_of
if TYPE_CHECKING:
import torch
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
@overload
def prompt_to_seq(
    prompt_or_prompts: SingletonPrompt | bytes | Sequence[SingletonPrompt | bytes],
) -> Sequence[SingletonPrompt]: ...


@overload
def prompt_to_seq(  # type: ignore[misc]
    prompt_or_prompts: ExplicitEncoderDecoderPrompt
    | Sequence[ExplicitEncoderDecoderPrompt],
) -> Sequence[ExplicitEncoderDecoderPrompt]: ...


@overload
def prompt_to_seq(  # type: ignore[misc]
    prompt_or_prompts: PromptType | Sequence[PromptType],
) -> Sequence[PromptType]: ...


def prompt_to_seq(
    prompt_or_prompts: PromptType | bytes | Sequence[PromptType | bytes],
) -> Sequence[PromptType]:
    """Normalize one prompt or a sequence of prompts into a sequence.

    A lone dict/str/bytes prompt, or a bare token-ID list (non-empty list
    of ints), is wrapped in a one-element list; everything else is assumed
    to already be a sequence of prompts and is returned unchanged.
    """
    if isinstance(prompt_or_prompts, (dict, str, bytes)) or (
        len(prompt_or_prompts) > 0 and is_list_of(prompt_or_prompts, int)
    ):
        return [prompt_or_prompts]  # type: ignore[list-item]
    return prompt_or_prompts  # type: ignore[return-value]
def conversation_to_seq(
    conversation_or_conversations: list["ChatCompletionMessageParam"]
    | Sequence[list["ChatCompletionMessageParam"]],
) -> Sequence[list["ChatCompletionMessageParam"]]:
    """Wrap a single conversation (a non-empty list of message dicts) in a
    one-element sequence; pass a sequence of conversations through as-is."""
    convs = conversation_or_conversations
    is_single_conversation = len(convs) > 0 and is_list_of(convs, dict)
    if is_single_conversation:
        return [convs]  # type: ignore[list-item]
    return convs  # type: ignore[return-value]
# Dictionary-normalized prompt types produced by the parse_* helpers in this
# module: every accepted prompt form is converted into one of these shapes.
DecoderOnlyDictPrompt: TypeAlias = TextPrompt | TokensPrompt | EmbedsPrompt
"""
A [`DecoderOnlyPrompt`][vllm.inputs.data.DecoderOnlyPrompt]
that has been standardized into a dictionary.
"""

EncoderDictPrompt: TypeAlias = TextPrompt | TokensPrompt
"""
A [`EncoderPrompt`][vllm.inputs.data.EncoderPrompt]
that has been standardized into a dictionary.
"""

DecoderDictPrompt: TypeAlias = TextPrompt | TokensPrompt
"""
A [`DecoderPrompt`][vllm.inputs.data.DecoderPrompt]
that has been standardized into a dictionary.
"""


class EncoderDecoderDictPrompt(TypedDict):
    """
    A [`EncoderDecoderPrompt`][vllm.inputs.data.EncoderDecoderPrompt]
    that has been standardized into a dictionary.
    """

    encoder_prompt: EncoderDictPrompt
    decoder_prompt: DecoderDictPrompt | None


SingletonDictPrompt: TypeAlias = (
    DecoderOnlyDictPrompt | EncoderDictPrompt | DecoderDictPrompt
)
"""
A [`SingletonPrompt`][vllm.inputs.data.SingletonPrompt]
that has been standardized into a dictionary.
"""

DictPrompt: TypeAlias = DecoderOnlyDictPrompt | EncoderDecoderDictPrompt
"""
A [`PromptType`][vllm.inputs.data.PromptType]
that has been standardized into a dictionary.
"""
def parse_dec_only_prompt(prompt: PromptType | object) -> DecoderOnlyDictPrompt:
    """
    Parse a prompt for a decoder-only model and normalize it to a dictionary.
    """
    if isinstance(prompt, str):
        return TextPrompt(prompt=prompt)
    if isinstance(prompt, list):
        if is_list_of(prompt, int):
            return TokensPrompt(prompt_token_ids=prompt)
        raise TypeError("Token prompt should be a list of integers")
    if isinstance(prompt, dict):
        if "encoder_prompt" in prompt:
            raise TypeError("Cannot pass encoder-decoder prompt to decoder-only models")
        # Any one of the three content keys makes the dict a valid prompt.
        for key in ("prompt", "prompt_token_ids", "prompt_embeds"):
            if key in prompt:
                return prompt  # type: ignore[return-value]
        raise TypeError("Prompt dictionary must contain text, tokens, or embeddings")
    raise TypeError("Prompt should be a string, list of tokens, or dictionary")
def _parse_enc_prompt(prompt: PromptType | object) -> EncoderDictPrompt:
    # Encoder prompts accept text or tokens, never embeddings.
    if isinstance(prompt, str):
        return TextPrompt(prompt=prompt)
    if isinstance(prompt, list):
        if is_list_of(prompt, int):
            return TokensPrompt(prompt_token_ids=prompt)
        raise TypeError("Token prompt should be a list of integers")
    if isinstance(prompt, dict):
        if "prompt_embeds" in prompt:
            raise TypeError("Cannot pass embeddings prompt to encoder-decoder models")
        for key in ("prompt", "prompt_token_ids"):
            if key in prompt:
                return prompt  # type: ignore[return-value]
        raise TypeError("Prompt dictionary must contain text or tokens")
    raise TypeError("Prompt should be a string, list of tokens, or dictionary")
def _parse_dec_prompt(prompt: PromptType | object) -> DecoderDictPrompt:
    # Decoder prompts accept text or tokens; embeddings and multi-modal
    # fields are rejected.
    if isinstance(prompt, str):
        return TextPrompt(prompt=prompt)
    if isinstance(prompt, list):
        if is_list_of(prompt, int):
            return TokensPrompt(prompt_token_ids=prompt)
        raise TypeError("Token prompt should be a list of integers")
    if isinstance(prompt, dict):
        if "prompt_embeds" in prompt:
            raise TypeError("Cannot pass embeddings prompt to encoder-decoder models")
        mm_keys = ("multi_modal_data", "mm_processor_kwargs", "multi_modal_uuids")
        if any(key in prompt for key in mm_keys):
            raise TypeError("Cannot pass multi-modal inputs to decoder prompt")
        if "prompt" in prompt or "prompt_token_ids" in prompt:
            return prompt  # type: ignore[return-value]
        raise TypeError("Prompt dictionary must contain text or tokens")
    raise TypeError("Prompt should be a string, list of tokens, or dictionary")
def parse_enc_dec_prompt(prompt: PromptType | object) -> EncoderDecoderDictPrompt:
    """
    Parse a prompt for an encoder-decoder model and normalize it to a dictionary.
    """
    enc_part: object = prompt
    dec_part: object = None
    if isinstance(prompt, dict) and "encoder_prompt" in prompt:
        # Explicit encoder/decoder pair: pull out both halves.
        enc_part = prompt["encoder_prompt"]  # type: ignore[typeddict-item]
        dec_part = prompt["decoder_prompt"]  # type: ignore[typeddict-item]
    parsed_dec = None if dec_part is None else _parse_dec_prompt(dec_part)
    return EncoderDecoderDictPrompt(
        encoder_prompt=_parse_enc_prompt(enc_part),
        decoder_prompt=parsed_dec,
    )
def parse_model_prompt(model_config: "ModelConfig", prompt: object):
    """Normalize *prompt* according to the model architecture."""
    if not model_config.is_encoder_decoder:
        return parse_dec_only_prompt(prompt)
    return parse_enc_dec_prompt(prompt)
class PromptComponents(NamedTuple):
    # Decomposed view of a prompt's content fields; any field absent from the
    # source prompt dictionary is left as None.
    text: str | None = None
    token_ids: list[int] | None = None
    embeds: "torch.Tensor | None" = None
def extract_target_prompt(model_config: "ModelConfig", prompt: object):
    """Return the prompt that actually feeds the model: the encoder half for
    encoder-decoder models, otherwise the prompt itself."""
    if model_config.is_encoder_decoder:
        return parse_enc_dec_prompt(prompt)["encoder_prompt"]
    return parse_dec_only_prompt(prompt)
def extract_prompt_components(
    model_config: "ModelConfig",
    prompt: PromptType | ProcessorInputs,
) -> PromptComponents:
    """Pull the text/token/embedding fields out of the target prompt."""
    target = extract_target_prompt(model_config, prompt)
    return PromptComponents(
        text=target.get("prompt"),
        token_ids=target.get("prompt_token_ids"),
        embeds=target.get("prompt_embeds"),
    )
def extract_prompt_len(
    model_config: "ModelConfig", prompt: PromptType | ProcessorInputs
):
    """Length of the target prompt, derived from token IDs or embeddings."""
    target = extract_target_prompt(model_config, prompt)
    token_ids = target.get("prompt_token_ids")
    embeds = target.get("prompt_embeds")
    return length_from_prompt_token_ids_or_embeds(token_ids, embeds)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/inputs/preprocess.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/inputs/tokenize.py | """
Schemas and utilities for tokenization inputs.
"""
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TypeAlias, TypedDict
from vllm.inputs import EmbedsPrompt, TokensPrompt
# Tokenized counterparts of the dict-prompt types: after tokenization, text
# prompts have been replaced by token-ID prompts.
DecoderOnlyTokPrompt: TypeAlias = TokensPrompt | EmbedsPrompt
"""
A [`DecoderOnlyDictPrompt`][vllm.renderers.inputs.preprocess.DecoderOnlyDictPrompt]
that has been tokenized.
"""

EncoderTokPrompt: TypeAlias = TokensPrompt
"""
A [`EncoderDictPrompt`][vllm.renderers.inputs.preprocess.EncoderDictPrompt]
that has been tokenized.
"""

DecoderTokPrompt: TypeAlias = TokensPrompt
"""
A [`DecoderDictPrompt`][vllm.renderers.inputs.preprocess.DecoderDictPrompt]
that has been tokenized.
"""


class EncoderDecoderTokPrompt(TypedDict):
    """
    A
    [`EncoderDecoderDictPrompt`][vllm.renderers.inputs.preprocess.EncoderDecoderDictPrompt]
    that has been tokenized.
    """

    encoder_prompt: EncoderTokPrompt
    decoder_prompt: DecoderTokPrompt | None


SingletonTokPrompt: TypeAlias = (
    DecoderOnlyTokPrompt | EncoderTokPrompt | DecoderTokPrompt
)
"""
A [`SingletonDictPrompt`][vllm.renderers.inputs.preprocess.SingletonDictPrompt]
that has been tokenized.
"""

TokPrompt: TypeAlias = DecoderOnlyTokPrompt | EncoderDecoderTokPrompt
"""
A [`DictPrompt`][vllm.renderers.inputs.preprocess.DictPrompt]
that has been tokenized.
"""
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/inputs/tokenize.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/lora/test_qwen3_unembed.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for Qwen3 unembed LoRA support.
This test creates synthetic LoRA weights that include lm_head (output embedding)
to verify that Qwen3 properly supports LoRA on the unembed/lm_head layer.
"""
import json
import os
import tempfile
import numpy as np
import torch
from safetensors.torch import save_file
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest
MODEL_PATH = "Qwen/Qwen3-0.6B"
HIDDEN_SIZE = 1024
VOCAB_SIZE = 151936
def create_qwen3_lora_with_lm_head(save_dir: str, rank: int = 8) -> None:
    """Create synthetic Qwen3 LoRA weights with lm_head."""

    def _random_lora_a() -> torch.Tensor:
        # Small random fp16 down-projection matching the base model dtype.
        data = np.random.randn(rank, HIDDEN_SIZE).astype(np.float16) * 0.01
        return torch.from_numpy(data)

    weights: dict[str, torch.Tensor] = {}
    for module in ["q_proj", "v_proj"]:
        prefix = f"base_model.model.model.layers.0.self_attn.{module}"
        weights[f"{prefix}.lora_A.weight"] = _random_lora_a()
        # Zero B matrices keep the adapter a no-op numerically.
        weights[f"{prefix}.lora_B.weight"] = torch.zeros(
            HIDDEN_SIZE, rank, dtype=torch.float16
        )

    # lm_head LoRA weights
    weights["base_model.model.lm_head.lora_A.weight"] = _random_lora_a()
    weights["base_model.model.lm_head.lora_B.weight"] = torch.zeros(
        VOCAB_SIZE, rank, dtype=torch.float16
    )

    adapter_config = {
        "peft_type": "LORA",
        "base_model_name_or_path": MODEL_PATH,
        "task_type": "CAUSAL_LM",
        "inference_mode": True,
        "r": rank,
        "lora_alpha": rank * 2,
        "lora_dropout": 0.0,
        "bias": "none",
        "target_modules": ["q_proj", "v_proj", "lm_head"],
    }
    os.makedirs(save_dir, exist_ok=True)
    with open(os.path.join(save_dir, "adapter_config.json"), "w") as f:
        json.dump(adapter_config, f)
    save_file(weights, os.path.join(save_dir, "adapter_model.safetensors"))
def test_qwen3_unembed_lora():
    """Verify Qwen3 can load and generate with LoRA adapters with lm_head."""
    with tempfile.TemporaryDirectory() as tmpdir:
        # Initialize engine first (before creating torch tensors)
        llm = LLM(
            model=MODEL_PATH,
            enable_lora=True,
            max_loras=4,
            max_lora_rank=8,
            max_model_len=128,
            gpu_memory_utilization=0.8,
            enforce_eager=True,
        )
        # Create LoRA weights after engine init
        create_qwen3_lora_with_lm_head(tmpdir, rank=8)
        lora_request = LoRARequest("lm_head_lora", 1, tmpdir)
        llm.llm_engine.add_lora(lora_request)
        assert 1 in llm.llm_engine.list_loras(), "lm_head LoRA should be loaded"
        # Test generation
        sampling_params = SamplingParams(temperature=0, max_tokens=32)
        prompts = ["Hello, my name is"]
        # Generate with base model (no LoRA)
        base_outputs = llm.generate(prompts, sampling_params, use_tqdm=False)
        assert len(base_outputs) == 1
        assert len(base_outputs[0].outputs[0].text) > 0
        # Generate with lm_head LoRA; the zeroed B matrices make the adapter
        # numerically inert, so success here means loading/plumbing works.
        lora_outputs = llm.generate(
            prompts, sampling_params, lora_request=lora_request, use_tqdm=False
        )
        assert len(lora_outputs) == 1
        assert len(lora_outputs[0].outputs[0].text) > 0
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/lora/test_qwen3_unembed.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/correctness_e2e/test_async_tp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import pytest
from tests.models.registry import HF_EXAMPLE_MODELS
from tests.utils import (
compare_two_settings,
create_new_process_for_each_test,
)
from vllm.config import (
CompilationMode,
)
@create_new_process_for_each_test()
@pytest.mark.parametrize(
    "model_id",
    ["meta-llama/Llama-3.2-1B-Instruct", "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8"],
)
@pytest.mark.parametrize("tp_size", [2])
@pytest.mark.parametrize("async_tp_enabled", [True])
@pytest.mark.parametrize("distributed_backend", ["mp"])
@pytest.mark.parametrize("eager_mode", [False, True])
def test_async_tp_pass_correctness(
    model_id: str,
    tp_size: int,
    async_tp_enabled: bool,
    distributed_backend: str,
    eager_mode: bool,
    num_gpus_available: int,
):
    """Compare generations with and without the async-TP (fuse_gemm_comms)
    compilation pass; both runs must produce matching outputs."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model_id)
    model_info.check_transformers_version(on_fail="skip")
    model_info.check_available_online(on_fail="skip")
    pp_size = 1
    if num_gpus_available < tp_size:
        pytest.skip(f"Need at least {tp_size} x {pp_size} GPUs")
    common_args = [
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "2048",
        "--max-num-seqs",
        "8",
    ]
    if eager_mode:
        common_args.append("--enforce-eager")
    compilation_config = {
        "mode": CompilationMode.VLLM_COMPILE,
        "compile_sizes": [2, 4, 8],
        "splitting_ops": [],
        "pass_config": {"fuse_gemm_comms": async_tp_enabled},
    }
    # Candidate: TP with the async-TP fusion pass enabled via compile config.
    async_tp_args = [
        *common_args,
        "--tensor-parallel-size",
        str(tp_size),
        "--distributed-executor-backend",
        distributed_backend,
        "--compilation_config",
        json.dumps(compilation_config),
    ]
    # Baseline: plain TP without the fusion pass.
    tp_args = [
        *common_args,
        "--tensor-parallel-size",
        str(tp_size),
        "--distributed-executor-backend",
        "mp",
    ]
    compare_two_settings(model_id, async_tp_args, tp_args, method="generate")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/correctness_e2e/test_async_tp.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/compilation/passes/fusion/collective_fusion.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch._inductor.pattern_matcher as pm
import torch.fx as fx
from torch._inductor.pattern_matcher import PatternMatcherPass
from torch.distributed._symmetric_memory import enable_symm_mem_for_group
from vllm.config import VllmConfig
from vllm.config.utils import Range
from vllm.distributed import get_tp_group
from vllm.distributed.parallel_state import (
get_tensor_model_parallel_world_size,
)
from vllm.logger import init_logger
from vllm.platforms import current_platform
from ..inductor_pass import enable_fake_mode
from ..vllm_inductor_pass import VllmInductorPass, VllmPatternMatcherPass
FP8_DTYPE = current_platform.fp8_dtype()
logger = init_logger(__name__)
class BasePattern:
    """Shared state for TP collective-fusion patterns: target dtype/device and
    the tensor-parallel group the rewritten ops will use."""

    def __init__(self, dtype: torch.dtype, device: str | None) -> None:
        self.dtype = dtype
        self.device = device
        self.tp = get_tp_group()
        self.tp_size = get_tensor_model_parallel_world_size()
class GEMMReduceScatterPattern(BasePattern):
    """Rewrite mm -> reduce_scatter into the fused symm_mem
    fused_matmul_reduce_scatter op."""

    def get_inputs(self) -> list[torch.Tensor]:
        # Small placeholder tensors; only shapes/dtypes matter for tracing.
        mul = torch.empty([16, 4], device=self.device, dtype=self.dtype)
        mm_weight = torch.empty([4, 4], device=self.device, dtype=self.dtype)
        return [mul, mm_weight]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(mul: torch.Tensor, mm_weight: torch.Tensor) -> torch.Tensor:
            # Unfused form: matmul followed by a reduce-scatter over TP ranks.
            mm = torch.ops.aten.mm.default(mul, mm_weight)
            reduce_scatter = torch.ops.vllm.reduce_scatter.default(
                mm,
                dim=0,
                world_size=self.tp_size,
                group_name=self.tp.unique_name,
            )
            return reduce_scatter

        def replacement(mul: torch.Tensor, mm_weight: torch.Tensor) -> torch.Tensor:
            # Fused form overlapping the matmul with the scatter communication.
            gemm_rs = torch.ops.symm_mem.fused_matmul_reduce_scatter(
                mul,
                mm_weight,
                "sum",
                scatter_dim=0,
                group_name=self.tp.device_group.group_name,
            )
            return gemm_rs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class AllGatherGEMMPattern(BasePattern):
    """Replace ``vllm.all_gather`` followed by ``aten.mm`` with the
    symmetric-memory ``fused_all_gather_matmul`` op."""

    def get_inputs(self) -> list[torch.Tensor]:
        # Sample trace inputs; only shapes/dtypes matter for matching.
        x = torch.empty([4, 4], device=self.device, dtype=self.dtype)
        weight = torch.empty([4, 4], device=self.device, dtype=self.dtype)
        return [x, weight]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            x: torch.Tensor,
            weight: torch.Tensor,
        ) -> torch.Tensor:
            # Graph shape to match: gather activations along dim 0, then mm.
            all_gather = torch.ops.vllm.all_gather.default(
                x,
                dim=0,
                world_size=self.tp_size,
                group_name=self.tp.unique_name,
            )

            return torch.ops.aten.mm.default(all_gather, weight)

        def replacement(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
            # NOTE(review): fused_all_gather_matmul returns a list of matmul
            # outputs (one per weight); with a single weight the matcher
            # substitutes it for the single mm result — verify unwrapping.
            ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_matmul(
                x,
                [weight],
                gather_dim=0,
                group_name=self.tp.device_group.group_name,
            )
            return mm_outputs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class ScaledMMReduceScatterPattern(BasePattern):
    """Replace ``aten._scaled_mm`` (FP8 inputs, float32 scales) followed by
    ``vllm.reduce_scatter`` with vLLM's patched fused
    scaled-matmul/reduce-scatter op."""

    def get_inputs(self) -> list[torch.Tensor]:
        # FP8 sample operands; the weight is made column-major via
        # .contiguous().transpose(0, 1) to mirror real layouts.
        input = torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
        mm_weight = (
            torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
            .contiguous()
            .transpose(0, 1)
        )
        # Per-row scale for activations, per-column scale for weights.
        scale_a = torch.empty([16, 1], device=self.device, dtype=torch.float32)
        scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32)
        return [input, mm_weight, scale_a, scale_b]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            input: torch.Tensor,
            mat2: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
        ) -> torch.Tensor:
            # Graph shape to match: scaled matmul, then reduce-scatter.
            scaled_mm = torch.ops.aten._scaled_mm.default(
                input,
                mat2=mat2,
                scale_a=scale_a,
                scale_b=scale_b,
                bias=None,
                scale_result=None,
                out_dtype=self.dtype,
            )
            reduce_scatter = torch.ops.vllm.reduce_scatter.default(
                scaled_mm,
                dim=0,
                world_size=self.tp_size,
                group_name=self.tp.unique_name,
            )
            return reduce_scatter

        def replacement(
            input: torch.Tensor,
            mat2: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
        ) -> torch.Tensor:
            # Calculate output shape: input @ mat2 with scatter_dim reduced
            output_shape = [*input.shape[:-1], mat2.shape[1]]
            scatter_dim = 0
            gemm_rs = torch.ops.vllm.patched_fused_scaled_matmul_reduce_scatter(
                input,
                mat2,
                scale_a,
                scale_b,
                "sum",
                scatter_dim,  # orig_scatter_dim
                scatter_dim,  # scatter_dim_after_maybe_reshape
                self.tp.device_group.group_name,
                output_shape,
                None,  # bias
                None,  # result_scale
                self.dtype,  # out_dtype
                False,  # use_fast_accum
            )

            return gemm_rs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class AllGatherScaledMMPattern(BasePattern):
    """Replace ``vllm.all_gather`` followed by ``aten._scaled_mm`` with the
    symmetric-memory ``fused_all_gather_scaled_matmul`` op."""

    def get_inputs(self) -> list[torch.Tensor]:
        x = torch.empty([8, 16], device=self.device, dtype=FP8_DTYPE)
        weight = (
            torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
            .contiguous()
            .transpose(0, 1)
        )

        # scale_a is sized for the gathered activation (local rows * tp_size).
        s1 = x.shape[0] * self.tp_size
        scale_a = torch.empty([s1, 1], device=self.device, dtype=torch.float32)
        scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32)
        return [x, weight, scale_a, scale_b]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            x: torch.Tensor,
            weight: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
        ) -> torch.Tensor:
            # Graph shape to match: gather along dim 0, then scaled matmul.
            all_gather = torch.ops.vllm.all_gather.default(
                x, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name
            )

            return torch.ops.aten._scaled_mm.default(
                all_gather,
                mat2=weight,
                scale_a=scale_a,
                scale_b=scale_b,
                bias=None,
                scale_result=None,
                out_dtype=self.dtype,
            )

        def replacement(
            x: torch.Tensor,
            weight: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
        ) -> torch.Tensor:
            # Single-weight call; list-typed args mirror the op's signature.
            ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_scaled_matmul(  # noqa
                x,
                [weight],
                scale_a,
                [scale_b],
                gather_dim=0,
                biases=[None],
                result_scales=[None],
                out_dtypes=[self.dtype],
                use_fast_accum=[False],
                group_name=self.tp.device_group.group_name,
            )
            return mm_outputs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class CutlassScaledMMReduceScatterPattern(BasePattern):
    """Replace vLLM's ``cutlass_scaled_mm`` custom op (seen in the graph as
    an ``auto_functionalized`` node) followed by ``vllm.reduce_scatter`` with
    the patched fused scaled-matmul/reduce-scatter op."""

    def get_inputs(self) -> list[torch.Tensor]:
        input = torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
        mm_weight = (
            torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
            .contiguous()
            .transpose(0, 1)
        )
        scale_a = torch.empty([16, 1], device=self.device, dtype=torch.float32)
        scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32)

        # Pre-allocated output buffer required by the out-variant custom op.
        cutlass_mm_output = torch.empty([16, 16], device=self.device, dtype=self.dtype)
        return [input, mm_weight, scale_a, scale_b, cutlass_mm_output]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            input: torch.Tensor,
            weight: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
            cutlass_mm_output: torch.Tensor,
        ) -> torch.Tensor:
            cutlass_scaled_mm = torch.ops.higher_order.auto_functionalized(
                torch.ops._C.cutlass_scaled_mm.default,
                out=cutlass_mm_output,
                a=input,
                b=weight,
                a_scales=scale_a,
                b_scales=scale_b,
                bias=None,
            )
            # auto_functionalized returns a tuple; index 1 is the mm result.
            reduce_scatter = torch.ops.vllm.reduce_scatter.default(
                cutlass_scaled_mm[1],
                dim=0,
                world_size=self.tp_size,
                group_name=self.tp.unique_name,
            )
            return reduce_scatter

        def replacement(
            input: torch.Tensor,
            mat2: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
            cutlass_mm_output: torch.Tensor,
        ) -> torch.Tensor:
            # Calculate output shape: input @ mat2 with scatter_dim reduced
            output_shape = [*input.shape[:-1], mat2.shape[1]]
            scatter_dim = 0
            gemm_rs = torch.ops.vllm.patched_fused_scaled_matmul_reduce_scatter(
                input,
                mat2,
                scale_a,
                scale_b,
                "sum",
                scatter_dim,  # orig_scatter_dim
                scatter_dim,  # scatter_dim_after_maybe_reshape
                self.tp.device_group.group_name,
                output_shape,
                None,  # bias
                None,  # result_scale
                self.dtype,  # out_dtype
                False,  # use_fast_accum
            )

            return gemm_rs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class AllGatherCutlassScaledMMPattern(BasePattern):
    """Replace ``vllm.all_gather`` feeding vLLM's ``cutlass_scaled_mm``
    custom op (an ``auto_functionalized`` node) with the symmetric-memory
    ``fused_all_gather_scaled_matmul`` op."""

    def get_inputs(self) -> list[torch.Tensor]:
        x = torch.empty([8, 16], device=self.device, dtype=FP8_DTYPE)
        weight = (
            torch.empty([16, 16], device=self.device, dtype=FP8_DTYPE)
            .contiguous()
            .transpose(0, 1)
        )

        # Scale and output buffers are sized for the gathered activation.
        s1 = x.shape[0] * self.tp_size
        scale_a = torch.empty([s1, 1], device=self.device, dtype=torch.float32)
        scale_b = torch.empty([1, 16], device=self.device, dtype=torch.float32)

        s2 = weight.shape[1]
        output = torch.empty([s1, s2], device=self.device, dtype=self.dtype)

        return [x, weight, scale_a, scale_b, output]

    def register(self, pm_pass: PatternMatcherPass) -> None:
        def pattern(
            x: torch.Tensor,
            weight: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
            output: torch.Tensor,
        ) -> torch.Tensor:
            all_gather = torch.ops.vllm.all_gather.default(
                x, dim=0, world_size=self.tp_size, group_name=self.tp.unique_name
            )

            cutlass_scaled_mm = torch.ops.higher_order.auto_functionalized(
                torch.ops._C.cutlass_scaled_mm.default,
                out=output,
                a=all_gather,
                b=weight,
                a_scales=scale_a,
                b_scales=scale_b,
                bias=None,
            )
            # Index 1 of the auto_functionalized tuple is the mm result.
            return cutlass_scaled_mm[1]

        def replacement(
            x: torch.Tensor,
            weight: torch.Tensor,
            scale_a: torch.Tensor,
            scale_b: torch.Tensor,
            output: torch.Tensor,
        ) -> torch.Tensor:
            ag_output, mm_outputs = torch.ops.symm_mem.fused_all_gather_scaled_matmul(  # noqa
                x,
                [weight],
                scale_a,
                [scale_b],
                gather_dim=0,
                biases=[None],
                result_scales=[None],
                out_dtypes=[self.dtype],
                use_fast_accum=[False],
                group_name=self.tp.device_group.group_name,
            )
            return mm_outputs

        pm.register_replacement(
            pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass
        )
class AsyncTPPass(VllmPatternMatcherPass):
    """Inductor pass that rewrites TP collectives + matmuls into fused
    async-TP (symmetric-memory) kernels by applying the patterns above."""

    @enable_fake_mode
    def __init__(self, config: VllmConfig) -> None:
        super().__init__(config)

        # Enable symmetric memory for the TP process group
        enable_symm_mem_for_group(get_tp_group().device_group.group_name)
        self.patterns: PatternMatcherPass = PatternMatcherPass(
            pass_name="async_tp_pass"
        )
        GEMMReduceScatterPattern(self.model_dtype, self.device).register(self.patterns)
        AllGatherGEMMPattern(self.model_dtype, self.device).register(self.patterns)

        # These fusions are enabled only for bfloat16 models because
        # `scaled_mm` or `cutlass_scaled_mm` with per-token (row-wise) scaling
        # only supports bfloat16 as the output dtype.
        if self.model_dtype == torch.bfloat16:
            ScaledMMReduceScatterPattern(self.model_dtype, self.device).register(
                self.patterns
            )
            AllGatherScaledMMPattern(self.model_dtype, self.device).register(
                self.patterns
            )
            CutlassScaledMMReduceScatterPattern(self.model_dtype, self.device).register(
                self.patterns
            )
            AllGatherCutlassScaledMMPattern(self.model_dtype, self.device).register(
                self.patterns
            )
        self.dump_patterns(config, self.patterns)

    def is_applicable_for_range(self, compile_range: Range) -> bool:
        # This pass is applied on top of the sequence parallelism pass.
        # It inherits the same applicability condition as `SequenceParallelismPass`.
        # See `SequenceParallelismPass.is_applicable` for more details.
        if (
            not self.compilation_config.splitting_ops
            or self.compilation_config.use_inductor_graph_partition
        ):
            return True
        # Otherwise only compile ranges covering a single size divisible by
        # the TP world size are eligible.
        tp_size = get_tensor_model_parallel_world_size()
        return bool(compile_range.is_single_size() and compile_range.end % tp_size == 0)

    @VllmInductorPass.time_and_log
    def __call__(self, graph: fx.Graph) -> None:
        # Apply all registered replacements in-place on the fx graph.
        self.matched_count = self.patterns.apply(graph)
        logger.debug("Replaced %s patterns", self.matched_count)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/compilation/passes/fusion/collective_fusion.py",
"license": "Apache License 2.0",
"lines": 361,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tools/pre_commit/check_forbidden_imports.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import sys
from dataclasses import dataclass, field
import regex as re
@dataclass
class ForbiddenImport:
    """A single banned-import rule: detection pattern, remediation tip,
    and exemptions (by matching line or by file path)."""

    pattern: str  # regex source flagging a forbidden import line
    tip: str  # message printed when the pattern matches
    allowed_pattern: re.Pattern = re.compile(r"^$")  # matches nothing by default
    allowed_files: set[str] = field(default_factory=set)  # exempted file paths
# Registry of banned imports: rule name -> ForbiddenImport describing how to
# detect the import, what to tell the developer, and which files are exempt.
CHECK_IMPORTS = {
    "pickle/cloudpickle": ForbiddenImport(
        pattern=(
            r"^\s*(import\s+(pickle|cloudpickle)(\s|$|\sas)"
            r"|from\s+(pickle|cloudpickle)\s+import\b)"
        ),
        tip=(
            "Avoid using pickle or cloudpickle or add this file to "
            "tools/pre_commit/check_forbidden_imports.py."
        ),
        allowed_files={
            # pickle
            "vllm/multimodal/hasher.py",
            "vllm/transformers_utils/config.py",
            "vllm/model_executor/models/registry.py",
            "vllm/compilation/caching.py",
            "vllm/compilation/piecewise_backend.py",
            "vllm/distributed/utils.py",
            "vllm/distributed/parallel_state.py",
            "vllm/distributed/device_communicators/all_reduce_utils.py",
            "vllm/distributed/device_communicators/shm_broadcast.py",
            "vllm/distributed/device_communicators/shm_object_storage.py",
            "vllm/distributed/weight_transfer/ipc_engine.py",
            "tests/distributed/test_weight_transfer.py",
            "vllm/utils/hashing.py",
            "tests/multimodal/media/test_base.py",
            "tests/tokenizers_/test_hf.py",
            "tests/utils_/test_hashing.py",
            "tests/compile/test_aot_compile.py",
            "benchmarks/kernels/graph_machete_bench.py",
            "benchmarks/kernels/benchmark_lora.py",
            "benchmarks/kernels/benchmark_machete.py",
            "benchmarks/fused_kernels/layernorm_rms_benchmarks.py",
            "benchmarks/cutlass_benchmarks/w8a8_benchmarks.py",
            "benchmarks/cutlass_benchmarks/sparse_benchmarks.py",
            # cloudpickle
            "vllm/v1/executor/multiproc_executor.py",
            "vllm/v1/executor/ray_executor.py",
            "vllm/entrypoints/llm.py",
            "tests/utils.py",
            # pickle and cloudpickle
            "vllm/v1/serial_utils.py",
        },
    ),
    "re": ForbiddenImport(
        pattern=r"^\s*(?:import\s+re(?:$|\s|,)|from\s+re\s+import)",
        tip="Replace 'import re' with 'import regex as re' or 'import regex'.",
        # Lines importing the `regex` package itself are fine.
        allowed_pattern=re.compile(r"^\s*import\s+regex(\s*|\s+as\s+re\s*)$"),
        allowed_files={"setup.py"},
    ),
    "triton": ForbiddenImport(
        pattern=r"^(from|import)\s+triton(\s|\.|$)",
        tip="Use 'from vllm.triton_utils import triton' instead.",
        # Importing through vllm.triton_utils is the sanctioned route.
        allowed_pattern=re.compile(
            "from vllm.triton_utils import (triton|tl|tl, triton)"
        ),
        allowed_files={"vllm/triton_utils/importing.py"},
    ),
}
def check_file(path: str) -> int:
    """Scan one file against every rule in CHECK_IMPORTS.

    Prints an error line per violation and returns 1 if any violation was
    found, otherwise 0.
    """
    with open(path, encoding="utf-8") as f:
        text = f.read()

    found_violation = False
    for import_name, rule in CHECK_IMPORTS.items():
        # Files explicitly exempted for this rule are skipped entirely.
        if path in rule.allowed_files:
            continue

        for hit in re.finditer(rule.pattern, text, re.MULTILINE):
            # Lines matching the rule's allow-pattern are fine.
            if rule.allowed_pattern.match(hit.group()):
                continue

            # Derive a 1-based line number from the match offset.
            lineno = text[: hit.start() + 1].count("\n") + 1
            message = (
                f"{path}:{lineno}: "
                "\033[91merror:\033[0m "  # red color
                f"Found forbidden import: {import_name}. {rule.tip}"
            )
            print(message)
            found_violation = True

    return 1 if found_violation else 0
def main():
    """Check every path passed on the command line; OR the results together."""
    result = 0
    for file_path in sys.argv[1:]:
        result = result | check_file(file_path)
    return result
def test_regex():
    """Self-test for the pickle/cloudpickle detection regex.

    ``ForbiddenImport.pattern`` is a plain string (``check_file`` passes it to
    ``re.finditer``), so it must be used through ``re.match`` here rather than
    called as if it were a compiled pattern object.
    """
    pattern = CHECK_IMPORTS["pickle/cloudpickle"].pattern
    test_cases = [
        # Should match
        ("import pickle", True),
        ("import cloudpickle", True),
        ("import pickle as pkl", True),
        ("import cloudpickle as cpkl", True),
        ("from pickle import *", True),
        ("from cloudpickle import dumps", True),
        ("from pickle import dumps, loads", True),
        ("from cloudpickle import (dumps, loads)", True),
        (" import pickle", True),
        ("\timport cloudpickle", True),
        ("from pickle import loads", True),
        # Should not match
        ("import somethingelse", False),
        ("from somethingelse import pickle", False),
        ("# import pickle", False),
        ("print('import pickle')", False),
        ("import pickleas as asdf", False),
    ]
    for i, (line, should_match) in enumerate(test_cases):
        # Bug fix: the pattern is a str, so the previous
        # ``...pattern.match(line)`` raised AttributeError. Match via the
        # module-level ``re.match`` on the pattern string instead.
        result = bool(re.match(pattern, line))
        assert result == should_match, (
            f"Test case {i} failed: '{line}' (expected {should_match}, got {result})"
        )
    print("All regex tests passed.")
if __name__ == "__main__":
    # ``--test-regex`` runs the built-in self-test instead of checking files.
    if "--test-regex" in sys.argv:
        test_regex()
    else:
        sys.exit(main())
| {
"repo_id": "vllm-project/vllm",
"file_path": "tools/pre_commit/check_forbidden_imports.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/pooling_mteb_test/test_voyage.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from tests.models.language.pooling.embed_utils import correctness_test_embed_models
from tests.models.utils import EmbedModelInfo
from .mteb_embed_utils import mteb_test_embed_models
# Voyage embedding models exercised by the tests in this module.
MODELS = [
    EmbedModelInfo(
        "voyageai/voyage-4-nano",
        architecture="VoyageQwen3BidirectionalEmbedModel",
        enable_test=True,
        seq_pooling_type="MEAN",
        attn_type="encoder_only",
        is_prefix_caching_supported=False,
        is_chunked_prefill_supported=False,
        hf_overrides={
            "architectures": ["VoyageQwen3BidirectionalEmbedModel"],
            # Embedding head output dimension (nn.Linear out_features).
            "num_labels": 2048,
        },
        mteb_score=0.7054,
        # === MTEB Results ===
        # STS12: 0.6613
        # STS13: 0.6906
        # STS14: 0.6556
        # STS15: 0.7843
        # STS16: 0.7340
        # STSBenchmark: 0.7063
        # Average score: 0.7054
    ),
]
@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_mteb(hf_runner, vllm_runner, model_info: EmbedModelInfo) -> None:
    """Run the MTEB embedding benchmark against each Voyage model."""
    # Encoder-only attention models need enforce_eager=True to avoid
    # CUDA graph capture issues with piecewise compilation
    extra_kwargs = {"enforce_eager": True}
    mteb_test_embed_models(
        hf_runner, vllm_runner, model_info, vllm_extra_kwargs=extra_kwargs
    )
@pytest.mark.parametrize("model_info", MODELS)
def test_embed_models_correctness(
    hf_runner, vllm_runner, model_info: EmbedModelInfo, example_prompts
) -> None:
    """Compare vLLM embeddings against the HF reference on sample prompts."""
    # Same eager-mode requirement as the MTEB test above for encoder-only
    # attention models.
    extra_kwargs = {"enforce_eager": True}
    correctness_test_embed_models(
        hf_runner,
        vllm_runner,
        model_info,
        example_prompts,
        vllm_extra_kwargs=extra_kwargs,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling_mteb_test/test_voyage.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/voyage.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
from collections import defaultdict
from collections.abc import Iterable
import regex as re
import torch
import torch.nn as nn
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.qwen3 import Qwen3Model
from vllm.model_executor.models.utils import WeightsMapper
# (parameter name, tensor) pair as yielded by the checkpoint weight iterator.
WeightItem = tuple[str, torch.Tensor]

# Captures (layer index, remainder), e.g. "layers.3.mlp.up_proj.weight".
_LAYER_RE = re.compile(r"^layers\.(\d+)\.(.+)$")
class VoyageQwen3BidirectionalEmbedModel(Qwen3Model):
    """
    Qwen3Model + Voyage embedding head + bidirectional attention.

    Checkpoint conventions (HF):
        - MLP: gate_proj + up_proj (unfused)
        - Attn: q_proj + k_proj + v_proj (unfused)
        - Linear head: linear.weight
        - Weights prefixed with "model." (e.g., model.layers.0...)

    vLLM Qwen3Model expects:
        - mlp.gate_up_proj (fused)
        - self_attn.qkv_proj (fused)
        - No "model." prefix

    We remap/fuse weights using a generator pipeline and load directly
    (bypassing parent's stacked_params_mapping which would cause
    double-transformation like qkv_proj -> qkqkv_proj). QKV and gate/up
    fusion share one parameterized helper, ``_fuse_projections``.
    """

    hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""})

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Embedding head (hidden_size -> num_labels, bias=False)
        self.linear = nn.Linear(
            self.config.hidden_size,
            self.config.num_labels,
            bias=False,
        )

    def forward(self, *args, **kwargs):
        # Project the backbone hidden states down to the embedding dimension.
        out = super().forward(*args, **kwargs)
        return self.linear(out)

    def _fuse_projections(
        self,
        weights: Iterable[WeightItem],
        suffix_map: dict[str, str],
        part_order: tuple[str, ...],
        fused_suffix: str,
        kind: str,
    ) -> Iterable[WeightItem]:
        """Generic per-layer weight fusion.

        Buffers the per-layer shards whose name suffix appears in
        ``suffix_map`` (suffix -> short part label), passes every other
        weight through unchanged, then yields one fused tensor per layer,
        concatenated along dim 0 in ``part_order``.

        Raises:
            ValueError: if a layer has some but not all expected parts
                (``kind`` names the group in the error message).
        """
        buffered: dict[int, dict[str, torch.Tensor]] = defaultdict(dict)

        for name, tensor in weights:
            m = _LAYER_RE.match(name)
            if m and m.group(2) in suffix_map:
                buffered[int(m.group(1))][suffix_map[m.group(2)]] = tensor
            else:
                yield name, tensor

        # Yield fused weights once all inputs have been consumed.
        for layer_idx in sorted(buffered):
            parts = buffered[layer_idx]
            if all(p in parts for p in part_order):
                fused = torch.cat([parts[p] for p in part_order], dim=0)
                yield f"layers.{layer_idx}.{fused_suffix}", fused
            elif parts:
                missing = [p for p in part_order if p not in parts]
                raise ValueError(f"Layer {layer_idx} missing {kind} parts: {missing}")

    def _fuse_qkv_proj(self, weights: Iterable[WeightItem]) -> Iterable[WeightItem]:
        """Fuse q_proj, k_proj, v_proj into qkv_proj."""
        return self._fuse_projections(
            weights,
            suffix_map={
                "self_attn.q_proj.weight": "q",
                "self_attn.k_proj.weight": "k",
                "self_attn.v_proj.weight": "v",
            },
            part_order=("q", "k", "v"),
            fused_suffix="self_attn.qkv_proj.weight",
            kind="QKV",
        )

    def _fuse_gate_up_proj(self, weights: Iterable[WeightItem]) -> Iterable[WeightItem]:
        """Fuse gate_proj and up_proj into gate_up_proj."""
        return self._fuse_projections(
            weights,
            suffix_map={
                "mlp.gate_proj.weight": "gate",
                "mlp.up_proj.weight": "up",
            },
            part_order=("gate", "up"),
            fused_suffix="mlp.gate_up_proj.weight",
            kind="MLP",
        )

    def load_weights(self, weights: Iterable[WeightItem]) -> set[str]:
        """Remap, fuse, and load weights using generator pipeline."""
        # Chain weight transformations (all lazy generators).
        weights = self.hf_to_vllm_mapper.apply(weights)
        weights = self._fuse_qkv_proj(weights)
        weights = self._fuse_gate_up_proj(weights)

        # Load weights directly into model parameters
        # (bypass parent's stacked_params_mapping)
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()

        for name, loaded_weight in weights:
            # Weights without a matching parameter are silently skipped,
            # mirroring the original loader's behavior.
            if name not in params_dict:
                continue
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)

        return loaded_params
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/voyage.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/tracing/test_loading_tracing.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import pytest
from opentelemetry.sdk.environment_variables import OTEL_EXPORTER_OTLP_TRACES_INSECURE
from tests.tracing.conftest import FAKE_TRACE_SERVER_ADDRESS, FakeTraceService
from vllm.tracing import init_tracer, instrument, is_otel_available
# Skip the whole module when the OpenTelemetry SDK is not installed.
pytestmark = pytest.mark.skipif(not is_otel_available(), reason="OTel required")
class TestCoreInstrumentation:
    """Focuses on the @instrument decorator's ability to capture execution data."""

    @pytest.fixture(autouse=True)
    def setup_tracing(self, monkeypatch):
        # All tests in this class export to the fake in-process collector.
        monkeypatch.setenv(OTEL_EXPORTER_OTLP_TRACES_INSECURE, "true")
        init_tracer("test.core", FAKE_TRACE_SERVER_ADDRESS)

    def test_decorator_captures_sync_and_async(self, trace_service: FakeTraceService):
        """Verify basic span creation for both sync and async functions."""

        @instrument(span_name="sync_task")
        def blocking_call():
            return True

        @instrument(span_name="async_task")
        async def awaited_call():
            return True

        blocking_call()
        asyncio.run(awaited_call())

        assert trace_service.wait_for_spans(count=2)
        recorded_names = [span["name"] for span in trace_service.get_all_spans()]
        assert "sync_task" in recorded_names
        assert "async_task" in recorded_names

    def test_nested_spans_hierarchy(self, trace_service: FakeTraceService):
        """Verify that nested calls create a parent-child relationship."""

        @instrument(span_name="child")
        def inner():
            pass

        @instrument(span_name="parent")
        def outer():
            inner()

        outer()
        assert trace_service.wait_for_spans(count=2)

        spans = trace_service.get_all_spans()
        parent_span = next(s for s in spans if s["name"] == "parent")
        child_span = next(s for s in spans if s["name"] == "child")
        assert child_span["parent_span_id"] == parent_span["span_id"]
class TestInterProcessPropagation:
    """Test the propagation of trace context between processes."""

    def test_pickup_external_context(self, monkeypatch, trace_service):
        """Test that vLLM attaches to an existing trace ID if in environment."""
        monkeypatch.setenv(OTEL_EXPORTER_OTLP_TRACES_INSECURE, "true")

        # Manually simulate an external parent trace ID
        external_trace_id = "4bf92f3577b34da6a3ce929d0e0e4736"
        external_span_id = "00f067aa0ba902b7"
        monkeypatch.setenv(
            "traceparent", f"00-{external_trace_id}-{external_span_id}-01"
        )

        init_tracer("test.external", FAKE_TRACE_SERVER_ADDRESS)

        @instrument(span_name="follower")
        def follower_func():
            pass

        follower_func()

        assert trace_service.wait_for_spans(count=1)
        span = trace_service.get_all_spans()[0]
        assert span["trace_id"] == external_trace_id
        assert span["parent_span_id"] == external_span_id
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/tracing/test_loading_tracing.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/tracing/otel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import atexit
import functools
import inspect
import os
import traceback
from collections.abc import Mapping
from contextlib import contextmanager
from typing import Any
from vllm.logger import init_logger
from vllm.tracing.utils import TRACE_HEADERS, LoadingSpanAttributes
logger = init_logger(__name__)
try:
from opentelemetry import trace
from opentelemetry.context.context import Context
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
OTLPSpanExporter as OTLPGrpcExporter,
)
from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
OTLPSpanExporter as OTLPHttpExporter,
)
from opentelemetry.propagate import inject
from opentelemetry.sdk.environment_variables import (
OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
)
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import (
SpanKind, # noqa: F401
Tracer,
set_tracer_provider,
)
from opentelemetry.trace.propagation.tracecontext import (
TraceContextTextMapPropagator,
)
_IS_OTEL_AVAILABLE = True
otel_import_error_traceback = None
except ImportError:
_IS_OTEL_AVAILABLE = False
otel_import_error_traceback = traceback.format_exc()
trace = None # type: ignore
Context = Any # type: ignore
Tracer = Any # type: ignore
inject = None # type: ignore
Resource = None # type: ignore
SpanKind = Any # type: ignore
def is_otel_available() -> bool:
    """Report whether the OpenTelemetry packages imported successfully."""
    return _IS_OTEL_AVAILABLE
def init_otel_tracer(
    instrumenting_module_name: str,
    otlp_traces_endpoint: str,
    extra_attributes: dict[str, str] | None = None,
) -> Tracer:
    """Initializes the OpenTelemetry tracer provider.

    Raises ValueError when the OTel packages are not importable.
    """
    if not _IS_OTEL_AVAILABLE:
        raise ValueError(
            "OpenTelemetry is not available. Unable to initialize "
            "a tracer. Ensure OpenTelemetry packages are installed. "
            f"Original error:\n{otel_import_error_traceback}"
        )

    # Store the endpoint in environment so child processes can inherit it
    os.environ["OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"] = otlp_traces_endpoint

    resource_attrs = {
        "vllm.instrumenting_module_name": instrumenting_module_name,
        "vllm.process_id": str(os.getpid()),
        **(extra_attributes or {}),
    }

    provider = TracerProvider(resource=Resource.create(resource_attrs))
    provider.add_span_processor(
        BatchSpanProcessor(get_span_exporter(otlp_traces_endpoint))
    )
    set_tracer_provider(provider)
    # Flush any buffered spans on interpreter shutdown.
    atexit.register(provider.shutdown)

    return provider.get_tracer(instrumenting_module_name)
def get_span_exporter(endpoint):
    """Build an OTLP span exporter for the configured transport protocol.

    The protocol is read from OTEL_EXPORTER_OTLP_TRACES_PROTOCOL and
    defaults to gRPC.
    """
    protocol = os.environ.get(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL, "grpc")
    if protocol == "grpc":
        return OTLPGrpcExporter(endpoint=endpoint, insecure=True)
    if protocol == "http/protobuf":
        return OTLPHttpExporter(endpoint=endpoint)
    raise ValueError(f"Unsupported OTLP protocol '{protocol}' is configured")
def init_otel_worker_tracer(
    instrumenting_module_name: str,
    process_kind: str,
    process_name: str,
) -> Tracer:
    """
    Backend-specific initialization for OpenTelemetry in a worker process.

    Returns None when no OTLP endpoint is configured in the environment.
    """
    # The endpoint is propagated via environment variable from the main
    # process; without it there is nothing to export to.
    otlp_endpoint = os.environ.get("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT")
    if otlp_endpoint:
        return init_otel_tracer(
            instrumenting_module_name,
            otlp_endpoint,
            {
                "vllm.process_kind": process_kind,
                "vllm.process_name": process_name,
            },
        )
    return None
def extract_trace_context(headers: Mapping[str, str] | None) -> Context | None:
    """Extracts context from HTTP headers (None when OTel is unavailable
    or no headers were provided)."""
    if not _IS_OTEL_AVAILABLE or not headers:
        return None
    return TraceContextTextMapPropagator().extract(headers)
def instrument_otel(func, span_name, attributes, record_exception):
    """Internal wrapper logic for sync and async functions.

    Returns a wrapper (async if ``func`` is a coroutine function) that runs
    ``func`` inside a span named ``span_name`` (or ``func.__qualname__``),
    annotated with code-location attributes plus any caller-supplied
    ``attributes``.
    """
    # Pre-calculate static code attributes once (these don't change)
    code_attrs = {
        LoadingSpanAttributes.CODE_FUNCTION: func.__qualname__,
        LoadingSpanAttributes.CODE_NAMESPACE: func.__module__,
        LoadingSpanAttributes.CODE_FILEPATH: func.__code__.co_filename,
        LoadingSpanAttributes.CODE_LINENO: str(func.__code__.co_firstlineno),
    }
    if attributes:
        code_attrs.update(attributes)

    final_span_name = span_name or func.__qualname__
    module_name = func.__module__

    @functools.wraps(func)
    async def async_wrapper(*args, **kwargs):
        # Tracer is fetched per call so a provider installed after
        # decoration is still picked up.
        tracer = trace.get_tracer(module_name)
        ctx = _get_smart_context()
        # propagate_trace_to_env makes subprocesses spawned inside the
        # call inherit this span's context.
        with (
            tracer.start_as_current_span(
                final_span_name,
                context=ctx,
                attributes=code_attrs,
                record_exception=record_exception,
            ),
            propagate_trace_to_env(),
        ):
            return await func(*args, **kwargs)

    @functools.wraps(func)
    def sync_wrapper(*args, **kwargs):
        # Same logic as async_wrapper, for plain callables.
        tracer = trace.get_tracer(module_name)
        ctx = _get_smart_context()
        with (
            tracer.start_as_current_span(
                final_span_name,
                context=ctx,
                attributes=code_attrs,
                record_exception=record_exception,
            ),
            propagate_trace_to_env(),
        ):
            return func(*args, **kwargs)

    return async_wrapper if inspect.iscoroutinefunction(func) else sync_wrapper
def manual_instrument_otel(
    span_name: str,
    start_time: int,
    end_time: int | None = None,
    attributes: dict[str, Any] | None = None,
    context: Context | None = None,
    kind: Any = None,  # SpanKind, but typed as Any for when OTEL unavailable
):
    """Manually create and end a span with explicit timestamps.

    No-op when OpenTelemetry is unavailable.
    """
    if not _IS_OTEL_AVAILABLE:
        return

    tracer = trace.get_tracer(__name__)
    # Prefer an explicitly supplied context; otherwise detect one.
    parent_ctx = _get_smart_context() if context is None else context

    span_kwargs: dict[str, Any] = {
        "name": span_name,
        "context": parent_ctx,
        "start_time": start_time,
    }
    if kind is not None:
        span_kwargs["kind"] = kind

    span = tracer.start_span(**span_kwargs)
    if attributes:
        span.set_attributes(attributes)

    # end() without an explicit timestamp stamps the span with "now".
    if end_time is None:
        span.end()
    else:
        span.end(end_time=end_time)
def _get_smart_context() -> Context | None:
    """
    Determines the parent context.

    1. If a Span is already active in this process, use it (returning None
       lets the tracer default to the current context).
    2. If not, extract from os.environ, handling the case-sensitivity
       mismatch between W3C header names and environment variables.
    """
    current_span = trace.get_current_span()
    if current_span.get_span_context().is_valid:
        return None

    # Collect only the W3C trace-context keys, lowercase first then upper.
    carrier = {}
    if tp := os.environ.get("traceparent", os.environ.get("TRACEPARENT")):  # noqa: SIM112
        carrier["traceparent"] = tp
    if ts := os.environ.get("tracestate", os.environ.get("TRACESTATE")):  # noqa: SIM112
        carrier["tracestate"] = ts

    # Last resort: hand the propagator the whole environment.
    if not carrier:
        carrier = dict(os.environ)

    return TraceContextTextMapPropagator().extract(carrier)
@contextmanager
def propagate_trace_to_env():
    """
    Temporarily injects the current OTel context into os.environ.

    This ensures that any subprocesses (like vLLM workers) spawned
    within this context inherit the correct traceparent.
    """
    if not _IS_OTEL_AVAILABLE:
        yield
        return

    # Remember the pre-injection values of the trace headers.
    saved = {key: os.environ.get(key) for key in TRACE_HEADERS}
    try:
        # inject() writes 'traceparent' and 'tracestate' to os.environ
        inject(os.environ)
        yield
    finally:
        # Restore the original environment.
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/tracing/otel.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/tracing/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Mapping
from vllm.logger import init_logger
from vllm.utils.func_utils import run_once
logger = init_logger(__name__)

# Standard W3C trace-context header names used for context propagation.
TRACE_HEADERS = ["traceparent", "tracestate"]
class SpanAttributes:
    """
    Standard attributes for spans.

    These are largely based on OpenTelemetry Semantic Conventions but are defined
    here as constants so they can be used by any backend or logger.
    """

    # Attribute names copied from OTel semantic conventions to avoid version conflicts
    GEN_AI_USAGE_COMPLETION_TOKENS = "gen_ai.usage.completion_tokens"
    GEN_AI_USAGE_PROMPT_TOKENS = "gen_ai.usage.prompt_tokens"
    GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
    GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
    GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
    GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
    # Custom attributes added until they are standardized upstream
    GEN_AI_REQUEST_ID = "gen_ai.request.id"
    GEN_AI_REQUEST_N = "gen_ai.request.n"
    GEN_AI_USAGE_NUM_SEQUENCES = "gen_ai.usage.num_sequences"
    # End-to-end and scheduler latency attributes (seconds, per request)
    GEN_AI_LATENCY_TIME_IN_QUEUE = "gen_ai.latency.time_in_queue"
    GEN_AI_LATENCY_TIME_TO_FIRST_TOKEN = "gen_ai.latency.time_to_first_token"
    GEN_AI_LATENCY_E2E = "gen_ai.latency.e2e"
    GEN_AI_LATENCY_TIME_IN_SCHEDULER = "gen_ai.latency.time_in_scheduler"
    # Latency breakdowns of model execution
    GEN_AI_LATENCY_TIME_IN_MODEL_FORWARD = "gen_ai.latency.time_in_model_forward"
    GEN_AI_LATENCY_TIME_IN_MODEL_EXECUTE = "gen_ai.latency.time_in_model_execute"
    GEN_AI_LATENCY_TIME_IN_MODEL_PREFILL = "gen_ai.latency.time_in_model_prefill"
    GEN_AI_LATENCY_TIME_IN_MODEL_DECODE = "gen_ai.latency.time_in_model_decode"
    GEN_AI_LATENCY_TIME_IN_MODEL_INFERENCE = "gen_ai.latency.time_in_model_inference"
class LoadingSpanAttributes:
    """Custom attributes for code-level tracing (file, line number)."""

    # Dotted module path of the traced code
    CODE_NAMESPACE = "code.namespace"
    # Function or method name being traced
    CODE_FUNCTION = "code.function"
    # Source file path of the traced code
    CODE_FILEPATH = "code.filepath"
    # Line number within the source file
    CODE_LINENO = "code.lineno"
def contains_trace_headers(headers: Mapping[str, str]) -> bool:
    """Check if the provided headers dictionary contains trace context."""
    # Probe each known W3C trace header; one hit is enough.
    for header in TRACE_HEADERS:
        if header in headers:
            return True
    return False
def extract_trace_headers(headers: Mapping[str, str]) -> Mapping[str, str]:
    """
    Extract only trace-related headers from a larger header dictionary.

    Useful for logging or passing context to a non-OTel client.
    """
    result: dict[str, str] = {}
    for header in TRACE_HEADERS:
        if header in headers:
            result[header] = headers[header]
    return result
@run_once
def log_tracing_disabled_warning() -> None:
    """Warn (once per process) that a traced request arrived with tracing off."""
    logger.warning("Received a request with trace context but tracing is disabled")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/tracing/utils.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/multimodal/media/test_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import base64
import mimetypes
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import aiohttp
import numpy as np
import pytest
import requests
import torch
from PIL import Image, ImageChops
from vllm.multimodal.image import convert_image_mode
from vllm.multimodal.inputs import PlaceholderRange
from vllm.multimodal.media import MediaConnector
# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA)
TEST_IMAGE_ASSETS = [
    "2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",  # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
    "Grayscale_8bits_palette_sample_image.png",  # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/Grayscale_8bits_palette_sample_image.png",
    "1280px-Venn_diagram_rgb.svg.png",  # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/1280px-Venn_diagram_rgb.svg.png",
    "RGBA_comp.png",  # "https://vllm-public-assets.s3.us-west-2.amazonaws.com/vision_model_images/RGBA_comp.png",
]
# Remote videos in two container formats (mp4, avi) used by the video tests.
TEST_VIDEO_URLS = [
    "https://www.bogotobogo.com/python/OpenCV_Python/images/mean_shift_tracking/slow_traffic_small.mp4",
    "https://github.com/opencv/opencv/raw/refs/tags/4.12.0/samples/data/vtest.avi",
]
@pytest.fixture(scope="module")
def url_images(local_asset_server) -> dict[str, Image.Image]:
    """Map each test asset name to its image, loaded from the local server."""
    return {
        image_url: local_asset_server.get_image_asset(image_url)
        for image_url in TEST_IMAGE_ASSETS
    }
def get_supported_suffixes() -> tuple[str, ...]:
    """Return the image file suffixes the base64 round-trip tests cover.

    Covers at least the file types mentioned in GPT-4 with Vision, plus
    additional formats supported by vLLM.
    """
    # File types mentioned in GPT-4 with Vision
    openai_suffixes = (".png", ".jpeg", ".jpg", ".webp", ".gif")
    # Additional file types that are supported by us
    extra_suffixes = (".bmp", ".tiff")
    return openai_suffixes + extra_suffixes
def _image_equals(a: Image.Image, b: Image.Image) -> bool:
    """Pixel-exact equality after converting ``b`` to ``a``'s color mode."""
    return (np.asarray(a) == np.asarray(convert_image_mode(b, a.mode))).all()
@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_fetch_image_http(image_url: str):
    """Sync and async HTTP fetches of the same URL must yield equal images."""
    connector = MediaConnector()
    image_sync = connector.fetch_image(image_url)
    image_async = await connector.fetch_image_async(image_url)
    assert _image_equals(image_sync, image_async)
@pytest.mark.asyncio
@pytest.mark.parametrize("raw_image_url", TEST_IMAGE_ASSETS)
@pytest.mark.parametrize("suffix", get_supported_suffixes())
async def test_fetch_image_base64(
    url_images: dict[str, Image.Image], raw_image_url: str, suffix: str
):
    """Round-trip each asset through a base64 data URL per supported format."""
    connector = MediaConnector(
        # Domain restriction should not apply to data URLs.
        allowed_media_domains=[
            "www.bogotobogo.com",
            "github.com",
        ]
    )
    url_image = url_images[raw_image_url]
    # Resolve the MIME type via PIL first, falling back to the stdlib table;
    # skip the case entirely if neither knows this suffix.
    try:
        mime_type = Image.MIME[Image.registered_extensions()[suffix]]
    except KeyError:
        try:
            mime_type = mimetypes.types_map[suffix]
        except KeyError:
            pytest.skip("No MIME type")
    with NamedTemporaryFile(suffix=suffix) as f:
        try:
            # Save by name: PIL re-encodes the image into the target format.
            url_image.save(f.name)
        except Exception as e:
            if e.args[0] == "cannot write mode RGBA as JPEG":
                pytest.skip("Conversion not supported")
            raise
        base64_image = base64.b64encode(f.read()).decode("utf-8")
        data_url = f"data:{mime_type};base64,{base64_image}"
        data_image_sync = connector.fetch_image(data_url)
        # Only lossless re-encodings can be compared pixel-for-pixel.
        if _image_equals(url_image, Image.open(f)):
            assert _image_equals(url_image, data_image_sync)
        else:
            pass  # Lossy format; only check that image can be opened
        data_image_async = await connector.fetch_image_async(data_url)
        assert _image_equals(data_image_sync, data_image_async)
@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", TEST_IMAGE_ASSETS, indirect=True)
async def test_fetch_image_local_files(image_url: str):
    """file:// fetches work within the allowed path and are rejected outside it."""
    connector = MediaConnector()
    with TemporaryDirectory() as temp_dir:
        local_connector = MediaConnector(allowed_local_media_path=temp_dir)
        # Stage a copy of the asset inside the allowed directory.
        origin_image = connector.fetch_image(image_url)
        origin_image.save(
            os.path.join(temp_dir, os.path.basename(image_url)),
            quality=100,
            icc_profile=origin_image.info.get("icc_profile"),
        )
        image_async = await local_connector.fetch_image_async(
            f"file://{temp_dir}/{os.path.basename(image_url)}"
        )
        image_sync = local_connector.fetch_image(
            f"file://{temp_dir}/{os.path.basename(image_url)}"
        )
        # Check that the images are equal
        assert not ImageChops.difference(image_sync, image_async).getbbox()
        # Path traversal out of the allowed directory must be rejected,
        # and a connector with no allowed path must refuse local files.
        with pytest.raises(ValueError, match="must be a subpath"):
            await local_connector.fetch_image_async(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )
        with pytest.raises(RuntimeError, match="Cannot load local files"):
            await connector.fetch_image_async(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )
        with pytest.raises(ValueError, match="must be a subpath"):
            local_connector.fetch_image(
                f"file://{temp_dir}/../{os.path.basename(image_url)}"
            )
        with pytest.raises(RuntimeError, match="Cannot load local files"):
            connector.fetch_image(f"file://{temp_dir}/../{os.path.basename(image_url)}")
@pytest.mark.asyncio
@pytest.mark.parametrize("image_url", [TEST_IMAGE_ASSETS[0]], indirect=True)
async def test_fetch_image_local_files_with_space_in_name(image_url: str):
    """Fetching a local file whose name contains a space must succeed.

    The file URL must reference the saved file (``filename``); previously the
    URL contained a stray "(unknown)" placeholder, so the test fetched a
    nonexistent path and never exercised the space-in-name handling.
    """
    connector = MediaConnector()
    with TemporaryDirectory() as temp_dir:
        local_connector = MediaConnector(allowed_local_media_path=temp_dir)
        origin_image = connector.fetch_image(image_url)
        filename = "file name with space.jpg"
        origin_image.save(
            os.path.join(temp_dir, filename),
            quality=100,
            icc_profile=origin_image.info.get("icc_profile"),
        )
        try:
            # Both fetch paths must resolve the URL to the saved file.
            image_async = await local_connector.fetch_image_async(
                f"file://{temp_dir}/{filename}"
            )
            image_sync = local_connector.fetch_image(f"file://{temp_dir}/{filename}")
        except FileNotFoundError as e:
            pytest.fail("Failed to fetch image with space in name: {}".format(e))
        # Check that the images are equal
        assert not ImageChops.difference(image_sync, image_async).getbbox()
@pytest.mark.asyncio
async def test_fetch_image_error_conversion():
    """Invalid image payloads must surface as ValueError on both fetch paths."""
    connector = MediaConnector()
    # Valid base64, but the decoded bytes are not an image.
    broken_img = "data:image/png;base64,aGVsbG9fdmxsbV9jb21tdW5pdHkK"
    # PIL.UnidentifiedImageError should be converted to ValueError
    with pytest.raises(ValueError):
        await connector.fetch_image_async(broken_img)
    with pytest.raises(ValueError):
        connector.fetch_image(broken_img)
@pytest.mark.flaky(reruns=3, reruns_delay=5)
@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("num_frames", [-1, 32, 1800])
async def test_fetch_video_http(video_url: str, num_frames: int):
    """Sync and async video fetches must agree on frames and metadata.

    num_frames covers "all" (-1), fewer than available (32), and more than
    available (1800).
    """
    connector = MediaConnector(
        media_io_kwargs={
            "video": {
                "num_frames": num_frames,
            }
        }
    )
    try:
        video_sync, metadata_sync = connector.fetch_video(video_url)
        video_async, metadata_async = await connector.fetch_video_async(video_url)
    except (TimeoutError, asyncio.TimeoutError) as e:
        pytest.skip(f"Timeout fetching video (CI network flakiness): {e}")
    assert np.array_equal(video_sync, video_async)
    assert metadata_sync == metadata_async
@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("max_duration", [1, 60, 1800])
@pytest.mark.parametrize("requested_fps", [2, 24])
async def test_fetch_video_http_with_dynamic_loader(
    video_url: str,
    max_duration: int,
    requested_fps: int,
    monkeypatch: pytest.MonkeyPatch,
):
    """The opencv_dynamic backend must behave identically sync vs. async."""
    with monkeypatch.context() as m:
        # Select the dynamic loader via its environment-variable switch.
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv_dynamic")
        connector = MediaConnector(
            media_io_kwargs={
                "video": {
                    "max_duration": max_duration,
                    "requested_fps": requested_fps,
                }
            }
        )
        video_sync, metadata_sync = connector.fetch_video(video_url)
        video_async, metadata_async = await connector.fetch_video_async(video_url)
        assert np.array_equal(video_sync, video_async)
        assert metadata_sync == metadata_async
        # Metadata must record which backend actually produced the frames.
        assert metadata_sync["video_backend"] == "opencv_dynamic"
@pytest.mark.parametrize(
    "is_embed,start_idx,end_idx,expected",
    [
        # is_embed=None: every position is an embedding, range maps 1:1.
        (None, 2, 4, (2, 4)),
        (
            torch.tensor([False, True, False, True, True]),
            3,
            5,
            (1, 3),
        ),
        (
            torch.tensor([False, True, False, True, True]),
            0,
            2,
            (0, 1),
        ),
        # Empty range: start == end yields an empty index span.
        (
            torch.tensor([True, False, True, False]),
            2,
            2,
            (1, 1),
        ),
    ],
)
def test_placeholder_range_get_embeds_indices_in_range(
    is_embed, start_idx, end_idx, expected
):
    """Map a token-position range to the corresponding embedding indices."""
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=0, length=length, is_embed=is_embed)
    assert pr.get_embeds_indices_in_range(start_idx, end_idx) == expected
@pytest.mark.parametrize(
    "offset,is_embed,expected",
    [
        # is_embed=None: the whole range is one contiguous embeds span.
        (0, None, [(0, 4)]),
        # Mask with gaps produces multiple spans, shifted by offset.
        (
            2,
            torch.tensor([False, True, False, True, True]),
            [(3, 3), (5, 6)],
        ),
        (0, torch.tensor([True, True, True, True]), [(0, 3)]),
        # All-False mask: no embedding spans at all.
        (0, torch.tensor([False, False, False, False]), []),
    ],
)
def test_placeholder_range_extract_embeds_range(offset, is_embed, expected):
    """Extract the absolute (start, end) spans where embeddings are placed."""
    length = len(is_embed) if is_embed is not None else 5
    pr = PlaceholderRange(offset=offset, length=length, is_embed=is_embed)
    assert pr.extract_embeds_range() == expected
@pytest.mark.asyncio
@pytest.mark.parametrize("video_url", TEST_VIDEO_URLS)
@pytest.mark.parametrize("num_frames", [-1, 32, 1800])
async def test_allowed_media_domains(video_url: str, num_frames: int):
    """Allowlisted hosts fetch normally; any other host raises ValueError."""
    connector = MediaConnector(
        media_io_kwargs={
            "video": {
                "num_frames": num_frames,
            }
        },
        allowed_media_domains=[
            "www.bogotobogo.com",
            "github.com",
        ],
    )
    video_sync, metadata_sync = connector.fetch_video(video_url)
    video_async, metadata_async = await connector.fetch_video_async(video_url)
    assert np.array_equal(video_sync, video_async)
    assert metadata_sync == metadata_async
    # Host not in the allowlist must be rejected before any fetch happens.
    disallowed_url = "https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png"
    with pytest.raises(ValueError):
        _, _ = connector.fetch_video(disallowed_url)
    with pytest.raises(ValueError):
        _, _ = await connector.fetch_video_async(disallowed_url)
@pytest.mark.asyncio
async def test_ssrf_bypass_backslash_in_url(local_asset_server):
    """Verify that backslash-@ URL parsing confusion cannot bypass the
    allowed_media_domains check (GHSA-v359-jj2v-j536).

    urllib3.parse_url() and aiohttp/yarl disagree on how to parse a
    backslash before ``@``. urllib3 treats ``\\`` as part of the path
    (encoding it as ``%5C``), while yarl treats it as a userinfo
    separator, changing the effective host. The fix normalises the URL
    through urllib3 *before* handing it to aiohttp so both layers agree.
    """
    port = local_asset_server.port
    asset = TEST_IMAGE_ASSETS[0]
    # Craft the bypass payload: urllib3 sees host=127.0.0.1, but an
    # un-patched aiohttp would see host=example.com.
    bypass_url = f"http://127.0.0.1:{port}\\@example.com/{asset}"
    connector = MediaConnector(
        allowed_media_domains=["127.0.0.1"],
    )
    # After the fix the request is made to 127.0.0.1 (the local asset
    # server) using the normalised URL. The normalised path will be
    # /%5C@example.com/<asset> which won't match any file the server
    # knows about, so we expect an HTTP error — but crucially NOT a
    # successful fetch from example.com.
    with pytest.raises(requests.exceptions.HTTPError):
        connector.fetch_image(bypass_url)
    with pytest.raises(aiohttp.ClientResponseError):
        await connector.fetch_image_async(bypass_url)
@pytest.mark.asyncio
async def test_ssrf_bypass_backslash_disallowed_domain():
    """The reverse direction: even when the *attacker-controlled* host
    appears in the urllib3-parsed hostname position the allowlist must
    still block it.
    """
    # urllib3.parse_url sees host=example.com which is NOT in the
    # allowlist, so this must be rejected before any request is made.
    bypass_url = "https://example.com\\@safe.example.org/image.png"
    connector = MediaConnector(
        allowed_media_domains=["safe.example.org"],
    )
    # Both the sync and async paths must refuse the disallowed host.
    with pytest.raises(ValueError, match="allowed domains"):
        connector.fetch_image(bypass_url)
    with pytest.raises(ValueError, match="allowed domains"):
        await connector.fetch_image_async(bypass_url)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/media/test_connector.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/offline_inference/new_weight_syncing/rlhf_async_new_apis.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Demonstrates async reinforcement learning using vLLM and Ray,
with native weight syncing APIs at engine instance.
The script separates training and inference workloads onto distinct GPUs
so that Ray can manage process placement and inter-process communication.
A Hugging Face Transformer model occupies one GPU for training, whereas a
2x tensor-parallel vLLM inference engine occupies two GPUs.
The example performs the following steps:
* Load the training model on one gpu (scheduled via ray)
* Initialize the inference model with dummy weights across
two gpus using vLLM's tensor parallelism and Ray placement groups.
* Generate gibberish from a list of prompts using the randomly initialized
inference engine.
* Pause generation once generation completes for one sequence
* Update the weights of the training model and broadcast the updated weights
to the inference engine by using a Ray collective RPC group.
* Resume generation and print out the results
This example assumes a single-node cluster with three GPUs, but Ray
supports multi-node clusters. vLLM expects the GPUs are only used for vLLM
workloads. Residual GPU activity interferes with vLLM memory profiling and
causes unexpected behavior.
"""
import asyncio
import uuid
from dataclasses import asdict
import ray
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import vllm
from vllm import SamplingParams
from vllm.config import WeightTransferConfig
from vllm.distributed.weight_transfer.base import (
WeightTransferInitRequest,
WeightTransferUpdateRequest,
)
from vllm.distributed.weight_transfer.nccl_engine import (
NCCLTrainerSendWeightsArgs,
NCCLWeightTransferEngine,
NCCLWeightTransferInitInfo,
NCCLWeightTransferUpdateInfo,
)
from vllm.utils.network_utils import get_ip, get_open_port
from vllm.v1.executor import Executor
# Initial model loaded into the vLLM inference engine.
MODEL_NAME_V1 = "Qwen/Qwen3-1.7B-Base"
# Model whose weights are broadcast mid-generation and used for validation.
MODEL_NAME_V2 = "Qwen/Qwen3-1.7B"
# Number of generated tokens after which a request asks for a pause.
PAUSE_TOKEN_THRESHOLD = 10
class MyLLM(vllm.AsyncLLMEngine):
    """Configure the vLLM worker for Ray placement group execution."""

    def __init__(self, **kwargs):
        engine_args = vllm.AsyncEngineArgs(**kwargs)
        vllm_config = engine_args.create_engine_config()
        executor_class = Executor.get_class(vllm_config)
        super().__init__(
            vllm_config=vllm_config,
            executor_class=executor_class,
            log_requests=engine_args.enable_log_requests,
            log_stats=not engine_args.disable_log_stats,
        )
        # Set once pause_generation() has taken effect engine-wide.
        self._generation_paused = False
        # Set by the first request that crosses PAUSE_TOKEN_THRESHOLD tokens.
        self._request_pause_flag = False

    async def do_generate(
        self, prompt_token_ids: list[int], sampling_params: vllm.SamplingParams
    ) -> tuple[vllm.RequestOutput, int]:
        """Generate a single request, setting the request pause flag once the
        token count reaches the threshold.

        Returns (output, pause_token_index). pause_token_index is the number
        of tokens generated before the weight change, or -1 if no pause.
        """
        pause_token_index = -1
        prev_token_count = 0
        async for request_output in self.generate(
            {"prompt_token_ids": prompt_token_ids},
            sampling_params,
            request_id=str(uuid.uuid4()),
        ):
            output = request_output
            cur_token_count = len(output.outputs[0].token_ids)
            # Request a pause the first time any request crosses the threshold.
            if (
                cur_token_count >= PAUSE_TOKEN_THRESHOLD
                and not self._request_pause_flag
            ):
                self._request_pause_flag = True
            # Record how many tokens used the old weights: the pause became
            # visible only after the previous iteration's count.
            if self._generation_paused and pause_token_index == -1:
                pause_token_index = prev_token_count
            prev_token_count = cur_token_count
        return output, pause_token_index

    async def pause_after_n_tokens(self):
        """Wait for any request to set the pause flag, then pause."""
        while not self._request_pause_flag:
            await asyncio.sleep(0)
        await super().pause_generation(mode="keep")
        # Brief grace period so in-flight steps settle before flagging paused.
        await asyncio.sleep(0.2)
        self._generation_paused = True
@ray.remote(num_gpus=1)
class TrainModel:
    """Ray actor that wraps the training model on a dedicated GPU."""

    def __init__(self, model_name: str):
        from vllm.model_executor.layers.batch_invariant import (
            init_batch_invariance,
        )
        from vllm.v1.attention.backends.registry import AttentionBackendEnum

        # need to init all env vars for batch invariance which affect nccl ops
        init_batch_invariance(AttentionBackendEnum.FLASH_ATTN)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, dtype=torch.bfloat16
        ).to("cuda:0")
        # Rendezvous endpoint for the NCCL weight-transfer process group.
        self.port = get_open_port()
        self.master_address = get_ip()

    def get_master_address_and_port(self):
        """Return the (address, port) rendezvous endpoint for NCCL init."""
        return self.master_address, self.port

    def get_weight_metadata(self):
        """Return weight names, dtypes, and shapes for weight transfer."""
        names = []
        dtype_names = []
        shapes = []
        for name, p in self.model.named_parameters():
            names.append(name)
            # e.g. torch.bfloat16 -> "bfloat16"
            dtype_names.append(str(p.dtype).split(".")[-1])
            shapes.append(list(p.shape))
        return names, dtype_names, shapes

    def init_weight_transfer_group(self, world_size):
        """Initialize the NCCL process group for weight transfer."""
        self.model_update_group = NCCLWeightTransferEngine.trainer_init(
            dict(
                master_address=self.master_address,
                master_port=self.port,
                world_size=world_size,
            ),
        )

    def broadcast_weights(self, packed: bool = True):
        """Broadcast weights to the inference engine."""
        trainer_args = NCCLTrainerSendWeightsArgs(
            group=self.model_update_group,
            packed=packed,
        )
        NCCLWeightTransferEngine.trainer_send_weights(
            iterator=self.model.named_parameters(),
            trainer_args=trainer_args,
        )

    @torch.inference_mode()
    def generate(self, token_ids: list[int], max_new_tokens: int) -> list[int]:
        """Greedy-decode max_new_tokens from the given context."""
        input_ids = torch.tensor([token_ids], device="cuda:0")
        output = self.model.generate(
            input_ids,
            max_new_tokens=max_new_tokens,
            do_sample=False,
        )
        # Strip the prompt: keep only the newly generated suffix.
        new_token_ids = output[0, len(token_ids) :].tolist()
        return new_token_ids
# --- Cluster and actor setup -------------------------------------------------
ray.init(
    runtime_env={
        "env_vars": {
            # enable batch invariance for deterministic outputs
            "VLLM_BATCH_INVARIANT": "1",
            # prevent ray from setting CUDA_VISIBLE_DEVICES
            "RAY_EXPERIMENTAL_NOSET_CUDA_ENV_VAR": "1",
        }
    }
)
# Launch the training model actor. Ray's resource scheduler will allocate
# 1 GPU (via num_gpus=1 in the decorator), ensuring pg_inference gets different GPUs.
train_model = TrainModel.remote(MODEL_NAME_V2)
# Launch the vLLM inference engine. The `enforce_eager` flag reduces
# start-up latency.
# With data_parallel_backend="ray", vLLM's CoreEngineActorManager creates
# its own placement groups internally for each DP rank, so we must NOT
# create an outer placement group (it would reserve GPUs and hide them
# from the internal DP resource check).
llm = ray.remote(
    num_cpus=0,
    num_gpus=0,
)(MyLLM).remote(
    model=MODEL_NAME_V1,
    enforce_eager=True,
    max_model_len=8192,
    distributed_executor_backend="ray",
    attention_backend="FLASH_ATTN",
    gpu_memory_utilization=0.75,
    weight_transfer_config=WeightTransferConfig(backend="nccl"),
)
# Factual completion prompts; greedy decoding makes outputs reproducible.
PROMPTS = [
    "The president of the United States is",
    "The capital of France is",
    "The largest ocean on Earth is",
    "The speed of light in a vacuum is",
    "The chemical formula for water is",
    "The tallest mountain in the world is",
    "The first person to walk on the moon was",
    "The Great Wall of China was built to",
    "Photosynthesis is the process by which",
    "The theory of general relativity was proposed by",
    "The boiling point of water at sea level is",
    "The largest planet in our solar system is",
    "DNA stands for deoxyribonucleic acid and it",
]
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_V1)
# Pre-tokenize every prompt once, outside the generation loop.
batch_prompt_token_ids = [
    tokenizer.encode(prompt, add_special_tokens=False) for prompt in PROMPTS
]
# Set up the communication channel between the training process and the
# inference engine.
master_address, master_port = ray.get(train_model.get_master_address_and_port.remote())
world_size = 2  # 1 trainer + 1 inference worker
inference_handle = llm.init_weight_transfer_engine.remote(
    WeightTransferInitRequest(
        init_info=asdict(
            NCCLWeightTransferInitInfo(
                master_address=master_address,
                master_port=master_port,
                # Trainer occupies rank 0; inference worker starts at rank 1.
                rank_offset=1,
                world_size=world_size,
            )
        )
    )
)
# Initialize weight transfer group on both the training actor and inference engine
train_handle = train_model.init_weight_transfer_group.remote(world_size)
ray.get([train_handle, inference_handle])
N_NEW_TOKENS = 100
# Collect weight metadata once
names, dtype_names, shapes = ray.get(train_model.get_weight_metadata.remote())
# ── Phase 1: concurrent requests with weight sync ───────────────────
print(f"\n{'=' * 50}")
print(f"Prompts ({len(PROMPTS)}):")
for p in PROMPTS:
    print(f"  - {p!r}")
print(f"{'=' * 50}")
sampling_params = SamplingParams(
    temperature=0, max_tokens=PAUSE_TOKEN_THRESHOLD + N_NEW_TOKENS
)
# Kick off all requests concurrently, then wait for the pause trigger.
gen_futures = [
    llm.do_generate.remote(ptids, sampling_params) for ptids in batch_prompt_token_ids
]
ray.get(llm.pause_after_n_tokens.remote())
# While paused, receive the new weights on the inference side...
inference_handle = llm.update_weights.remote(
    WeightTransferUpdateRequest(
        update_info=asdict(
            NCCLWeightTransferUpdateInfo(
                names=names,
                dtype_names=dtype_names,
                shapes=shapes,
                packed=True,
            )
        )
    )
)
# ...and broadcast them from the trainer side; both must run together.
train_handle = train_model.broadcast_weights.remote(packed=True)
ray.get([train_handle, inference_handle])
ray.get(llm.resume_generation.remote())
results = ray.get(gen_futures)
for i, (output, pause_idx) in enumerate(results):
    all_token_ids = list(output.outputs[0].token_ids)
    before_text = tokenizer.decode(all_token_ids[:pause_idx])
    after_text = tokenizer.decode(all_token_ids[pause_idx:])
    print(f"\n Request {i} ({PROMPTS[i]!r}):")
    print(f"   Old weights ({pause_idx} tokens): {before_text!r}")
    n_after = len(all_token_ids) - pause_idx
    print(f"   New weights ({n_after} tokens): {after_text!r}")
# ── Phase 2: validate with a fresh V2 vLLM instance ────────────────
print(f"\n{'=' * 50}")
print("VALIDATION: comparing weight-synced vLLM with fresh V2 instance")
print(f"{'=' * 50}")
# Tear down phase-1 actors to free their GPUs for the validation engine.
ray.get(llm.shutdown.remote())
ray.kill(llm)
ray.kill(train_model)
llm_v2 = ray.remote(
    num_cpus=0,
    num_gpus=0,
)(MyLLM).remote(
    model=MODEL_NAME_V2,
    enforce_eager=True,
    max_model_len=8192,
    gpu_memory_utilization=0.75,
    distributed_executor_backend="ray",
    attention_backend="FLASH_ATTN",
)
# Replay each request: prompt + pre-pause tokens as context, then generate
# the same number of post-pause tokens with the genuine V2 weights.
val_futures = [
    llm_v2.do_generate.remote(
        list(output.prompt_token_ids) + list(output.outputs[0].token_ids)[:pause_idx],
        SamplingParams(
            temperature=0, max_tokens=len(output.outputs[0].token_ids) - pause_idx
        ),
    )
    for output, pause_idx in results
]
val_results = ray.get(val_futures)
all_pass = True
for i, ((output, pause_idx), (val_output, _)) in enumerate(zip(results, val_results)):
    expected = list(output.outputs[0].token_ids)[pause_idx:]
    actual = list(val_output.outputs[0].token_ids)
    match = actual == expected
    if match:
        print(f"  [PASS] {PROMPTS[i]!r}")
    else:
        all_pass = False
        print(f"  [FAIL] {PROMPTS[i]!r}")
        print(f"    weight-synced vLLM: {tokenizer.decode(expected)!r}")
        print(f"    V2 vLLM:            {tokenizer.decode(actual)!r}")
        for j, (e, a) in enumerate(zip(expected, actual)):
            if e != a:
                print(
                    f"    first divergence at output token {j}: "
                    f"expected {e} ({tokenizer.decode([e])!r}) vs "
                    f"actual {a} ({tokenizer.decode([a])!r})"
                )
                break
ray.get(llm_v2.shutdown.remote())
ray.kill(llm_v2)
assert all_pass, "Some prompts failed validation, see above for details"
print("=" * 50)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/new_weight_syncing/rlhf_async_new_apis.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/distributed/test_packed_tensor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for packed tensor broadcasting functionality.
Unit tests for packed_broadcast_producer and packed_broadcast_consumer.
These utilities enable efficient batched tensor transfer over NCCL.
"""
import pytest
import torch
from vllm.distributed.weight_transfer.nccl_engine import NCCLWeightTransferUpdateInfo
from vllm.distributed.weight_transfer.packed_tensor import (
packed_broadcast_consumer,
packed_broadcast_producer,
)
class MockCommunicationGroup:
    """Stand-in communication group that records producer-side broadcasts.

    No collective is performed: each call to :meth:`broadcast` snapshots the
    tensor so tests can inspect exactly what would have been sent.
    """

    def __init__(self):
        # Clones of every tensor handed to broadcast(), in call order.
        self.broadcasted_tensors: list[torch.Tensor] = []
        # How many broadcast() invocations have been observed.
        self.broadcast_count = 0
        # Producer code expects the group to expose a target device.
        self.device = torch.device("cuda:0")

    def broadcast(self, tensor, src):
        """Record a clone of ``tensor`` instead of broadcasting it."""
        self.broadcast_count += 1
        self.broadcasted_tensors.append(tensor.clone())
class MockConsumerCommunicationGroup:
    """Stand-in group for the consumer side of a broadcast.

    Plays back a pre-recorded sequence of tensors: each :meth:`broadcast`
    call copies the next stored tensor into the caller's buffer. Calls made
    after the sequence is exhausted are silent no-ops.
    """

    def __init__(self, tensors_to_return: list[torch.Tensor]):
        # Tensors replayed one per broadcast() call, in order.
        self.tensors_to_return = tensors_to_return
        # Index of the next tensor to replay.
        self.current_index = 0
        self.device = torch.device("cuda:0")

    def broadcast(self, tensor, src):
        """Fill ``tensor`` in place with the next pre-recorded payload."""
        if self.current_index >= len(self.tensors_to_return):
            return
        tensor.copy_(self.tensors_to_return[self.current_index])
        self.current_index += 1
def create_mock_model_params(
    num_layers: int = 3,
    dtype: torch.dtype = torch.float32,
) -> list[tuple[str, torch.Tensor]]:
    """Create mock model parameters for testing.

    Each layer contributes a (10, 20) weight followed by a (10,) bias,
    named ``layer{i}.weight`` / ``layer{i}.bias``.
    """
    specs = (("weight", (10, 20)), ("bias", (10,)))
    return [
        (f"layer{layer_idx}.{suffix}", torch.randn(*shape, dtype=dtype))
        for layer_idx in range(num_layers)
        for suffix, shape in specs
    ]
def create_state_dict_info(
    params: list[tuple[str, torch.Tensor]],
) -> dict[str, tuple[tuple[int, ...], torch.dtype]]:
    """Create state dict info (name -> (shape, dtype)) from params."""
    info: dict[str, tuple[tuple[int, ...], torch.dtype]] = {}
    for name, tensor in params:
        info[name] = (tuple(tensor.shape), tensor.dtype)
    return info
# --- Unit Tests: NCCLWeightTransferUpdateInfo packed field ---
class TestNCCLWeightTransferUpdateInfoPacked:
    """Test NCCLWeightTransferUpdateInfo dataclass packed field."""

    def test_packed_default_false(self):
        """Test that packed defaults to False."""
        # Construct without the packed kwarg to exercise the default.
        info = NCCLWeightTransferUpdateInfo(
            names=["layer.weight"],
            dtype_names=["float32"],
            shapes=[[10, 10]],
        )
        assert info.packed is False

    def test_packed_can_be_set_true(self):
        """Test that packed can be set to True."""
        info = NCCLWeightTransferUpdateInfo(
            names=["layer.weight"],
            dtype_names=["float32"],
            shapes=[[10, 10]],
            packed=True,
        )
        assert info.packed is True
# --- Unit Tests: packed_broadcast_producer ---
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
class TestPackedBroadcastProducer:
    """Test packed_broadcast_producer function."""

    def test_producer_broadcasts_tensors(self):
        """Test that producer broadcasts all tensors."""
        params = create_mock_model_params()
        params_cuda = [(name, tensor.cuda()) for name, tensor in params]
        mock_group = MockCommunicationGroup()
        # Use a small target size to force multiple batches
        packed_broadcast_producer(
            iterator=iter(params_cuda),
            group=mock_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=500,
        )
        # Should have broadcasted some tensors
        assert mock_group.broadcast_count > 0
        assert len(mock_group.broadcasted_tensors) > 0

    def test_producer_single_large_tensor(self):
        """Test with a single tensor larger than target size."""
        # Create a large tensor
        large_tensor = torch.randn(1000, 1000, dtype=torch.float32).cuda()
        params = [("large_weight", large_tensor)]
        mock_group = MockCommunicationGroup()
        # Small target size to force the tensor to exceed it
        packed_broadcast_producer(
            iterator=iter(params),
            group=mock_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=100,
        )
        # Should still broadcast the tensor (at least 1 broadcast)
        assert mock_group.broadcast_count >= 1
        assert len(mock_group.broadcasted_tensors) >= 1
        # Verify the total broadcasted size matches the tensor
        # (broadcast payloads are byte buffers, so numel counts bytes).
        expected_size = large_tensor.numel() * large_tensor.element_size()
        actual_size = sum(t.numel() for t in mock_group.broadcasted_tensors)
        assert actual_size == expected_size

    def test_producer_multiple_batches(self):
        """Test that tensors are properly batched when exceeding target size."""
        # Create many small tensors
        params = [
            (f"weight_{i}", torch.randn(10, 10, dtype=torch.float32).cuda())
            for i in range(20)
        ]
        mock_group = MockCommunicationGroup()
        # Small target size to force multiple batches
        packed_broadcast_producer(
            iterator=iter(params),
            group=mock_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=2000,
        )
        # Should have multiple broadcasts
        assert mock_group.broadcast_count > 1
        # Total size should match sum of all tensors
        expected_total = sum(t.numel() * t.element_size() for _, t in params)
        actual_total = sum(t.numel() for t in mock_group.broadcasted_tensors)
        assert actual_total == expected_total

    def test_producer_empty_iterator(self):
        """Test producer handles empty iterator gracefully."""
        mock_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter([]),
            group=mock_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=1000,
        )
        # No broadcasts for empty iterator
        assert mock_group.broadcast_count == 0
# --- Unit Tests: packed_broadcast_consumer ---
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
class TestPackedBroadcastConsumer:
    """Test packed_broadcast_consumer function."""

    def test_consumer_receives_tensors(self):
        """Test that consumer receives and unpacks tensors."""
        params = create_mock_model_params()
        params_cuda = [(name, tensor.cuda()) for name, tensor in params]
        buffer_size = 2000
        # First, run producer to get the broadcasted tensors
        producer_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter(params_cuda),
            group=producer_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=buffer_size,
        )
        # Now run consumer with the broadcasted tensors replayed back
        # (buffer_size must match the producer's for batches to line up).
        consumer_group = MockConsumerCommunicationGroup(
            producer_group.broadcasted_tensors
        )
        state_dict_info = create_state_dict_info(params_cuda)
        unpacked_tensors = {}

        def post_unpack_func(tensor_list):
            # Clone: the unpacked views alias the shared receive buffer.
            for name, tensor in tensor_list:
                unpacked_tensors[name] = tensor.clone()

        packed_broadcast_consumer(
            iterator=iter(state_dict_info.items()),
            group=consumer_group,
            src=0,
            post_unpack_func=post_unpack_func,
            buffer_size_bytes=buffer_size,
        )
        # Verify all parameters were unpacked
        assert len(unpacked_tensors) == len(params)
        # Verify each tensor matches the original
        for name, original_tensor in params_cuda:
            assert name in unpacked_tensors
            unpacked = unpacked_tensors[name]
            assert unpacked.shape == original_tensor.shape
            assert unpacked.dtype == original_tensor.dtype
            assert torch.allclose(unpacked, original_tensor, rtol=1e-5, atol=1e-7)
# --- Integration Tests: Producer-Consumer Roundtrip ---
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
class TestPackedBroadcastRoundtrip:
    """Test producer-consumer roundtrip behavior.

    Each test runs the producer against a mock group that records the packed
    buffers, then replays those buffers through the consumer and checks that
    every tensor survives the pack/unpack roundtrip bit-for-bit (within
    floating-point tolerance).
    """

    @pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
    def test_roundtrip_different_dtypes(self, dtype):
        """Test roundtrip with different data types."""
        params = create_mock_model_params(num_layers=2, dtype=dtype)
        params_cuda = [(name, tensor.cuda()) for name, tensor in params]
        buffer_size = 1000
        producer_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter(params_cuda),
            group=producer_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=buffer_size,
        )
        # Replay the producer's packed buffers on the consumer side.
        consumer_group = MockConsumerCommunicationGroup(
            producer_group.broadcasted_tensors
        )
        state_dict_info = create_state_dict_info(params_cuda)
        unpacked_tensors = {}

        def post_unpack_func(tensor_list):
            # Clone so the values outlive the consumer's internal buffers.
            for name, tensor in tensor_list:
                unpacked_tensors[name] = tensor.clone()

        packed_broadcast_consumer(
            iterator=iter(state_dict_info.items()),
            group=consumer_group,
            src=0,
            post_unpack_func=post_unpack_func,
            buffer_size_bytes=buffer_size,
        )
        # Verify roundtrip preserves data
        for name, original_tensor in params_cuda:
            assert name in unpacked_tensors
            unpacked = unpacked_tensors[name]
            assert unpacked.dtype == dtype
            assert torch.allclose(unpacked, original_tensor, rtol=1e-4, atol=1e-6)

    def test_roundtrip_mixed_dtypes(self):
        """Test roundtrip with mixed data types."""
        # Create params with mixed dtypes
        params = [
            ("layer1.weight", torch.randn(10, 20, dtype=torch.float32).cuda()),
            ("layer1.bias", torch.randn(10, dtype=torch.float16).cuda()),
            ("layer2.weight", torch.randn(20, 30, dtype=torch.bfloat16).cuda()),
        ]
        buffer_size = 500
        producer_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter(params),
            group=producer_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=buffer_size,
        )
        consumer_group = MockConsumerCommunicationGroup(
            producer_group.broadcasted_tensors
        )
        state_dict_info = create_state_dict_info(params)
        unpacked_tensors = {}

        def post_unpack_func(tensor_list):
            for name, tensor in tensor_list:
                unpacked_tensors[name] = tensor.clone()

        packed_broadcast_consumer(
            iterator=iter(state_dict_info.items()),
            group=consumer_group,
            src=0,
            post_unpack_func=post_unpack_func,
            buffer_size_bytes=buffer_size,
        )
        # Verify all params roundtrip correctly with correct dtypes
        for name, original_tensor in params:
            assert name in unpacked_tensors
            unpacked = unpacked_tensors[name]
            assert unpacked.shape == original_tensor.shape
            assert unpacked.dtype == original_tensor.dtype
            assert torch.allclose(unpacked, original_tensor, rtol=1e-4, atol=1e-6)

    @pytest.mark.parametrize("target_size", [100, 1000, 10000, 100000])
    def test_roundtrip_different_batch_sizes(self, target_size):
        """Test roundtrip with different target batch sizes."""
        params = create_mock_model_params(num_layers=5)
        params_cuda = [(name, tensor.cuda()) for name, tensor in params]
        producer_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter(params_cuda),
            group=producer_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=target_size,
        )
        consumer_group = MockConsumerCommunicationGroup(
            producer_group.broadcasted_tensors
        )
        state_dict_info = create_state_dict_info(params_cuda)
        unpacked_tensors = {}

        def post_unpack_func(tensor_list):
            for name, tensor in tensor_list:
                unpacked_tensors[name] = tensor.clone()

        packed_broadcast_consumer(
            iterator=iter(state_dict_info.items()),
            group=consumer_group,
            src=0,
            post_unpack_func=post_unpack_func,
            buffer_size_bytes=target_size,
        )
        # Verify all params roundtrip correctly
        assert len(unpacked_tensors) == len(params)
        for name, original_tensor in params_cuda:
            assert name in unpacked_tensors
            assert torch.allclose(
                unpacked_tensors[name], original_tensor, rtol=1e-5, atol=1e-7
            )

    def test_roundtrip_non_contiguous_tensors(self):
        """Test roundtrip with non-contiguous tensors from the trainer."""
        # Create non-contiguous tensors (simulating trainer outputs)
        # Transposed tensors are non-contiguous
        weight1 = torch.randn(20, 10, dtype=torch.float32).cuda().T
        # Sliced tensors with step are non-contiguous
        weight2 = torch.randn(40, 30, dtype=torch.float16).cuda()[::2, ::2]
        # Permuted tensors are non-contiguous
        weight3 = torch.randn(5, 10, 15, dtype=torch.bfloat16).cuda().permute(2, 0, 1)
        params = [
            ("layer1.weight", weight1),
            ("layer2.weight", weight2),
            ("layer3.weight", weight3),
        ]
        # Verify tensors are indeed non-contiguous
        for name, tensor in params:
            assert not tensor.is_contiguous(), f"{name} should be non-contiguous"
        buffer_size = 500
        producer_group = MockCommunicationGroup()
        packed_broadcast_producer(
            iterator=iter(params),
            group=producer_group,
            src=0,
            post_iter_func=lambda x: x[1],
            buffer_size_bytes=buffer_size,
        )
        consumer_group = MockConsumerCommunicationGroup(
            producer_group.broadcasted_tensors
        )
        state_dict_info = create_state_dict_info(params)
        unpacked_tensors = {}

        def post_unpack_func(tensor_list):
            for name, tensor in tensor_list:
                unpacked_tensors[name] = tensor.clone()

        packed_broadcast_consumer(
            iterator=iter(state_dict_info.items()),
            group=consumer_group,
            src=0,
            post_unpack_func=post_unpack_func,
            buffer_size_bytes=buffer_size,
        )
        # Verify all non-contiguous params roundtrip correctly
        for name, original_tensor in params:
            assert name in unpacked_tensors
            unpacked = unpacked_tensors[name]
            assert unpacked.shape == original_tensor.shape
            assert unpacked.dtype == original_tensor.dtype
            assert torch.allclose(unpacked, original_tensor, rtol=1e-4, atol=1e-6)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_packed_tensor.py",
"license": "Apache License 2.0",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/distributed/test_weight_transfer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for weight transfer engine backends.
Unit tests for engine classes (parsing, validation, registry).
Integration tests for NCCL and IPC weight transfer between processes using Ray.
"""
import base64
import pickle
from unittest.mock import MagicMock
import pytest
import ray
import torch
from torch.multiprocessing.reductions import reduce_tensor
from vllm.config.parallel import ParallelConfig
from vllm.config.weight_transfer import WeightTransferConfig
from vllm.distributed.weight_transfer import WeightTransferEngineFactory
from vllm.distributed.weight_transfer.ipc_engine import (
IPCWeightTransferEngine,
IPCWeightTransferInitInfo,
IPCWeightTransferUpdateInfo,
)
from vllm.distributed.weight_transfer.nccl_engine import (
NCCLWeightTransferEngine,
NCCLWeightTransferInitInfo,
NCCLWeightTransferUpdateInfo,
)
from vllm.utils.network_utils import get_open_port
def create_mock_parallel_config(
    rank: int = 0,
    world_size: int = 1,
    dp_rank: int = 0,
) -> ParallelConfig:
    """Build a MagicMock standing in for ParallelConfig.

    Only the attributes the weight-transfer engines read (rank, world_size,
    data_parallel_rank) are populated.
    """
    mock_config = MagicMock(spec=ParallelConfig)
    mock_config.rank = rank
    mock_config.world_size = world_size
    mock_config.data_parallel_rank = dp_rank
    return mock_config
# --- Unit Tests: NCCLWeightTransferUpdateInfo Validation ---
class TestNCCLWeightTransferUpdateInfoValidation:
    """Test NCCLWeightTransferUpdateInfo dataclass validation."""

    def test_valid_update_info(self):
        """A consistent names/dtypes/shapes triple constructs successfully."""
        weight_names = ["layer.weight", "layer.bias"]
        dtypes = ["float32", "float32"]
        dims = [[10, 10], [10]]
        info = NCCLWeightTransferUpdateInfo(
            names=weight_names,
            dtype_names=dtypes,
            shapes=dims,
        )
        assert info.names == weight_names
        assert info.dtype_names == dtypes
        assert info.shapes == dims

    def test_mismatched_dtype_names_raises(self):
        """dtype_names shorter than names must raise ValueError."""
        with pytest.raises(ValueError, match="dtype_names"):
            NCCLWeightTransferUpdateInfo(
                names=["layer.weight", "layer.bias"],
                dtype_names=["float32"],  # one dtype for two names
                shapes=[[10, 10], [10]],
            )

    def test_mismatched_shapes_raises(self):
        """shapes shorter than names must raise ValueError."""
        with pytest.raises(ValueError, match="shapes"):
            NCCLWeightTransferUpdateInfo(
                names=["layer.weight", "layer.bias"],
                dtype_names=["float32", "float32"],
                shapes=[[10, 10]],  # one shape for two names
            )

    def test_empty_lists_valid(self):
        """All-empty metadata is accepted (zero tensors to transfer)."""
        info = NCCLWeightTransferUpdateInfo(names=[], dtype_names=[], shapes=[])
        assert len(info.names) == 0
# --- Unit Tests: Engine Parsing ---
class TestNCCLEngineParsing:
    """Test NCCLWeightTransferEngine parsing methods."""

    def _build_engine(self):
        """Construct an NCCL engine with a mocked parallel config."""
        return NCCLWeightTransferEngine(
            WeightTransferConfig(backend="nccl"),
            create_mock_parallel_config(),
        )

    def test_parse_init_info_valid(self):
        """A complete init-info dict parses into a typed dataclass."""
        engine = self._build_engine()
        parsed = engine.parse_init_info(
            {
                "master_address": "127.0.0.1",
                "master_port": 12345,
                "rank_offset": 1,
                "world_size": 3,
            }
        )
        assert isinstance(parsed, NCCLWeightTransferInitInfo)
        assert parsed.master_address == "127.0.0.1"
        assert parsed.master_port == 12345
        assert parsed.rank_offset == 1
        assert parsed.world_size == 3

    def test_parse_init_info_missing_field_raises(self):
        """An init-info dict missing required fields is rejected."""
        engine = self._build_engine()
        # master_port, rank_offset and world_size are all absent.
        with pytest.raises(ValueError, match="Invalid init_info"):
            engine.parse_init_info({"master_address": "127.0.0.1"})

    def test_parse_update_info_valid(self):
        """A complete update-info dict parses into a typed dataclass."""
        engine = self._build_engine()
        parsed = engine.parse_update_info(
            {
                "names": ["w1", "w2"],
                "dtype_names": ["float32", "bfloat16"],
                "shapes": [[100, 100], [50]],
            }
        )
        assert isinstance(parsed, NCCLWeightTransferUpdateInfo)
        assert parsed.names == ["w1", "w2"]
        assert parsed.dtype_names == ["float32", "bfloat16"]
        assert parsed.shapes == [[100, 100], [50]]
# --- Unit Tests: Engine Registry ---
class TestEngineRegistry:
    """Test weight transfer engine registry."""

    def test_create_engine_nccl(self):
        """Factory returns an NCCL engine for backend='nccl'."""
        engine = WeightTransferEngineFactory.create_engine(
            WeightTransferConfig(backend="nccl"),
            create_mock_parallel_config(),
        )
        assert isinstance(engine, NCCLWeightTransferEngine)

    def test_create_engine_ipc(self):
        """Factory returns an IPC engine for backend='ipc'."""
        engine = WeightTransferEngineFactory.create_engine(
            WeightTransferConfig(backend="ipc"),
            create_mock_parallel_config(),
        )
        assert isinstance(engine, IPCWeightTransferEngine)

    def test_create_engine_invalid_backend(self):
        """Factory raises for a backend name it does not know."""
        # Pydantic validates the Literal `backend` field at construction time,
        # so an invalid value can never reach the factory through the normal
        # constructor path.
        from pydantic import ValidationError

        with pytest.raises(ValidationError):
            WeightTransferConfig(backend="invalid")

        # Build a valid config, then smuggle in a bogus backend by bypassing
        # Pydantic's validated setattr, and check the factory rejects it.
        config = WeightTransferConfig(backend="nccl")
        object.__setattr__(config, "backend", "invalid")
        with pytest.raises(ValueError, match="Invalid weight transfer backend"):
            WeightTransferEngineFactory.create_engine(
                config, create_mock_parallel_config()
            )

    def test_register_duplicate_raises(self):
        """Registering an already-registered engine name raises."""
        with pytest.raises(ValueError, match="already registered"):
            WeightTransferEngineFactory.register_engine(
                "nccl", NCCLWeightTransferEngine
            )
# --- Test receive_weights without init raises ---
def test_nccl_receive_weights_without_init_raises():
    """Test that receive_weights raises if init_transfer_engine wasn't called."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    engine = NCCLWeightTransferEngine(
        WeightTransferConfig(backend="nccl"),
        create_mock_parallel_config(),
    )
    pending_update = NCCLWeightTransferUpdateInfo(
        names=["w"],
        dtype_names=["float32"],
        shapes=[[10]],
    )
    # The engine never joined a process group, so receiving must fail loudly.
    with pytest.raises(RuntimeError, match="not initialized"):
        engine.receive_weights(pending_update, lambda x: None)
# --- Integration Test: NCCL Weight Transfer Between Ray Tasks ---
@ray.remote(num_gpus=1)
def trainer_broadcast_tensor(
    master_address: str,
    master_port: int,
    world_size: int,
    tensor_shape: list[int],
    tensor_dtype: str,
) -> bool:
    """Trainer task that broadcasts a tensor via NCCL.

    Joins the stateless process group as rank 0 and broadcasts a ones-filled
    tensor of the given shape/dtype; the inference side verifies the values.
    Returns True once the broadcast has completed.
    """
    # Imports are local because this function body executes on a Ray worker.
    import torch

    from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
    from vllm.distributed.utils import StatelessProcessGroup

    # Create process group as rank 0 (trainer)
    pg = StatelessProcessGroup.create(
        host=master_address,
        port=master_port,
        rank=0,
        world_size=world_size,
    )
    # Ray sets CUDA_VISIBLE_DEVICES, so device 0 is the assigned GPU
    comm = PyNcclCommunicator(pg, device=0)
    # Create and broadcast the tensor
    dtype = getattr(torch, tensor_dtype)
    tensor_to_send = torch.ones(tensor_shape, dtype=dtype, device="cuda:0")
    comm.broadcast(tensor_to_send, src=0, stream=torch.cuda.current_stream())
    # Ensure the async broadcast has finished before reporting success.
    torch.cuda.synchronize()
    return True
@ray.remote(num_gpus=1)
def inference_receive_tensor(
    master_address: str,
    master_port: int,
    world_size: int,
    tensor_shape: list[int],
    tensor_dtype: str,
) -> dict:
    """Inference task that receives tensor via NCCLWeightTransferEngine.

    Joins the trainer's process group as rank 1 (rank_offset=1), receives one
    broadcast tensor, and returns a result dict with keys ``success``,
    ``received_shape`` and ``received_sum`` for the driver to assert on.
    """
    # Imports are local because this function body executes on a Ray worker.
    from unittest.mock import MagicMock

    import torch

    from vllm.config.parallel import ParallelConfig
    from vllm.config.weight_transfer import WeightTransferConfig
    from vllm.distributed.weight_transfer.nccl_engine import (
        NCCLWeightTransferEngine,
        NCCLWeightTransferInitInfo,
        NCCLWeightTransferUpdateInfo,
    )

    # Create engine with mock parallel config
    config = WeightTransferConfig(backend="nccl")
    parallel_config = MagicMock(spec=ParallelConfig)
    parallel_config.rank = 0
    parallel_config.world_size = 1
    parallel_config.data_parallel_rank = 0
    engine = NCCLWeightTransferEngine(config, parallel_config)
    # Initialize the engine (joins as rank 1)
    init_info = NCCLWeightTransferInitInfo(
        master_address=master_address,
        master_port=master_port,
        rank_offset=1,  # Trainer is rank 0, we become rank 1
        world_size=world_size,
    )
    engine.init_transfer_engine(init_info)
    # Receive weights with a no-op load_weights that captures the tensor
    received_tensors = []

    def noop_load_weights(weights: list[tuple[str, torch.Tensor]]):
        for name, tensor in weights:
            # Clone tensor to keep it after engine cleans up
            received_tensors.append((name, tensor.clone()))

    update_info = NCCLWeightTransferUpdateInfo(
        names=["test.weight"],
        dtype_names=[tensor_dtype],
        shapes=[tensor_shape],
    )
    engine.receive_weights(update_info, noop_load_weights)
    torch.cuda.synchronize()
    # Verify we received the tensor
    success = False
    received_shape = None
    received_sum = None
    if len(received_tensors) == 1:
        name, tensor = received_tensors[0]
        received_shape = list(tensor.shape)
        received_sum = tensor.sum().item()
        # Check shape matches and values are all 1s (trainer sends ones)
        if received_shape == tensor_shape:
            expected_sum = 1.0 * torch.tensor(tensor_shape).prod().item()
            if abs(received_sum - expected_sum) < 0.01:
                success = True
    engine.shutdown()
    return {
        "success": success,
        "received_shape": received_shape,
        "received_sum": received_sum,
    }
@pytest.mark.skipif(
    torch.cuda.device_count() < 2,
    reason="Need at least 2 GPUs to run NCCL weight transfer test.",
)
def test_nccl_weight_transfer_between_processes():
    """Test NCCL weight transfer from trainer to inference process using Ray.

    This test verifies that the NCCLWeightTransferEngine can receive
    tensors broadcast by a trainer process via NCCL.
    """
    ray.init(ignore_reinit_error=True)
    master_address = "127.0.0.1"
    master_port = get_open_port()
    world_size = 2  # 1 trainer + 1 inference worker
    # Tensor to transfer: 100x100 ones
    tensor_shape = [100, 100]
    tensor_dtype = "float32"
    # Start both tasks concurrently - Ray assigns GPUs automatically.
    # Both must be launched before ray.get: each blocks on the NCCL
    # rendezvous, so waiting on one before starting the other would deadlock.
    inference_future = inference_receive_tensor.remote(
        master_address, master_port, world_size, tensor_shape, tensor_dtype
    )
    trainer_future = trainer_broadcast_tensor.remote(
        master_address, master_port, world_size, tensor_shape, tensor_dtype
    )
    # Wait for both to complete
    trainer_result, result = ray.get([trainer_future, inference_future])
    assert trainer_result, "Trainer should complete successfully"
    assert result["success"], (
        f"Weight transfer failed. "
        f"Received shape: {result['received_shape']}, "
        f"Received sum: {result['received_sum']}"
    )
# --- Unit Tests: IPCWeightTransferUpdateInfo Validation ---
class TestIPCWeightTransferUpdateInfoValidation:
    """Test IPCWeightTransferUpdateInfo dataclass validation.

    NOTE: the dummy source tensors are kept as locals for the duration of each
    test so the CUDA IPC handles created from them stay valid.
    """

    def test_valid_update_info(self):
        """Test creating valid IPCWeightTransferUpdateInfo."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        # Create a dummy tensor and IPC handle
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        # One {gpu_uuid: handle} mapping per transferred tensor.
        ipc_handles = [{gpu_uuid: ipc_handle}]
        info = IPCWeightTransferUpdateInfo(
            names=["layer.weight"],
            dtype_names=["float32"],
            shapes=[[10, 10]],
            ipc_handles=ipc_handles,
        )
        assert info.names == ["layer.weight"]
        assert info.dtype_names == ["float32"]
        assert info.shapes == [[10, 10]]
        assert len(info.ipc_handles) == 1

    def test_mismatched_dtype_names_raises(self):
        """Test that mismatched dtype_names length raises ValueError."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        ipc_handles = [{gpu_uuid: ipc_handle}, {gpu_uuid: ipc_handle}]
        with pytest.raises(ValueError, match="dtype_names"):
            IPCWeightTransferUpdateInfo(
                names=["layer.weight", "layer.bias"],
                dtype_names=["float32"],  # Only one dtype
                shapes=[[10, 10], [10]],
                ipc_handles=ipc_handles,
            )

    def test_mismatched_shapes_raises(self):
        """Test that mismatched shapes length raises ValueError."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        ipc_handles = [{gpu_uuid: ipc_handle}, {gpu_uuid: ipc_handle}]
        with pytest.raises(ValueError, match="shapes"):
            IPCWeightTransferUpdateInfo(
                names=["layer.weight", "layer.bias"],
                dtype_names=["float32", "float32"],
                shapes=[[10, 10]],  # Only one shape
                ipc_handles=ipc_handles,
            )

    def test_mismatched_ipc_handles_raises(self):
        """Test that mismatched ipc_handles length raises ValueError."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        ipc_handles = [{gpu_uuid: ipc_handle}]  # Only one handle
        with pytest.raises(ValueError, match="ipc_handles"):
            IPCWeightTransferUpdateInfo(
                names=["layer.weight", "layer.bias"],
                dtype_names=["float32", "float32"],
                shapes=[[10, 10], [10]],
                ipc_handles=ipc_handles,
            )

    def test_valid_update_info_from_pickled(self):
        """Test creating IPCWeightTransferUpdateInfo from pickled handles."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        ipc_handles = [{gpu_uuid: ipc_handle}]
        # Simulate the HTTP transport: handles arrive pickled + base64-encoded.
        pickled = base64.b64encode(pickle.dumps(ipc_handles)).decode("utf-8")
        info = IPCWeightTransferUpdateInfo(
            names=["layer.weight"],
            dtype_names=["float32"],
            shapes=[[10, 10]],
            ipc_handles_pickled=pickled,
        )
        # __post_init__ decodes the payload and clears the pickled field.
        assert info.ipc_handles == ipc_handles
        assert info.ipc_handles_pickled is None

    def test_both_handles_and_pickled_raises(self):
        """Test that providing both ipc_handles and ipc_handles_pickled raises."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        dummy_tensor = torch.ones(10, 10, device="cuda:0")
        ipc_handle = reduce_tensor(dummy_tensor)
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        ipc_handles = [{gpu_uuid: ipc_handle}]
        pickled = base64.b64encode(pickle.dumps(ipc_handles)).decode("utf-8")
        with pytest.raises(ValueError, match="Cannot specify both"):
            IPCWeightTransferUpdateInfo(
                names=["layer.weight"],
                dtype_names=["float32"],
                shapes=[[10, 10]],
                ipc_handles=ipc_handles,
                ipc_handles_pickled=pickled,
            )

    def test_neither_handles_nor_pickled_raises(self):
        """Test that providing neither ipc_handles nor ipc_handles_pickled raises."""
        with pytest.raises(ValueError, match="must be provided"):
            IPCWeightTransferUpdateInfo(
                names=["layer.weight"],
                dtype_names=["float32"],
                shapes=[[10, 10]],
            )

    def test_empty_lists_valid(self):
        """Test that empty lists are valid."""
        info = IPCWeightTransferUpdateInfo(
            names=[],
            dtype_names=[],
            shapes=[],
            ipc_handles=[],
        )
        assert len(info.names) == 0
# --- Unit Tests: IPC Engine Parsing ---
class TestIPCEngineParsing:
    """Test IPCWeightTransferEngine parsing methods."""

    def _make_engine_and_handles(self):
        """Return (engine, gpu_uuid, ipc_handles, source_tensors).

        The source tensors are returned so the caller can keep them alive
        while the CUDA IPC handles derived from them are in use.
        """
        engine = IPCWeightTransferEngine(
            WeightTransferConfig(backend="ipc"),
            create_mock_parallel_config(),
        )
        sources = (
            torch.ones(100, 100, device="cuda:0"),
            torch.ones(50, device="cuda:0"),
        )
        gpu_uuid = str(torch.cuda.get_device_properties(0).uuid)
        handles = [{gpu_uuid: reduce_tensor(t)} for t in sources]
        return engine, gpu_uuid, handles, sources

    def test_parse_update_info_valid(self):
        """Test parsing valid update info dict."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        engine, _, handles, _keepalive = self._make_engine_and_handles()
        parsed = engine.parse_update_info(
            {
                "names": ["w1", "w2"],
                "dtype_names": ["float32", "bfloat16"],
                "shapes": [[100, 100], [50]],
                "ipc_handles": handles,
            }
        )
        assert isinstance(parsed, IPCWeightTransferUpdateInfo)
        assert parsed.names == ["w1", "w2"]
        assert parsed.dtype_names == ["float32", "bfloat16"]
        assert parsed.shapes == [[100, 100], [50]]
        assert len(parsed.ipc_handles) == 2

    def test_parse_update_info_pickled(self):
        """Test parsing update info with pickled IPC handles (HTTP path)."""
        if torch.cuda.device_count() < 1:
            pytest.skip("Need at least 1 GPU for this test")
        engine, gpu_uuid, handles, _keepalive = self._make_engine_and_handles()
        encoded = base64.b64encode(pickle.dumps(handles)).decode("utf-8")
        parsed = engine.parse_update_info(
            {
                "names": ["w1", "w2"],
                "dtype_names": ["float32", "bfloat16"],
                "shapes": [[100, 100], [50]],
                "ipc_handles_pickled": encoded,
            }
        )
        assert isinstance(parsed, IPCWeightTransferUpdateInfo)
        assert parsed.names == ["w1", "w2"]
        assert len(parsed.ipc_handles) == 2
        assert parsed.ipc_handles_pickled is None
        assert gpu_uuid in parsed.ipc_handles[0]
        assert gpu_uuid in parsed.ipc_handles[1]
# --- Integration Test: IPC Weight Transfer Between Ray Tasks ---
def get_physical_gpu_id(device_index: int = 0) -> str:
    """Return the physical GPU UUID string for the given device index."""
    return str(torch.cuda.get_device_properties(device_index).uuid)
@ray.remote(num_gpus=0.5)
class TrainerActor:
    """Trainer actor that creates and holds CUDA IPC handles.

    An actor (rather than a task) is used because the source tensor must
    stay alive in this process for the IPC handle to remain valid while
    the inference side maps it.
    """

    def __init__(self, tensor_shape: list[int], tensor_dtype: str):
        # Create tensor on GPU and keep it alive
        dtype = getattr(torch, tensor_dtype)
        self.tensor = torch.ones(tensor_shape, dtype=dtype, device="cuda:0")
        self.tensor.fill_(42.0)  # Fill with 42 to verify correct transfer
        # Create IPC handle (tensor must stay alive for IPC to work)
        ipc_handle = reduce_tensor(self.tensor)
        gpu_uuid = get_physical_gpu_id(0)
        # Ensure the fill has completed before the handle is handed out.
        torch.cuda.synchronize()
        self.ipc_handle_dict = {
            "ipc_handle": ipc_handle,
            "gpu_uuid": gpu_uuid,
            "shape": tensor_shape,
            "dtype": tensor_dtype,
        }

    def get_ipc_handle_dict(self) -> dict:
        """Return IPC handle dict. Tensor stays alive in this actor."""
        return self.ipc_handle_dict
@ray.remote(num_gpus=0.5)
def inference_receive_ipc_tensor(
    ipc_handle_dict: dict,
    mode: str = "ray",
) -> dict:
    """Inference task that receives tensor via IPCWeightTransferEngine.

    ``mode`` selects the transport representation of the handles:
    "ray" passes the handle objects directly; "http" pickles and
    base64-encodes them, exercising the __post_init__ decode path.
    Returns a dict with ``success``, ``received_shape`` and ``received_sum``.
    """
    # Imports are local because this function body executes on a Ray worker.
    from unittest.mock import MagicMock

    import torch

    from vllm.config.parallel import ParallelConfig
    from vllm.config.weight_transfer import WeightTransferConfig
    from vllm.distributed.weight_transfer.ipc_engine import (
        IPCWeightTransferEngine,
    )

    # Create engine with mock parallel config
    config = WeightTransferConfig(backend="ipc")
    parallel_config = MagicMock(spec=ParallelConfig)
    parallel_config.rank = 0
    parallel_config.world_size = 1
    parallel_config.data_parallel_rank = 0
    engine = IPCWeightTransferEngine(config, parallel_config)
    # Initialize the engine (no-op for IPC)
    init_info = IPCWeightTransferInitInfo()
    engine.init_transfer_engine(init_info)
    # Receive weights with a no-op load_weights that captures the tensor
    received_tensors = []

    def noop_load_weights(weights: list[tuple[str, torch.Tensor]]):
        for name, tensor in weights:
            # Clone tensor to keep it after engine cleans up
            received_tensors.append((name, tensor.clone()))

    # Build update dict and go through parse_update_info (exercises __post_init__)
    ipc_handles = [{ipc_handle_dict["gpu_uuid"]: ipc_handle_dict["ipc_handle"]}]
    if mode == "ray":
        update_dict: dict = {
            "names": ["test.weight"],
            "dtype_names": [ipc_handle_dict["dtype"]],
            "shapes": [ipc_handle_dict["shape"]],
            "ipc_handles": ipc_handles,
        }
    elif mode == "http":
        pickled = base64.b64encode(pickle.dumps(ipc_handles)).decode("utf-8")
        update_dict = {
            "names": ["test.weight"],
            "dtype_names": [ipc_handle_dict["dtype"]],
            "shapes": [ipc_handle_dict["shape"]],
            "ipc_handles_pickled": pickled,
        }
    else:
        raise ValueError(f"Unknown mode: {mode}")
    update_info = engine.parse_update_info(update_dict)
    engine.receive_weights(update_info, noop_load_weights)
    torch.cuda.synchronize()
    # Verify we received the tensor
    success = False
    received_shape = None
    received_sum = None
    if len(received_tensors) == 1:
        name, tensor = received_tensors[0]
        received_shape = list(tensor.shape)
        received_sum = tensor.sum().item()
        # Check shape matches and values are all 42s (trainer sends 42s)
        if received_shape == ipc_handle_dict["shape"]:
            expected_sum = 42.0 * torch.tensor(ipc_handle_dict["shape"]).prod().item()
            if abs(received_sum - expected_sum) < 0.01:
                success = True
    engine.shutdown()
    return {
        "success": success,
        "received_shape": received_shape,
        "received_sum": received_sum,
    }
@pytest.mark.skipif(
    torch.cuda.device_count() < 1,
    reason="Need at least 1 GPU to run IPC weight transfer test.",
)
@pytest.mark.parametrize("mode", ["ray", "http"])
def test_ipc_weight_transfer_between_processes(mode: str):
    """Test IPC weight transfer from trainer to inference process using Ray.

    Parametrized over transport modes:
    - 'ray': ipc_handles passed directly.
    - 'http': ipc_handles pickled + base64-encoded, unpickled via __post_init__.

    IPC requires same-GPU access, so we use a placement group to co-locate
    the trainer actor and inference task on the same GPU.
    """
    from ray.util.placement_group import placement_group
    from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy

    ray.init(ignore_reinit_error=True)
    # Create a placement group to ensure both processes are on the same GPU
    # Use fractional GPUs so both tasks can share the same GPU bundle
    pg = placement_group([{"GPU": 1, "CPU": 2}])
    ray.get(pg.ready())
    scheduling_strategy = PlacementGroupSchedulingStrategy(
        placement_group=pg,
        placement_group_capture_child_tasks=True,
    )
    # Tensor to transfer: 100x100 filled with 42s
    tensor_shape = [100, 100]
    tensor_dtype = "float32"
    # Create trainer actor that holds the tensor and IPC handle (stays alive)
    trainer_actor = TrainerActor.options(  # type: ignore[attr-defined]
        scheduling_strategy=scheduling_strategy
    ).remote(tensor_shape, tensor_dtype)
    # Get IPC handle dict (tensor stays alive in trainer actor)
    ipc_handle_dict = ray.get(trainer_actor.get_ipc_handle_dict.remote())
    # Receive tensor in inference process using IPC handles (on same GPU)
    # Trainer actor stays alive during this operation
    inference_result = ray.get(
        inference_receive_ipc_tensor.options(
            scheduling_strategy=scheduling_strategy
        ).remote(ipc_handle_dict, mode=mode)
    )
    assert inference_result["success"], (
        f"IPC weight transfer failed (mode={mode}). "
        f"Received shape: {inference_result['received_shape']}, "
        f"Received sum: {inference_result['received_sum']}"
    )
def test_ipc_receive_weights_missing_gpu_uuid_raises():
    """Test that receive_weights raises if GPU UUID not found in IPC handles."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    engine = IPCWeightTransferEngine(
        WeightTransferConfig(backend="ipc"),
        create_mock_parallel_config(),
    )
    # Key the handle under a UUID that cannot match the local GPU, so the
    # engine's lookup for its own device must fail.
    source = torch.ones(10, 10, device="cuda:0")
    bogus_handles = [{"wrong-uuid-12345": reduce_tensor(source)}]
    update = IPCWeightTransferUpdateInfo(
        names=["w"],
        dtype_names=["float32"],
        shapes=[[10, 10]],
        ipc_handles=bogus_handles,
    )
    with pytest.raises(ValueError, match="IPC handle not found"):
        engine.receive_weights(update, lambda x: None)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_weight_transfer.py",
"license": "Apache License 2.0",
"lines": 638,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/weight_transfer/test_weight_transfer_llm.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for weight transfer APIs via LLM class.
These tests use a mock weight transfer engine to verify that the API
calls the correct methods with the right arguments, without requiring
actual NCCL communication.
"""
import os
from collections.abc import Callable
from dataclasses import dataclass
from unittest.mock import patch
import pytest
import torch
from vllm import LLM
from vllm.config import WeightTransferConfig
from vllm.distributed.weight_transfer.base import (
WeightTransferEngine,
WeightTransferInitInfo,
WeightTransferInitRequest,
WeightTransferUpdateInfo,
WeightTransferUpdateRequest,
)
from ...utils import create_new_process_for_each_test
# Use a tiny model for fast testing
MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
# --- Mock Weight Transfer Engine ---
@dataclass
class MockInitInfo(WeightTransferInitInfo):
    """Minimal init payload consumed by the mock engine in these tests."""

    test_param: str = "test"
@dataclass
class MockUpdateInfo(WeightTransferUpdateInfo):
    """Minimal update payload consumed by the mock engine in these tests."""

    names: list[str] | None = None
    dtype_names: list[str] | None = None
    shapes: list[list[int]] | None = None
class MockWeightTransferEngine(WeightTransferEngine[MockInitInfo, MockUpdateInfo]):
    """Stub engine that records which lifecycle methods were invoked.

    Call records live on the class (not the instance) so test code in the
    driver process can read them back through collective_rpc closures.
    """

    init_info_cls = MockInitInfo
    update_info_cls = MockUpdateInfo

    # Class-level call tracking, reset whenever a new engine is constructed.
    init_transfer_engine_called: bool = False
    receive_weights_called: bool = False
    shutdown_called: bool = False
    last_init_info: MockInitInfo | None = None
    last_update_info: MockUpdateInfo | None = None

    def __init__(self, config, parallel_config):
        super().__init__(config, parallel_config)
        self._clear_tracking()

    @classmethod
    def _clear_tracking(cls) -> None:
        """Reset every class-level call record to its initial state."""
        cls.init_transfer_engine_called = False
        cls.receive_weights_called = False
        cls.shutdown_called = False
        cls.last_init_info = None
        cls.last_update_info = None

    def init_transfer_engine(self, init_info: MockInitInfo) -> None:
        tracker = MockWeightTransferEngine
        tracker.init_transfer_engine_called = True
        tracker.last_init_info = init_info

    def receive_weights(
        self,
        update_info: MockUpdateInfo,
        load_weights: Callable[[list[tuple[str, torch.Tensor]]], None],
    ) -> None:
        tracker = MockWeightTransferEngine
        tracker.receive_weights_called = True
        tracker.last_update_info = update_info
        # Stand in for a real transfer: invoke the loader with no weights.
        load_weights([])

    def shutdown(self) -> None:
        MockWeightTransferEngine.shutdown_called = True

    def trainer_send_weights(self, *args, **kwargs):
        """No-op stand-in for the trainer-side send."""
        pass
def mock_create_engine(config, parallel_config):
    """Factory stand-in that always builds the tracking mock engine."""
    return MockWeightTransferEngine(config, parallel_config)
# --- Tests ---
@create_new_process_for_each_test()
def test_get_world_size_tp1():
    """Check that parallel world_size resolves to 1 when TP=1."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    engine = LLM(
        model=MODEL_NAME,
        enforce_eager=True,
        load_format="dummy",
        tensor_parallel_size=1,
        weight_transfer_config=WeightTransferConfig(backend="nccl"),
    )
    parallel_cfg = engine.llm_engine.vllm_config.parallel_config
    assert parallel_cfg.world_size == 1
@create_new_process_for_each_test()
def test_init_weight_transfer_engine_calls_engine():
    """Test that init_weight_transfer_engine calls the engine's
    init_transfer_engine method."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    # Run in-process so mock.patch works (spawn won't inherit the mock)
    os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
    # Enable insecure serialization to allow pickling functions for collective_rpc
    os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1"
    # The patch must wrap LLM construction: the engine already exists right
    # after construction (checked below), so patching later would be too late.
    with patch(
        "vllm.v1.worker.gpu_worker.WeightTransferEngineFactory.create_engine",
        mock_create_engine,
    ):
        llm = LLM(
            model=MODEL_NAME,
            enforce_eager=True,
            load_format="dummy",
            tensor_parallel_size=1,
            weight_transfer_config=WeightTransferConfig(backend="nccl"),
        )

        # Verify engine was created
        # (executed inside each worker via collective_rpc; `self` is the worker)
        def check_engine_exists(self):
            return self.weight_transfer_engine is not None

        results = llm.collective_rpc(check_engine_exists)
        assert all(results), "Weight transfer engine should be initialized"
        # Call init_weight_transfer_engine
        llm.init_weight_transfer_engine(
            WeightTransferInitRequest(init_info={"test_param": "hello"})
        )

        # Verify init_transfer_engine was called on the engine
        def check_init_called(self):
            engine = self.weight_transfer_engine
            return (
                engine.init_transfer_engine_called,
                engine.last_init_info.test_param if engine.last_init_info else None,
            )

        results = llm.collective_rpc(check_init_called)
        for called, param in results:
            assert called, "init_transfer_engine should have been called"
            assert param == "hello", f"Expected 'hello', got {param}"
@create_new_process_for_each_test()
def test_update_weights_calls_engine():
    """Test that update_weights calls the engine's receive_weights method."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    # Run in-process so mock.patch works (spawn won't inherit the mock)
    os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
    # Enable insecure serialization to allow pickling functions for collective_rpc
    os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1"
    # Patch must be active while the LLM (and its workers) are constructed so
    # the worker builds MockWeightTransferEngine instead of a real backend.
    with patch(
        "vllm.v1.worker.gpu_worker.WeightTransferEngineFactory.create_engine",
        mock_create_engine,
    ):
        llm = LLM(
            model=MODEL_NAME,
            enforce_eager=True,
            load_format="dummy",
            tensor_parallel_size=1,
            weight_transfer_config=WeightTransferConfig(backend="nccl"),
        )
        # First init the weight transfer
        llm.init_weight_transfer_engine(
            WeightTransferInitRequest(init_info={"test_param": "init"})
        )
        # Call update_weights
        test_names = ["layer.weight", "layer.bias"]
        test_dtypes = ["float32", "float32"]
        test_shapes = [[10, 10], [10]]
        llm.update_weights(
            WeightTransferUpdateRequest(
                update_info={
                    "names": test_names,
                    "dtype_names": test_dtypes,
                    "shapes": test_shapes,
                }
            )
        )

        # Verify receive_weights was called with correct info
        # (runs inside each worker; returns a plain tuple so it pickles cleanly)
        def check_update_called(self):
            engine = self.weight_transfer_engine
            if not engine.receive_weights_called:
                return False, None, None, None
            info = engine.last_update_info
            return (True, info.names, info.dtype_names, info.shapes)

        results = llm.collective_rpc(check_update_called)
        for called, names, dtypes, shapes in results:
            assert called, "receive_weights should have been called"
            assert names == test_names
            assert dtypes == test_dtypes
            assert shapes == test_shapes
@create_new_process_for_each_test()
def test_full_weight_transfer_flow():
    """Test the complete weight transfer flow: init -> update."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    # Run in-process so mock.patch works (spawn won't inherit the mock)
    os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
    # Enable insecure serialization to allow pickling functions for collective_rpc
    os.environ["VLLM_ALLOW_INSECURE_SERIALIZATION"] = "1"
    # Patch while the LLM is constructed so its workers get the mock engine.
    with patch(
        "vllm.v1.worker.gpu_worker.WeightTransferEngineFactory.create_engine",
        mock_create_engine,
    ):
        llm = LLM(
            model=MODEL_NAME,
            enforce_eager=True,
            load_format="dummy",
            tensor_parallel_size=1,
            weight_transfer_config=WeightTransferConfig(backend="nccl"),
        )
        # Step 1: Initialize
        llm.init_weight_transfer_engine(
            WeightTransferInitRequest(init_info={"test_param": "flow_test"})
        )
        # Step 2: Update weights
        llm.update_weights(
            WeightTransferUpdateRequest(
                update_info={
                    "names": ["test.weight"],
                    "dtype_names": ["bfloat16"],
                    "shapes": [[100, 100]],
                }
            )
        )

        # Verify the full flow completed
        # (runs inside each worker; returns a plain dict so it pickles cleanly)
        def check_flow(self):
            engine = self.weight_transfer_engine
            return {
                "init_called": engine.init_transfer_engine_called,
                "update_called": engine.receive_weights_called,
                "init_param": (
                    engine.last_init_info.test_param if engine.last_init_info else None
                ),
                "update_names": (
                    engine.last_update_info.names if engine.last_update_info else None
                ),
            }

        results = llm.collective_rpc(check_flow)
        for result in results:
            assert result["init_called"], "init_transfer_engine should be called"
            assert result["update_called"], "receive_weights should be called"
            assert result["init_param"] == "flow_test"
            assert result["update_names"] == ["test.weight"]
@create_new_process_for_each_test()
def test_weight_transfer_config_backend():
    """Verify the configured backend survives LLM construction unchanged."""
    if torch.cuda.device_count() < 1:
        pytest.skip("Need at least 1 GPU for this test")
    # Build an engine with the nccl backend and read the config back out.
    llm = LLM(
        model=MODEL_NAME,
        enforce_eager=True,
        load_format="dummy",
        tensor_parallel_size=1,
        weight_transfer_config=WeightTransferConfig(backend="nccl"),
    )
    wt_config = llm.llm_engine.vllm_config.weight_transfer_config
    assert wt_config.backend == "nccl"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/weight_transfer/test_weight_transfer_llm.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/config/weight_transfer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Literal
from vllm.config.utils import config
@config
class WeightTransferConfig:
    """Configuration for weight transfer during RL training."""

    # Valid values correspond to the engines registered with
    # WeightTransferEngineFactory ("nccl", "ipc").
    backend: Literal["nccl", "ipc"] = "nccl"
    """The backend to use for weight transfer."""
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/config/weight_transfer.py",
"license": "Apache License 2.0",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/weight_transfer/base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Base class for weight transfer engines."""
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterator
from dataclasses import KW_ONLY, dataclass, field
from typing import Any, Generic, TypeVar
import torch
from vllm.config.parallel import ParallelConfig
from vllm.config.weight_transfer import WeightTransferConfig
TInitInfo = TypeVar("TInitInfo", bound="WeightTransferInitInfo")
TUpdateInfo = TypeVar("TUpdateInfo", bound="WeightTransferUpdateInfo")
# Base protocols for backend-specific dataclasses
@dataclass
class WeightTransferInitInfo(ABC):  # noqa: B024
    """Base class for backend-specific initialization info.

    Intentionally empty: each backend dataclass declares its own fields.
    """

    pass
@dataclass
class WeightTransferUpdateInfo(ABC):  # noqa: B024
    """Base class for backend-specific weight update info."""

    # Everything from here on (including subclass fields declared after this
    # base) is keyword-only in the generated __init__.
    _: KW_ONLY
    is_checkpoint_format: bool = True
    """Set to True if weights are in checkpoint/original model format and need
    layerwise processing. Set to False if weights have already been processed
    into kernel format (repacking, renaming, etc.)."""
# API-level request classes (accept dicts for backend-agnostic serialization)
@dataclass
class WeightTransferInitRequest:
    """API-level weight transfer initialization request.

    Carries a raw dict so requests stay backend-agnostic and serializable;
    engines convert it to a typed dataclass via ``parse_init_info``.
    """

    init_info: dict[str, Any] = field(default_factory=dict)
@dataclass
class WeightTransferUpdateRequest:
    """API-level weight update request.

    Carries a raw dict so requests stay backend-agnostic and serializable;
    engines convert it to a typed dataclass via ``parse_update_info``.
    """

    update_info: dict[str, Any] = field(default_factory=dict)
class WeightTransferEngine(ABC, Generic[TInitInfo, TUpdateInfo]):
    """Abstract transport for shipping model weights from a trainer to
    inference workers.

    Keeping transport out of the worker lets different backends
    (NCCL, CUDA IPC[TODO], RDMA[TODO]) be plugged in interchangeably.

    Subclasses must set:
        init_info_cls: Type of backend-specific initialization info
        update_info_cls: Type of backend-specific update info
    """

    # Backend-specific dataclass types; concrete engines override these.
    init_info_cls: type[TInitInfo]
    update_info_cls: type[TUpdateInfo]

    def __init__(
        self, config: WeightTransferConfig, parallel_config: ParallelConfig
    ) -> None:
        """Store the transfer and parallel configuration for later use.

        Args:
            config: The configuration for the weight transfer engine
            parallel_config: The configuration for the parallel setup
        """
        self.config = config
        self.parallel_config = parallel_config

    def _build_info(self, info_cls, payload: dict[str, Any], label: str):
        """Instantiate ``info_cls`` from ``payload``, converting a TypeError
        (missing/unexpected keys) into a ValueError naming this engine."""
        try:
            return info_cls(**payload)
        except TypeError as e:
            raise ValueError(
                f"Invalid {label} for {self.__class__.__name__}: {e}"
            ) from e

    def parse_init_info(self, init_dict: dict[str, Any]) -> TInitInfo:
        """Build the typed init-info dataclass from a plain dict.

        Args:
            init_dict: Backend-specific initialization parameters

        Returns:
            Typed backend-specific init info dataclass

        Raises:
            ValueError: If ``init_dict`` does not match this backend's fields
        """
        return self._build_info(self.init_info_cls, init_dict, "init_info")

    def parse_update_info(self, update_dict: dict[str, Any]) -> TUpdateInfo:
        """Build the typed update-info dataclass from a plain dict.

        Args:
            update_dict: Backend-specific update parameters

        Returns:
            Typed backend-specific update info dataclass

        Raises:
            ValueError: If ``update_dict`` does not match this backend's fields
        """
        return self._build_info(self.update_info_cls, update_dict, "update_info")

    @abstractmethod
    def init_transfer_engine(self, init_info: TInitInfo) -> None:
        """One-time setup of the transport, called at the start of training.

        Args:
            init_info: Backend-specific initialization info
        """
        raise NotImplementedError

    @abstractmethod
    def receive_weights(
        self,
        update_info: TUpdateInfo,
        load_weights: Callable[[list[tuple[str, torch.Tensor]]], None],
    ) -> None:
        """Receive weights from the trainer and load them incrementally.

        ``load_weights`` is invoked per batch of (name, tensor) pairs so the
        full state dict never has to be materialized at once (avoids OOM).

        Args:
            update_info: Backend-specific update info with parameter metadata
            load_weights: Callable that loads weights into the model
        """
        raise NotImplementedError

    @abstractmethod
    def shutdown(self) -> None:
        """Tear down the transport when the worker is shutting down."""
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def trainer_send_weights(
        iterator: Iterator[tuple[str, torch.Tensor]],
        trainer_args: dict[str, Any] | Any,
    ) -> None:
        """Trainer-side entry point: stream (name, tensor) pairs to workers.

        Args:
            iterator: Yields (name, tensor) tuples; tensors should already be
                on the appropriate device for the backend.
            trainer_args: Backend-specific transport arguments, e.g.
                - NCCL: 'group', 'src', 'packed', ...
                - IPC: 'mode' ('http' or 'ray'), 'llm_handle', 'url', ...

        Example:
            >>> param_iter = ((n, p) for n, p in model.named_parameters())
            >>> engine.trainer_send_weights(param_iter, trainer_args)
        """
        raise NotImplementedError
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/weight_transfer/base.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/weight_transfer/factory.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Factory for weight transfer engines with lazy loading."""
import importlib
from collections.abc import Callable
from typing import TYPE_CHECKING
from vllm.distributed.weight_transfer.base import WeightTransferEngine
from vllm.logger import init_logger
if TYPE_CHECKING:
from vllm.config.parallel import ParallelConfig
from vllm.config.weight_transfer import WeightTransferConfig
logger = init_logger(__name__)
class WeightTransferEngineFactory:
    """Registry of weight transfer engines with deferred imports.

    Engines are looked up by name. Built-in backends are registered as a
    module path plus class name so the heavy engine modules are only
    imported when a backend is actually selected; custom engines can also
    be registered directly as classes at runtime.
    """

    _registry: dict[str, Callable[[], type[WeightTransferEngine]]] = {}

    @classmethod
    def register_engine(
        cls,
        name: str,
        module_path_or_cls: str | type[WeightTransferEngine],
        class_name: str | None = None,
    ) -> None:
        """Register an engine under ``name``.

        Two calling conventions are accepted:
          * ``register_engine(name, "pkg.module", "ClassName")`` — lazy;
            the module is imported on first use.
          * ``register_engine(name, EngineClass)`` — direct class.

        Raises:
            ValueError: If ``name`` is already taken, or a module path is
                given without ``class_name``.
        """
        if name in cls._registry:
            raise ValueError(f"Weight transfer engine '{name}' is already registered.")
        if not isinstance(module_path_or_cls, str):
            # Direct class: wrap it so the registry stores loaders uniformly.
            engine_cls = module_path_or_cls
            cls._registry[name] = lambda: engine_cls
            return
        if class_name is None:
            raise ValueError(
                "class_name is required when registering with module path"
            )
        module_path = module_path_or_cls

        def _lazy_load() -> type[WeightTransferEngine]:
            # Import is deferred until the backend is first instantiated.
            return getattr(importlib.import_module(module_path), class_name)

        cls._registry[name] = _lazy_load

    @classmethod
    def create_engine(
        cls,
        config: "WeightTransferConfig",
        parallel_config: "ParallelConfig",
    ) -> WeightTransferEngine:
        """Instantiate the engine named by ``config.backend``.

        Args:
            config: Weight transfer configuration containing the backend name
            parallel_config: Parallel configuration for the engine

        Returns:
            An initialized weight transfer engine instance

        Raises:
            ValueError: If no engine is registered under that backend name
        """
        backend = config.backend
        loader = cls._registry.get(backend)
        if loader is None:
            available = list(cls._registry.keys())
            raise ValueError(
                f"Invalid weight transfer backend: {backend}. "
                f"Available engines: {available}"
            )
        engine_cls = loader()
        logger.info(
            "Creating weight transfer engine: %s",
            engine_cls.__name__,
        )
        return engine_cls(config, parallel_config)
# Register built-in weight transfer engines here.
# Registration should be centralized to ensure lazy loading -
# engine modules are only imported when actually used.
# The names registered here are the valid values of
# WeightTransferConfig.backend.
WeightTransferEngineFactory.register_engine(
    "nccl",
    "vllm.distributed.weight_transfer.nccl_engine",
    "NCCLWeightTransferEngine",
)
WeightTransferEngineFactory.register_engine(
    "ipc",
    "vllm.distributed.weight_transfer.ipc_engine",
    "IPCWeightTransferEngine",
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/weight_transfer/factory.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/weight_transfer/nccl_engine.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""NCCL-based weight transfer engine."""
from collections.abc import Callable, Iterator
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import torch
if TYPE_CHECKING:
from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
from vllm.config.parallel import ParallelConfig
from vllm.config.weight_transfer import WeightTransferConfig
from vllm.distributed.weight_transfer.base import (
WeightTransferEngine,
WeightTransferInitInfo,
WeightTransferUpdateInfo,
)
from vllm.distributed.weight_transfer.packed_tensor import (
DEFAULT_PACKED_BUFFER_SIZE_BYTES,
DEFAULT_PACKED_NUM_BUFFERS,
packed_broadcast_consumer,
)
@dataclass
class NCCLWeightTransferInitInfo(WeightTransferInitInfo):
    """Initialization info for NCCL weight transfer backend."""

    # Host/port of the rendezvous point for StatelessProcessGroup.create().
    master_address: str
    master_port: int
    # Added to the worker's global rank so worker ranks land after the
    # trainer rank(s); the trainer itself joins as rank 0.
    rank_offset: int
    # Total size of the combined trainer + workers process group.
    world_size: int
@dataclass
class NCCLTrainerSendWeightsArgs:
    """Arguments for NCCL trainer_send_weights method."""

    # NOTE: trainer_send_weights also accepts a plain dict with these keys;
    # it is expanded into this dataclass.
    group: Any
    """Process group (PyNcclCommunicator) for NCCL communication."""

    src: int = 0
    """Source rank (default 0, trainer is typically rank 0)."""

    post_iter_func: Callable[[tuple[str, torch.Tensor]], torch.Tensor] | None = None
    """Optional function to apply to each (name, tensor) pair before broadcasting.
    If None, extracts just the tensor."""

    packed: bool = False
    """Whether to use packed tensor broadcasting for efficiency.
    When True, multiple tensors are batched together before broadcasting
    to reduce NCCL communication overhead."""

    stream: torch.cuda.Stream | None = None
    """CUDA stream to use for broadcasting if packed is False.
    If packed is True, new streams will be created for each buffer."""

    packed_buffer_size_bytes: int = DEFAULT_PACKED_BUFFER_SIZE_BYTES
    """Size in bytes for each packed tensor buffer.
    Must match the value used in NCCLWeightTransferUpdateInfo."""

    packed_num_buffers: int = DEFAULT_PACKED_NUM_BUFFERS
    """Number of buffers for double/triple buffering during packed transfer.
    Must match the value used in NCCLWeightTransferUpdateInfo."""
@dataclass
class NCCLWeightTransferUpdateInfo(WeightTransferUpdateInfo):
    """Update info for NCCL weight transfer backend.

    The three metadata lists are parallel: entry i of each describes the
    i-th parameter to be broadcast.
    """

    names: list[str]
    dtype_names: list[str]
    shapes: list[list[int]]
    packed: bool = False
    """Whether to use packed tensor broadcasting for efficiency.
    When True, multiple tensors are batched together before broadcasting
    to reduce NCCL communication overhead."""

    packed_buffer_size_bytes: int = DEFAULT_PACKED_BUFFER_SIZE_BYTES
    """Size in bytes for each packed tensor buffer.
    Both producer and consumer must use the same value."""

    packed_num_buffers: int = DEFAULT_PACKED_NUM_BUFFERS
    """Number of buffers for double/triple buffering during packed transfer.
    Both producer and consumer must use the same value."""

    def __post_init__(self):
        """Reject metadata lists whose lengths disagree with `names`."""
        expected = len(self.names)
        for attr in ("dtype_names", "shapes"):
            actual = len(getattr(self, attr))
            if actual != expected:
                raise ValueError(
                    f"`{attr}` should be of the same size as `names`: "
                    f"got {actual} and {expected}"
                )
class NCCLWeightTransferEngine(
    WeightTransferEngine[NCCLWeightTransferInitInfo, NCCLWeightTransferUpdateInfo]
):
    """
    Weight transfer engine using NCCL for communication between trainer and workers.

    This implementation uses NCCL broadcast operations to transfer weights from
    the trainer (rank 0) to all inference workers in a process group.
    """

    # Define backend-specific dataclass types
    init_info_cls = NCCLWeightTransferInitInfo
    update_info_cls = NCCLWeightTransferUpdateInfo

    def __init__(
        self, config: WeightTransferConfig, parallel_config: ParallelConfig
    ) -> None:
        """
        Initialize the NCCL weight transfer engine.

        Args:
            config: The configuration for the weight transfer engine
            parallel_config: The configuration for the parallel setup
        """
        super().__init__(config, parallel_config)
        # Created lazily by init_transfer_engine(); None until then.
        self.model_update_group: PyNcclCommunicator | None = None

    def init_transfer_engine(self, init_info: NCCLWeightTransferInitInfo) -> None:
        """
        Initialize NCCL process group with the trainer.

        Args:
            init_info: NCCL initialization info containing master address, port,
                rank offset, and world size
        """
        # Calculate the global rank in the trainer-worker process group
        # Must account for data parallel to get unique ranks across all workers
        dp_rank = self.parallel_config.data_parallel_rank
        world_size_per_dp = self.parallel_config.world_size  # TP * PP
        rank_within_dp = self.parallel_config.rank
        # Unique rank across all DP groups
        worker_rank = dp_rank * world_size_per_dp + rank_within_dp
        # rank_offset shifts worker ranks past the trainer rank(s); the
        # trainer joins this group as rank 0 (see trainer_init below).
        rank = worker_rank + init_info.rank_offset
        # Create stateless process group
        self.model_update_group = (
            NCCLWeightTransferEngine._stateless_init_process_group(
                init_info.master_address,
                init_info.master_port,
                rank,
                init_info.world_size,
                torch.cuda.current_device(),
            )
        )

    def receive_weights(
        self,
        update_info: NCCLWeightTransferUpdateInfo,
        load_weights: Callable[[list[tuple[str, torch.Tensor]]], None],
    ) -> None:
        """
        Receive weights from trainer via NCCL broadcast and load them incrementally.

        If update_info.packed is True, uses packed tensor broadcasting for
        efficient transfer of multiple weights in batches. Otherwise, uses simple
        one-by-one broadcasting.

        Args:
            update_info: NCCL update info containing parameter names, dtypes, shapes,
                and packed flag
            load_weights: Callable that loads weights into the model. Called
                incrementally for each batch of weights to avoid OOM.

        Raises:
            RuntimeError: If init_transfer_engine() has not been called yet.
        """
        if self.model_update_group is None:
            raise RuntimeError(
                "NCCL weight transfer not initialized. "
                "Call init_transfer_engine() first."
            )
        if update_info.packed:
            # Build iterator of (name, (shape, dtype)) from update_info
            def state_dict_info_iterator():
                for name, dtype_name, shape in zip(
                    update_info.names, update_info.dtype_names, update_info.shapes
                ):
                    # dtype_name is a torch dtype attribute name, e.g. "float32"
                    dtype = getattr(torch, dtype_name)
                    yield (name, (shape, dtype))

            packed_broadcast_consumer(
                iterator=state_dict_info_iterator(),
                group=self.model_update_group,
                src=0,
                post_unpack_func=load_weights,
                buffer_size_bytes=update_info.packed_buffer_size_bytes,
                num_buffers=update_info.packed_num_buffers,
            )
        else:
            # Use simple one-by-one broadcasting
            for name, dtype_name, shape in zip(
                update_info.names, update_info.dtype_names, update_info.shapes
            ):
                dtype = getattr(torch, dtype_name)
                # Allocate an uninitialized buffer; the broadcast from rank 0
                # fills it in place.
                weight = torch.empty(shape, dtype=dtype, device="cuda")
                self.model_update_group.broadcast(
                    weight, src=0, stream=torch.cuda.current_stream()
                )
                load_weights([(name, weight)])
                # Drop the reference promptly to keep peak GPU memory low.
                del weight

    def shutdown(self) -> None:
        """Release the communicator reference (if any)."""
        if self.model_update_group is not None:
            # Clean up the communicator by removing the reference
            self.model_update_group = None

    @staticmethod
    def trainer_send_weights(
        iterator: Iterator[tuple[str, torch.Tensor]],
        trainer_args: dict[str, Any] | NCCLTrainerSendWeightsArgs,
    ) -> None:
        """Broadcast weights from trainer to vLLM workers.

        Args:
            iterator: Iterator of model parameters. Returns (name, tensor) tuples
            trainer_args: Dictionary or NCCLTrainerSendWeightsArgs instance containing
                NCCL-specific arguments. If a dict, should contain keys from
                NCCLTrainerSendWeightsArgs.

        Example:
            >>> from vllm.distributed.weight_transfer.nccl_engine import (
            ...     NCCLWeightTransferEngine,
            ...     NCCLTrainerSendWeightsArgs,
            ... )
            >>> param_iter = ((n, p) for n, p in model.named_parameters())
            >>> args = NCCLTrainerSendWeightsArgs(group=group, packed=True)
            >>> NCCLWeightTransferEngine.trainer_send_weights(param_iter, args)
        """
        # Parse trainer args - accept either dict or dataclass instance
        if isinstance(trainer_args, dict):
            args = NCCLTrainerSendWeightsArgs(**trainer_args)
        else:
            args = trainer_args
        if args.post_iter_func is None:
            # Default: extract just the tensor from (name, tensor) tuple
            post_iter_func = lambda x: x[1]
        else:
            post_iter_func = args.post_iter_func
        if args.packed:
            # Use packed tensor broadcasting for efficiency
            from vllm.distributed.weight_transfer.packed_tensor import (
                packed_broadcast_producer,
            )

            packed_broadcast_producer(
                iterator=iterator,
                group=args.group,
                src=args.src,
                post_iter_func=post_iter_func,
                buffer_size_bytes=args.packed_buffer_size_bytes,
                num_buffers=args.packed_num_buffers,
            )
        else:
            # Use simple one-by-one broadcasting
            for item in iterator:
                tensor = post_iter_func(item)
                args.group.broadcast(
                    tensor,
                    src=args.src,
                    stream=args.stream or torch.cuda.current_stream(),
                )

    @staticmethod
    def trainer_init(
        init_info: NCCLWeightTransferInitInfo | dict,
    ) -> "PyNcclCommunicator":
        """
        Initialize NCCL process group for trainer-side weight transfer.

        The trainer is always rank 0 in the process group. Uses the current
        CUDA device (torch.cuda.current_device()).

        Args:
            init_info: Either an NCCLWeightTransferInitInfo object or a dict with keys:
                - master_address: str
                - master_port: int
                - world_size: int

        Returns:
            PyNcclCommunicator for weight transfer.

        Example:
            >>> from vllm.distributed.weight_transfer.nccl_engine import (
            ...     NCCLWeightTransferEngine,
            ... )
            >>> group = NCCLWeightTransferEngine.trainer_init(
            ...     dict(
            ...         master_address=master_address,
            ...         master_port=master_port,
            ...         world_size=world_size,
            ...     ),
            ... )
        """
        if isinstance(init_info, dict):
            master_address = init_info["master_address"]
            master_port = init_info["master_port"]
            world_size = init_info["world_size"]
        else:
            # NCCLWeightTransferInitInfo object
            master_address = init_info.master_address
            master_port = init_info.master_port
            world_size = init_info.world_size
        # Trainer is always rank 0
        return NCCLWeightTransferEngine._stateless_init_process_group(
            master_address, master_port, 0, world_size, torch.cuda.current_device()
        )

    @staticmethod
    def _stateless_init_process_group(
        master_address: str, master_port: int, rank: int, world_size: int, device
    ):
        """
        vLLM provides `StatelessProcessGroup` to create a process group
        without considering the global process group in torch.distributed.
        It is recommended to create `StatelessProcessGroup`, and then initialize
        the data-plane communication (NCCL) between external (train processes)
        and vLLM workers.
        """
        # Local imports keep these heavy modules off the import path of the
        # trainer-side static helpers until a group is actually created.
        from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
        from vllm.distributed.utils import StatelessProcessGroup

        pg = StatelessProcessGroup.create(
            host=master_address, port=master_port, rank=rank, world_size=world_size
        )
        pynccl = PyNcclCommunicator(pg, device=device)
        return pynccl
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/weight_transfer/nccl_engine.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/weight_transfer/packed_tensor.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Packed tensor utilities for efficient weight transfer."""
import math
from collections.abc import Callable, Iterator
from typing import Any
import torch
# Default values for packed tensor configuration.
# These are imported by NCCLWeightTransferUpdateInfo and trainer_send_weights.
# Producer and consumer must agree on both values for unpacking to line up.
DEFAULT_PACKED_BUFFER_SIZE_BYTES = 1024 * 1024 * 1024  # 1GB
DEFAULT_PACKED_NUM_BUFFERS = 2
def packed_broadcast_producer(
    iterator: Iterator[tuple[str, torch.Tensor]],
    group: Any,
    src: int,
    post_iter_func: Callable[[tuple[str, torch.Tensor]], torch.Tensor],
    buffer_size_bytes: int = DEFAULT_PACKED_BUFFER_SIZE_BYTES,
    num_buffers: int = DEFAULT_PACKED_NUM_BUFFERS,
) -> None:
    """Broadcast tensors in a packed manner from trainer to workers.

    Tensors from ``iterator`` are flattened to uint8, concatenated into
    buffers of roughly ``buffer_size_bytes`` (a buffer may exceed the target
    by the size of its last tensor), and broadcast buffer-by-buffer. Each of
    the ``num_buffers`` rotating buffers has its own CUDA stream so packing
    of the next buffer can overlap the broadcast of the previous one.

    Args:
        iterator: Iterator of model parameters. Returns a tuple of (name, tensor)
        group: Process group (PyNcclCommunicator)
        src: Source rank (0 in current implementation)
        post_iter_func: Function to apply to each (name, tensor) pair before
            packing, should return a tensor
        buffer_size_bytes: Size in bytes for each packed tensor buffer.
            Both producer and consumer must use the same value.
        num_buffers: Number of buffers for double/triple buffering.
            Both producer and consumer must use the same value.
    """
    target_packed_tensor_size = buffer_size_bytes
    streams = [torch.cuda.Stream() for _ in range(num_buffers)]
    buffer_idx = 0
    packing_tensor_list: list[list[torch.Tensor]] = [[] for _ in range(num_buffers)]
    packing_tensor_sizes: list[int] = [0 for _ in range(num_buffers)]
    packed_tensors: list[torch.Tensor] = [
        torch.empty(0, dtype=torch.uint8, device="cuda") for _ in range(num_buffers)
    ]
    while True:
        # Wait for this buffer's previous broadcast to finish before reusing it.
        streams[buffer_idx].synchronize()
        # Start tasks for the new buffer in its own stream
        with torch.cuda.stream(streams[buffer_idx]):
            try:
                # Reset the packing state for this buffer
                packing_tensor_list[buffer_idx] = []
                packing_tensor_sizes[buffer_idx] = 0
                # Accumulate tensors until the buffer exceeds the target size
                while True:
                    # Apply post processing and convert to linearized uint8 tensor
                    tensor = (
                        post_iter_func(next(iterator))
                        .contiguous()
                        .view(torch.uint8)
                        .view(-1)
                    )
                    packing_tensor_list[buffer_idx].append(tensor)
                    packing_tensor_sizes[buffer_idx] += tensor.numel()
                    if packing_tensor_sizes[buffer_idx] > target_packed_tensor_size:
                        break
                # Pack the tensors and call broadcast collective
                packed_tensors[buffer_idx] = torch.cat(
                    packing_tensor_list[buffer_idx], dim=0
                )
                group.broadcast(packed_tensors[buffer_idx], src=src)
                # Move to the next buffer
                buffer_idx = (buffer_idx + 1) % num_buffers
            except StopIteration:
                # Do the last broadcast if there are remaining tensors
                if len(packing_tensor_list[buffer_idx]) > 0:
                    packed_tensors[buffer_idx] = torch.cat(
                        packing_tensor_list[buffer_idx], dim=0
                    )
                    group.broadcast(packed_tensors[buffer_idx], src=src)
                break
    # Fix: drain every side stream before returning. Previously only the
    # *next* buffer's stream was synchronized inside the loop, so the function
    # could return (and release the packed buffers) while broadcasts were
    # still enqueued on the remaining streams.
    for stream in streams:
        stream.synchronize()
def packed_broadcast_consumer(
    iterator: Iterator[tuple[str, tuple[list[int], torch.dtype]]],
    group: Any,
    src: int,
    post_unpack_func: Callable[[list[tuple[str, torch.Tensor]]], None],
    buffer_size_bytes: int = DEFAULT_PACKED_BUFFER_SIZE_BYTES,
    num_buffers: int = DEFAULT_PACKED_NUM_BUFFERS,
) -> None:
    """Consume packed tensors and unpack them into a list of tensors.

    Metadata from ``iterator`` is accumulated until the announced payload
    exceeds ``buffer_size_bytes``; a matching uint8 buffer is then received
    via broadcast and split back into the individual tensors. CUDA streams
    are rotated for double/triple buffering, mirroring the producer side
    (both sides must use the same cut-off so buffer contents line up).

    Args:
        iterator: Iterator of parameter metadata. Returns (name, (shape, dtype))
        group: Process group (PyNcclCommunicator)
        src: Source rank (0 in current implementation)
        post_unpack_func: Function to apply to each list of (name, tensor) after
            unpacking
        buffer_size_bytes: Size in bytes for each packed tensor buffer.
            Both producer and consumer must use the same value.
        num_buffers: Number of buffers for double/triple buffering.
            Both producer and consumer must use the same value.
    """

    def unpack_tensor(
        packed_tensor: torch.Tensor,
        names: list[str],
        shapes: list[list[int]],
        dtypes: list[torch.dtype],
        tensor_sizes: list[int],
    ) -> list[tuple[str, torch.Tensor]]:
        """Unpack a single tensor into a list of tensors.

        Args:
            packed_tensor: The packed torch.uint8 tensor to unpack
            names: List of tensor names
            shapes: List of tensor shapes
            dtypes: List of tensor dtypes
            tensor_sizes: List of tensor sizes in bytes

        Returns:
            unpacked List[(name, tensor)]
        """
        chunks = packed_tensor.split(tensor_sizes)
        return [
            (name, chunk.contiguous().view(dtype).view(*shape))
            for name, shape, dtype, chunk in zip(names, shapes, dtypes, chunks)
        ]

    target_packed_tensor_size = buffer_size_bytes
    streams = [torch.cuda.Stream() for _ in range(num_buffers)]
    buffer_idx = 0
    packing_tensor_meta_data: list[list[tuple[str, list[int], torch.dtype, int]]] = [
        [] for _ in range(num_buffers)
    ]
    packing_tensor_sizes: list[int] = [0 for _ in range(num_buffers)]
    packed_tensors: list[torch.Tensor] = [
        torch.empty(0, dtype=torch.uint8, device="cuda") for _ in range(num_buffers)
    ]

    def receive_and_unpack(idx: int) -> None:
        """Allocate buffer ``idx``, receive it via broadcast, and hand the
        unpacked (name, tensor) pairs to ``post_unpack_func``.

        Shared by the steady-state loop and the final partial buffer so the
        broadcast/unpack sequence exists exactly once.
        """
        packed_tensors[idx] = torch.empty(
            packing_tensor_sizes[idx], dtype=torch.uint8, device="cuda"
        )
        group.broadcast(packed_tensors[idx], src=src)
        names, shapes, dtypes, tensor_sizes = zip(*packing_tensor_meta_data[idx])
        post_unpack_func(
            unpack_tensor(
                packed_tensors[idx],
                list(names),
                list(shapes),
                list(dtypes),
                list(tensor_sizes),
            )
        )

    while True:
        # Wait until this buffer's previous work has finished before reuse.
        streams[buffer_idx].synchronize()
        with torch.cuda.stream(streams[buffer_idx]):
            # Reset bookkeeping for the buffer we are about to fill.
            packing_tensor_meta_data[buffer_idx] = []
            packing_tensor_sizes[buffer_idx] = 0
            try:
                # Accumulate metadata until the payload exceeds the target;
                # the producer uses the same rule, so both sides agree on
                # each packed buffer's contents.
                while True:
                    name, (shape, dtype) = next(iterator)
                    tensor_size = math.prod(shape) * dtype.itemsize
                    packing_tensor_meta_data[buffer_idx].append(
                        (name, shape, dtype, tensor_size)
                    )
                    packing_tensor_sizes[buffer_idx] += tensor_size
                    if packing_tensor_sizes[buffer_idx] > target_packed_tensor_size:
                        break
                receive_and_unpack(buffer_idx)
                # Move to the next buffer
                buffer_idx = (buffer_idx + 1) % num_buffers
            except StopIteration:
                # Receive the final, partially filled buffer (if any).
                if len(packing_tensor_meta_data[buffer_idx]) > 0:
                    receive_and_unpack(buffer_idx)
                break
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/weight_transfer/packed_tensor.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/tool_parsers/test_step3p5_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from collections.abc import Generator
import pytest
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
ChatCompletionToolsParam,
)
from vllm.entrypoints.openai.engine.protocol import (
DeltaMessage,
FunctionCall,
ToolCall,
)
from vllm.tokenizers import TokenizerLike, get_tokenizer
from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
from vllm.tool_parsers.step3p5_tool_parser import Step3p5ToolParser
MODEL = "stepfun-ai/Step-3.5-Flash"
@pytest.fixture(scope="module")
def step3p5_tokenizer():
    """Load the Step-3.5-Flash tokenizer once for the whole test module."""
    tokenizer = get_tokenizer(tokenizer_name=MODEL)
    return tokenizer
@pytest.fixture
def step3p5_tool_parser(step3p5_tokenizer):
    """Create a fresh parser per test, backed by the module-scoped tokenizer."""
    parser = Step3p5ToolParser(step3p5_tokenizer)
    return parser
@pytest.fixture
def sample_tools():
    """Two tools shared across tests: a weather lookup and an area calculator."""
    weather_tool = ChatCompletionToolsParam(
        type="function",
        function={
            "name": "get_current_weather",
            "description": "Get the current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {"type": "string", "description": "The city name"},
                    "state": {"type": "string", "description": "The state code"},
                    "unit": {"type": "string", "enum": ["fahrenheit", "celsius"]},
                },
                "required": ["city", "state"],
            },
        },
    )
    area_tool = ChatCompletionToolsParam(
        type="function",
        function={
            "name": "calculate_area",
            "description": "Calculate area of a shape",
            "parameters": {
                "type": "object",
                "properties": {
                    "shape": {"type": "string"},
                    "dimensions": {"type": "object"},
                    "precision": {"type": "integer"},
                },
            },
        },
    )
    return [weather_tool, area_tool]
def assert_tool_calls(
    actual_tool_calls: list[ToolCall], expected_tool_calls: list[ToolCall]
):
    """Assert that the parsed tool calls match the expected ones pairwise."""
    assert len(actual_tool_calls) == len(expected_tool_calls)
    for actual, expected in zip(actual_tool_calls, expected_tool_calls):
        assert actual.type == "function"
        assert actual.function.name == expected.function.name
        # Compare arguments as parsed JSON so key order does not matter.
        actual_args = json.loads(actual.function.arguments)
        expected_args = json.loads(expected.function.arguments)
        assert actual_args == expected_args
def stream_delta_message_generator(
    step3p5_tool_parser,
    step3p5_tokenizer: TokenizerLike,
    model_output: str,
    request: ChatCompletionRequest | None = None,
) -> Generator[DeltaMessage, None, None]:
    """Replay model_output through the parser one token at a time.

    Tokens are detokenized incrementally (as the serving path does) and each
    resulting text delta is fed to extract_tool_calls_streaming; every
    non-empty DeltaMessage is yielded.
    """
    token_ids = step3p5_tokenizer.encode(model_output, add_special_tokens=False)
    prev_text = ""
    prev_tokens = None
    prefix_offset = 0
    read_offset = 0
    for pos, token_id in enumerate(token_ids):
        seen_ids = token_ids[: pos + 1]
        new_tokens, delta_text, next_prefix_offset, next_read_offset = (
            detokenize_incrementally(
                tokenizer=step3p5_tokenizer,
                all_input_ids=seen_ids,
                prev_tokens=prev_tokens,
                prefix_offset=prefix_offset,
                read_offset=read_offset,
                skip_special_tokens=False,
                spaces_between_special_tokens=True,
            )
        )
        cur_text = prev_text + delta_text
        delta = step3p5_tool_parser.extract_tool_calls_streaming(
            prev_text,
            cur_text,
            delta_text,
            token_ids[:pos],
            seen_ids,
            [token_id],
            request=request,
        )
        if delta:
            yield delta
        prev_text = cur_text
        prev_tokens = prev_tokens + new_tokens if prev_tokens else new_tokens
        prefix_offset = next_prefix_offset
        read_offset = next_read_offset
def stream_delta_message_generator_from_chunks(
    step3p5_tool_parser,
    step3p5_tokenizer: TokenizerLike,
    delta_text_chunks: list[str],
    request: ChatCompletionRequest | None = None,
) -> Generator[DeltaMessage, None, None]:
    """Feed pre-split text chunks to the parser, yielding produced deltas."""
    prev_text = ""
    prev_ids: list[int] = []
    for chunk in delta_text_chunks:
        chunk_ids = step3p5_tokenizer.encode(chunk, add_special_tokens=False)
        cur_text = prev_text + chunk
        cur_ids = prev_ids + chunk_ids
        delta = step3p5_tool_parser.extract_tool_calls_streaming(
            prev_text,
            cur_text,
            chunk,
            prev_ids,
            cur_ids,
            chunk_ids,
            request=request,
        )
        if delta:
            yield delta
        prev_text = cur_text
        prev_ids = cur_ids
def test_extract_tool_calls_no_tools(step3p5_tool_parser):
    """Plain text with no tool markup is passed through untouched as content."""
    model_output = "This is a test response without any tool calls"
    result = step3p5_tool_parser.extract_tool_calls(
        model_output, request=None
    )  # type: ignore[arg-type]
    assert not result.tools_called
    assert result.tool_calls == []
    assert result.content == model_output
# Table-driven non-streaming cases; the ids list names each scenario below.
@pytest.mark.parametrize(
    ids=[
        "single_tool",
        "single_tool_with_content",
        "single_tool_multiline_param",
        "parallel_tools",
        "tool_with_typed_params",
    ],
    argnames=["model_output", "expected_tool_calls", "expected_content"],
    argvalues=[
        # single_tool: one weather call, no surrounding free text
        (
            """<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                )
            ],
            None,
        ),
        # single_tool_with_content: free text precedes the tool call
        (
            """Sure! Let me check the weather for you.<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                )
            ],
            "Sure! Let me check the weather for you.",
        ),
        # single_tool_multiline_param: object parameter spans two lines
        (
            """<tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10,
"height": 20}
</parameter>
<parameter=precision>
2
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="calculate_area",
                        arguments=json.dumps(
                            {
                                "shape": "rectangle",
                                "dimensions": {"width": 10, "height": 20},
                                "precision": 2,
                            }
                        ),
                    )
                )
            ],
            None,
        ),
        # parallel_tools: two back-to-back weather calls
        (
            """<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>
<tool_call>
<function=get_current_weather>
<parameter=city>
Orlando
</parameter>
<parameter=state>
FL
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}
                        ),
                    )
                ),
            ],
            None,
        ),
        # tool_with_typed_params: schema-typed params (string/object/integer)
        (
            """Let me calculate that area for you.<tool_call>
<function=calculate_area>
<parameter=shape>
circle
</parameter>
<parameter=dimensions>
{"radius": 15.5}
</parameter>
<parameter=precision>
3
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="calculate_area",
                        arguments=json.dumps(
                            {
                                "shape": "circle",
                                "dimensions": {"radius": 15.5},
                                "precision": 3,
                            }
                        ),
                    )
                )
            ],
            "Let me calculate that area for you.",
        ),
    ],
)
def test_extract_tool_calls(
    step3p5_tool_parser,
    sample_tools,
    model_output,
    expected_tool_calls,
    expected_content,
):
    """Non-streaming extraction yields the expected calls and leftover content."""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    extracted_tool_calls = step3p5_tool_parser.extract_tool_calls(
        model_output, request=request
    )
    assert extracted_tool_calls.tools_called
    assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls)
    assert extracted_tool_calls.content == expected_content
def test_extract_tool_calls_fallback_no_tags(step3p5_tool_parser, sample_tools):
    """A bare <function=...> block without <tool_call> wrappers still parses."""
    model_output = """<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    result = step3p5_tool_parser.extract_tool_calls(model_output, request=request)
    assert result.tools_called
    assert len(result.tool_calls) == 1
    only_call = result.tool_calls[0]
    assert only_call.function.name == "get_current_weather"
def test_extract_tool_calls_type_conversion(step3p5_tool_parser):
    """Parameter values are coerced to the types declared in the tool schema."""
    typed_properties = {
        "int_param": {"type": "integer"},
        "float_param": {"type": "float"},
        "bool_param": {"type": "boolean"},
        "str_param": {"type": "string"},
        "obj_param": {"type": "object"},
    }
    tools = [
        ChatCompletionToolsParam(
            type="function",
            function={
                "name": "test_types",
                "parameters": {
                    "type": "object",
                    "properties": typed_properties,
                },
            },
        )
    ]
    model_output = """<tool_call>
<function=test_types>
<parameter=int_param>
42
</parameter>
<parameter=float_param>
3.14
</parameter>
<parameter=bool_param>
true
</parameter>
<parameter=str_param>
hello world
</parameter>
<parameter=obj_param>
{"key": "value"}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=tools)
    result = step3p5_tool_parser.extract_tool_calls(model_output, request=request)
    parsed = json.loads(result.tool_calls[0].function.arguments)
    # Each value must come back with its schema-declared Python type.
    assert parsed["int_param"] == 42
    assert parsed["float_param"] == 3.14
    assert parsed["bool_param"] is True
    assert parsed["str_param"] == "hello world"
    assert parsed["obj_param"] == {"key": "value"}
# Streaming counterpart of the table above, plus a no_tools case; every case
# is replayed token-by-token through stream_delta_message_generator.
@pytest.mark.parametrize(
    ids=[
        "no_tools",
        "single_tool",
        "single_tool_with_content",
        "single_tool_multiline_param",
        "parallel_tools",
        "tool_with_typed_params",  # Added this test case
    ],
    argnames=["model_output", "expected_tool_calls", "expected_content"],
    argvalues=[
        ("This is a test without tools", [], "This is a test without tools"),
        # single_tool: one weather call, no surrounding free text
        (
            """<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                )
            ],
            None,
        ),
        # single_tool_with_content: free text precedes the tool call
        (
            """Sure! Let me check the weather for you.<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                )
            ],
            "Sure! Let me check the weather for you.",
        ),
        # single_tool_multiline_param: object parameter spans two lines
        (
            """<tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10,
"height": 20}
</parameter>
<parameter=precision>
2
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="calculate_area",
                        arguments=json.dumps(
                            {
                                "shape": "rectangle",
                                "dimensions": {"width": 10, "height": 20},
                                "precision": 2,
                            }
                        ),
                    )
                )
            ],
            None,
        ),
        # parallel_tools: two back-to-back weather calls (note: celsius here)
        (
            """<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>
<tool_call>
<function=get_current_weather>
<parameter=city>
Orlando
</parameter>
<parameter=state>
FL
</parameter>
<parameter=unit>
celsius
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}
                        ),
                    )
                ),
                ToolCall(
                    function=FunctionCall(
                        name="get_current_weather",
                        arguments=json.dumps(
                            {"city": "Orlando", "state": "FL", "unit": "celsius"}
                        ),
                    )
                ),
            ],
            None,
        ),
        # Added tool_with_typed_params test case
        (
            """Let me calculate that area for you.<tool_call>
<function=calculate_area>
<parameter=shape>
circle
</parameter>
<parameter=dimensions>
{"radius": 15.5}
</parameter>
<parameter=precision>
3
</parameter>
</function>
</tool_call>""",
            [
                ToolCall(
                    function=FunctionCall(
                        name="calculate_area",
                        arguments=json.dumps(
                            {
                                "shape": "circle",
                                "dimensions": {"radius": 15.5},
                                "precision": 3,
                            }
                        ),
                    )
                )
            ],
            "Let me calculate that area for you.",
        ),
    ],
)
def test_extract_tool_calls_streaming(
    step3p5_tool_parser,
    step3p5_tokenizer,
    sample_tools,
    model_output,
    expected_tool_calls,
    expected_content,
):
    """Test incremental streaming behavior including typed parameters"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    other_content = ""
    tool_states = {}  # Track state per tool index
    for delta_message in stream_delta_message_generator(
        step3p5_tool_parser, step3p5_tokenizer, model_output, request
    ):
        # role should never be streamed from tool parser
        assert not delta_message.role
        if delta_message.content:
            other_content += delta_message.content
        if delta_message.tool_calls:
            for tool_call in delta_message.tool_calls:
                idx = tool_call.index
                # Initialize state for new tool
                if idx not in tool_states:
                    tool_states[idx] = {
                        "id": None,
                        "name": None,
                        "arguments": "",
                        "type": None,
                    }
                # First chunk should have id, name, and type
                if tool_call.id:
                    tool_states[idx]["id"] = tool_call.id
                if tool_call.type:
                    assert tool_call.type == "function"
                    tool_states[idx]["type"] = tool_call.type
                if tool_call.function:
                    if tool_call.function.name:
                        # Should only be set once
                        assert tool_states[idx]["name"] is None
                        tool_states[idx]["name"] = tool_call.function.name
                    if tool_call.function.arguments is not None:
                        # Accumulate arguments incrementally
                        tool_states[idx]["arguments"] += tool_call.function.arguments
    # Verify final content
    assert other_content == (expected_content or "")  # Handle None case
    # Verify we got all expected tool calls
    assert len(tool_states) == len(expected_tool_calls)
    # Verify each tool call
    for idx, expected_tool in enumerate(expected_tool_calls):
        state = tool_states[idx]
        assert state["id"] is not None
        assert state["type"] == "function"
        assert state["name"] == expected_tool.function.name
        # Parse accumulated arguments
        arguments_str = state["arguments"]
        assert arguments_str is not None
        actual_args = json.loads(arguments_str)
        expected_args = json.loads(expected_tool.function.arguments)
        assert actual_args == expected_args
def test_extract_tool_calls_missing_closing_parameter_tag(
    step3p5_tool_parser, sample_tools
):
    """A missing </parameter> tag is tolerated and every param still parses."""
    # Weather call where the "city" parameter is never closed
    model_output = """Let me check the weather for you:
<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    result = step3p5_tool_parser.extract_tool_calls(model_output, request=request)
    # Malformed XML must still yield exactly one parsed call
    assert result.tools_called
    assert len(result.tool_calls) == 1
    only_call = result.tool_calls[0]
    assert only_call.function.name == "get_current_weather"
    # All three parameters survive the missing closing tag
    parsed_args = json.loads(only_call.function.arguments)
    assert "city" in parsed_args
    assert parsed_args["city"] == "Dallas"
    assert parsed_args["state"] == "TX"
    assert parsed_args["unit"] == "fahrenheit"
    # Leading free text must be preserved as content
    assert "Let me check the weather for you:" in result.content
def test_extract_tool_calls_streaming_missing_closing_tag(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Streaming variant: a missing </parameter> tag still parses cleanly."""
    model_output = """Let me check the weather for you:
<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
<parameter=state>
TX
</parameter>
<parameter=unit>
fahrenheit
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    other_content = ""
    tool_states = {}
    for delta in stream_delta_message_generator(
        step3p5_tool_parser, step3p5_tokenizer, model_output, request
    ):
        if delta.content:
            other_content += delta.content
        for tool_call in delta.tool_calls or []:
            state = tool_states.setdefault(
                tool_call.index,
                {"id": None, "name": None, "arguments": "", "type": None},
            )
            if tool_call.id:
                state["id"] = tool_call.id
            if tool_call.type:
                assert tool_call.type == "function"
                state["type"] = tool_call.type
            if tool_call.function:
                if tool_call.function.name:
                    state["name"] = tool_call.function.name
                if tool_call.function.arguments is not None:
                    state["arguments"] += tool_call.function.arguments
    # Leading free text must be streamed as content
    assert "Let me check the weather for you:" in other_content
    # Exactly one tool call, with all fields populated
    assert len(tool_states) == 1
    state = tool_states[0]
    assert state["id"] is not None
    assert state["type"] == "function"
    assert state["name"] == "get_current_weather"
    # Arguments parse correctly despite the missing closing tag
    assert state["arguments"] is not None
    parsed_args = json.loads(state["arguments"])
    assert parsed_args["city"] == "Dallas"
    assert parsed_args["state"] == "TX"
    assert parsed_args["unit"] == "fahrenheit"
def test_extract_tool_calls_streaming_incremental(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Deltas arrive piecewise: content first, then header, then arguments."""
    model_output = """I'll check the weather.<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    chunks = list(
        stream_delta_message_generator(
            step3p5_tool_parser, step3p5_tokenizer, model_output, request
        )
    )
    # Streaming must produce several deltas, starting with plain content
    assert len(chunks) > 3
    assert chunks[0].content is not None
    assert not chunks[0].tool_calls
    # Some delta carries the tool header: id, name, type, empty arguments
    header_chunk = next(
        (chunk for chunk in chunks if chunk.tool_calls and chunk.tool_calls[0].id),
        None,
    )
    assert header_chunk is not None
    header_call = header_chunk.tool_calls[0]
    assert header_call.function.name == "get_current_weather"
    assert header_call.type == "function"
    assert header_call.function.arguments == ""
    # Arguments must arrive over multiple deltas and join into valid JSON
    arg_pieces = [
        chunk.tool_calls[0].function.arguments
        for chunk in chunks
        if chunk.tool_calls and chunk.tool_calls[0].function.arguments
    ]
    assert len(arg_pieces) > 1
    parsed_args = json.loads("".join(arg_pieces))
    assert parsed_args["city"] == "Dallas"
    assert parsed_args["state"] == "TX"
def test_extract_tool_calls_complex_type_with_single_quote(step3p5_tool_parser):
    """Single-quoted (Python-style) object literals are normalized to JSON."""
    typed_properties = {
        "int_param": {"type": "integer"},
        "float_param": {"type": "float"},
        "bool_param": {"type": "boolean"},
        "str_param": {"type": "string"},
        "obj_param": {"type": "object"},
    }
    tools = [
        ChatCompletionToolsParam(
            type="function",
            function={
                "name": "test_types",
                "parameters": {
                    "type": "object",
                    "properties": typed_properties,
                },
            },
        )
    ]
    model_output = """<tool_call>
<function=test_types>
<parameter=obj_param>
{'key': 'value'}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=tools)
    result = step3p5_tool_parser.extract_tool_calls(model_output, request=request)
    parsed = json.loads(result.tool_calls[0].function.arguments)
    assert parsed["obj_param"] == {"key": "value"}
def test_extract_tool_calls_streaming_mixed_content_and_multiple_tool_calls(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Stream "hello" + tool call + "hi" + tool call.

    Both tool calls must be parsed (indices 0 and 1) while the free text
    "hello" and "hi" comes through as plain content, in that order.
    """
    model_output = """hello<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call>hi<tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10, "height": 5}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    other_content = ""
    tool_states = {}
    for delta in stream_delta_message_generator(
        step3p5_tool_parser, step3p5_tokenizer, model_output, request
    ):
        if delta.content:
            other_content += delta.content
        for tool_call in delta.tool_calls or []:
            state = tool_states.setdefault(
                tool_call.index,
                {"id": None, "name": None, "arguments": "", "type": None},
            )
            if tool_call.id:
                state["id"] = tool_call.id
            if tool_call.type:
                assert tool_call.type == "function"
                state["type"] = tool_call.type
            if tool_call.function:
                if tool_call.function.name:
                    state["name"] = tool_call.function.name
                if tool_call.function.arguments is not None:
                    state["arguments"] += tool_call.function.arguments
    assert len(tool_states) == 2, "Should have exactly two complete tool calls"
    # First tool call (index=0): the weather lookup
    weather_state = tool_states[0]
    assert weather_state["name"] == "get_current_weather"
    assert weather_state["arguments"]
    weather_args = json.loads(weather_state["arguments"])
    assert weather_args["city"] == "Dallas"
    assert weather_args["state"] == "TX"
    # Second tool call (index=1): the area calculation
    area_state = tool_states[1]
    assert area_state["name"] == "calculate_area"
    assert area_state["arguments"]
    area_args = json.loads(area_state["arguments"])
    assert area_args["shape"] == "rectangle"
    assert isinstance(area_args["dimensions"], dict), "dimensions should be a dict"
    assert area_args["dimensions"]["width"] == 10
    assert area_args["dimensions"]["height"] == 5
    # Free text arrives as content, preserving order
    assert "hello" in other_content, "Should contain 'hello' as content"
    assert "hi" in other_content, "Should contain 'hi' as content"
    hello_index = other_content.find("hello")
    hi_index = other_content.find("hi")
    assert hello_index >= 0, "'hello' should be in content"
    assert hi_index > hello_index, "'hi' should come after 'hello'"
    # Tool markup must never leak into content
    assert "<function=get_current_weather>" not in other_content, (
        "First tool call should not be in content"
    )
    assert "<function=calculate_area>" not in other_content, (
        "Second tool call should not be in content"
    )
def test_extract_tool_calls_non_streaming_mixed_content_and_multiple_tool_calls(
    step3p5_tool_parser, sample_tools
):
    """Non-streaming: "hello" + tool call + "hi" + tool call.

    Both tool calls must be extracted while "hello" and "hi" remain in the
    content, in order, with no tool markup leaking through.
    """
    model_output = """hello<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call>hi<tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10, "height": 5}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    result = step3p5_tool_parser.extract_tool_calls(model_output, request=request)
    assert result.tools_called
    assert len(result.tool_calls) == 2, "Should have exactly two complete tool calls"
    weather_call, area_call = result.tool_calls
    # First tool call (index=0): the weather lookup
    assert weather_call.function.name == "get_current_weather"
    weather_args = json.loads(weather_call.function.arguments)
    assert weather_args["city"] == "Dallas"
    assert weather_args["state"] == "TX"
    # Second tool call (index=1): the area calculation
    assert area_call.function.name == "calculate_area"
    area_args = json.loads(area_call.function.arguments)
    assert area_args["shape"] == "rectangle"
    assert isinstance(area_args["dimensions"], dict), "dimensions should be a dict"
    assert area_args["dimensions"]["width"] == 10
    assert area_args["dimensions"]["height"] == 5
    # Free text is preserved as content, in order
    content = result.content
    assert content is not None
    assert "hello" in content, "Should contain 'hello' as content"
    assert "hi" in content, "Should contain 'hi' as content"
    hello_index = content.find("hello")
    hi_index = content.find("hi")
    assert hello_index >= 0, "'hello' should be in content"
    assert hi_index > hello_index, "'hi' should come after 'hello'"
    # Tool markup must never leak into content
    assert "<function=get_current_weather>" not in content, (
        "First tool call should not be in content"
    )
    assert "<function=calculate_area>" not in content, (
        "Second tool call should not be in content"
    )
def test_extract_tool_calls_streaming_full_input_mixed_content_and_multiple_tool_calls(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Deliver the whole mixed output as ONE streaming delta.

    Same scenario as the token-by-token test ("hello" + call + "hi" + call),
    but extract_tool_calls_streaming is invoked exactly once with the full
    text — plus the EOS token, when the tokenizer defines one — as the delta.
    """
    model_output = """hello<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call>hi<tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10, "height": 5}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    other_content = ""
    tool_states = {}
    # Tokenize everything up front; EOS goes only into the delta ids because
    # it is not part of the decoded text.
    content_token_ids = step3p5_tokenizer.encode(
        model_output, add_special_tokens=False
    )
    eos_token_id = step3p5_tokenizer.eos_token_id
    delta_token_ids = list(content_token_ids)
    if eos_token_id is not None:
        delta_token_ids.append(eos_token_id)
    full_text = step3p5_tokenizer.decode(content_token_ids, skip_special_tokens=False)
    # Single parser invocation covering the entire generation
    delta_result = step3p5_tool_parser.extract_tool_calls_streaming(
        "",
        full_text,
        full_text,
        [],
        content_token_ids,
        delta_token_ids,
        request=request,
    )
    if delta_result:
        if delta_result.content:
            other_content += delta_result.content
        for tool_call in delta_result.tool_calls or []:
            state = tool_states.setdefault(
                tool_call.index,
                {"id": None, "name": None, "arguments": "", "type": None},
            )
            if tool_call.id:
                state["id"] = tool_call.id
            if tool_call.type:
                state["type"] = tool_call.type
            if tool_call.function:
                if tool_call.function.name:
                    state["name"] = tool_call.function.name
                if tool_call.function.arguments is not None:
                    state["arguments"] += tool_call.function.arguments
    assert len(tool_states) == 2, "Should have exactly two complete tool calls"
    # First tool call (index=0): the weather lookup
    weather_state = tool_states[0]
    assert weather_state["name"] == "get_current_weather"
    assert weather_state["arguments"]
    weather_args = json.loads(weather_state["arguments"])
    assert weather_args["city"] == "Dallas"
    assert weather_args["state"] == "TX"
    # Second tool call (index=1): the area calculation
    area_state = tool_states[1]
    assert area_state["name"] == "calculate_area"
    assert area_state["arguments"]
    area_args = json.loads(area_state["arguments"])
    assert area_args["shape"] == "rectangle"
    assert isinstance(area_args["dimensions"], dict), "dimensions should be a dict"
    assert area_args["dimensions"]["width"] == 10
    assert area_args["dimensions"]["height"] == 5
    # Free text is preserved as content, in order
    assert "hello" in other_content, "Should contain 'hello' as content"
    assert "hi" in other_content, "Should contain 'hi' as content"
    hello_index = other_content.find("hello")
    hi_index = other_content.find("hi")
    assert hello_index >= 0, "'hello' should be in content"
    assert hi_index > hello_index, "'hi' should come after 'hello'"
    # Tool markup must never leak into content
    assert "<function=get_current_weather>" not in other_content, (
        "First tool call should not be in content"
    )
    assert "<function=calculate_area>" not in other_content, (
        "Second tool call should not be in content"
    )
def test_extract_tool_calls_streaming_multiple_tool_calls_no_content_between(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Stream "hello" followed by two back-to-back tool calls.

    The parser must surface "hello" as plain content and emit two tool calls
    (index 0 and index 1), with no stray content between the two calls and no
    tool-call markup leaking into the content.
    """
    # Model output: hello + tool call + tool call (no content between tool calls)
    model_output = """hello<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call><tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10, "height": 5}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)

    content_chunks: list[str] = []
    tool_states: dict[int, dict] = {}
    for delta_message in stream_delta_message_generator(
        step3p5_tool_parser, step3p5_tokenizer, model_output, request
    ):
        if delta_message.content:
            content_chunks.append(delta_message.content)
        for tool_call in delta_message.tool_calls or []:
            # Accumulate per-index state; arguments arrive as string fragments.
            state = tool_states.setdefault(
                tool_call.index,
                {"id": None, "name": None, "arguments": "", "type": None},
            )
            if tool_call.id:
                state["id"] = tool_call.id
            if tool_call.type:
                assert tool_call.type == "function"
                state["type"] = tool_call.type
            if tool_call.function:
                if tool_call.function.name:
                    state["name"] = tool_call.function.name
                if tool_call.function.arguments is not None:
                    state["arguments"] += tool_call.function.arguments
    other_content = "".join(content_chunks)

    # Should have exactly two complete tool calls
    assert len(tool_states) == 2, "Should have exactly two complete tool calls"

    # Verify the first tool call (index=0)
    assert tool_states[0]["name"] == "get_current_weather"
    assert tool_states[0]["arguments"]
    args_dict_0 = json.loads(tool_states[0]["arguments"])
    assert args_dict_0["city"] == "Dallas"
    assert args_dict_0["state"] == "TX"

    # Verify the second tool call (index=1)
    assert tool_states[1]["name"] == "calculate_area"
    assert tool_states[1]["arguments"]
    args_dict_1 = json.loads(tool_states[1]["arguments"])
    assert args_dict_1["shape"] == "rectangle"
    assert isinstance(args_dict_1["dimensions"], dict), "dimensions should be a dict"
    assert args_dict_1["dimensions"]["width"] == 10
    assert args_dict_1["dimensions"]["height"] == 5

    assert "hello" in other_content, "Should contain 'hello' as content"

    # Verify that tool call tags are NOT in the content
    assert "<function=get_current_weather>" not in other_content, (
        "First tool call should not be in content"
    )
    assert "<function=calculate_area>" not in other_content, (
        "Second tool call should not be in content"
    )
def test_extract_tool_calls_streaming_multi_token_chunk_boundary(
    step3p5_tool_parser, step3p5_tokenizer, sample_tools
):
    """Ensure fallback doesn't close a new tool_call when boundary is in one chunk.

    The </tool_call><tool_call> boundary is delivered inside a single
    multi-token chunk; both tool calls must still be parsed with their names.
    """
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    delta_text_chunks = [
        """<tool_call>
<function=get_current_weather>
<parameter=city>
Sys""",
        """
</parameter>
</function>
""",
        """</tool_call><tool_call>
<""",
        """function=calculate_area>
<parameter=shape>
rectangle""",
        """</parameter>
</function>
</tool_call>""",
    ]
    # The chunk preceding the boundary must tokenize to >1 token, otherwise the
    # scenario under test (multi-token boundary chunk) is not exercised.
    boundary_chunk = delta_text_chunks[1]
    assert len(step3p5_tokenizer.encode(boundary_chunk, add_special_tokens=False)) > 1

    tool_states: dict[int, dict] = {}
    for delta_message in stream_delta_message_generator_from_chunks(
        step3p5_tool_parser, step3p5_tokenizer, delta_text_chunks, request
    ):
        # NOTE: removed leftover debug `print(delta_message)` from the loop.
        if delta_message.tool_calls:
            for tool_call in delta_message.tool_calls:
                state = tool_states.setdefault(
                    tool_call.index, {"name": None, "arguments": ""}
                )
                if tool_call.function:
                    if tool_call.function.name:
                        state["name"] = tool_call.function.name
                    if tool_call.function.arguments is not None:
                        state["arguments"] += tool_call.function.arguments

    # Both tool calls must be recognized and carry a function name.
    assert len(tool_states) == 2
    assert all(state["name"] for state in tool_states.values())
    assert tool_states[0]["name"] == "get_current_weather"
    assert tool_states[1]["name"] == "calculate_area"
def test_extract_tool_calls_non_streaming_multiple_tool_calls_no_content_between(
    step3p5_tool_parser, sample_tools
):
    """Non-streaming extraction of "hello" plus two adjacent tool calls.

    Expect "hello" back as content and both tool calls fully parsed, with none
    of the tool-call markup leaking into the returned content.
    """
    # Model output: hello + tool call + tool call (no content between tool calls)
    model_output = """hello<tool_call>
<function=get_current_weather>
<parameter=city>
Dallas
</parameter>
<parameter=state>
TX
</parameter>
</function>
</tool_call><tool_call>
<function=calculate_area>
<parameter=shape>
rectangle
</parameter>
<parameter=dimensions>
{"width": 10, "height": 5}
</parameter>
</function>
</tool_call>"""
    request = ChatCompletionRequest(model=MODEL, messages=[], tools=sample_tools)
    extracted_tool_calls = step3p5_tool_parser.extract_tool_calls(
        model_output, request=request
    )

    # Should have exactly two complete tool calls
    assert extracted_tool_calls.tools_called
    assert len(extracted_tool_calls.tool_calls) == 2, (
        "Should have exactly two complete tool calls"
    )
    first_call, second_call = extracted_tool_calls.tool_calls

    # Verify the first tool call (index=0)
    assert first_call.function.name == "get_current_weather"
    weather_args = json.loads(first_call.function.arguments)
    assert weather_args["city"] == "Dallas"
    assert weather_args["state"] == "TX"

    # Verify the second tool call (index=1)
    assert second_call.function.name == "calculate_area"
    area_args = json.loads(second_call.function.arguments)
    assert area_args["shape"] == "rectangle"
    assert isinstance(area_args["dimensions"], dict), "dimensions should be a dict"
    assert area_args["dimensions"]["width"] == 10
    assert area_args["dimensions"]["height"] == 5

    # Verify content: should contain "hello"
    assert extracted_tool_calls.content is not None
    assert "hello" in extracted_tool_calls.content, "Should contain 'hello' as content"

    # Verify that tool call tags are NOT in the content
    assert "<function=get_current_weather>" not in extracted_tool_calls.content, (
        "First tool call should not be in content"
    )
    assert "<function=calculate_area>" not in extracted_tool_calls.content, (
        "Second tool call should not be in content"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/tool_parsers/test_step3p5_tool_parser.py",
"license": "Apache License 2.0",
"lines": 1265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/fusions_e2e/common.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
from collections.abc import Callable, Iterable
from typing import Any, NamedTuple
import pytest
import regex as re
from vllm.platforms import current_platform
from vllm.v1.attention.backends.registry import AttentionBackendEnum
class Matches(NamedTuple):
    """Expected replacement counts per fusion pass.

    Each field holds how many times the corresponding compilation pass is
    expected to fire for a given model / layer-count combination; field names
    line up with the keys of FUSION_LOG_PATTERNS below.
    """

    # simple pointwise fusions (single GPU)
    rms_quant_fusion: int = 0
    act_quant_fusion: int = 0
    norm_rope_fusion: int = 0
    attn_quant_fusion: int = 0
    # distributed fusions (multi-GPU / tensor parallel)
    ar_rms_fusion: int = 0
    sequence_parallel: int = 0
    async_tp: int = 0
class ModelFusionInfo(NamedTuple):
    """A model under test plus the fusion counts expected for it."""

    # HuggingFace model id to load.
    model_name: str
    matches: Callable[[int], Matches]
    """Given number of hidden layers, produces the matches object"""
    # NOTE(review): this default dict is created once at class definition and
    # shared by every instance that does not override it — callers must not
    # mutate it in place.
    model_kwargs: dict[str, Any] = {}
    # Builds the hf_overrides dict that shrinks the model to n hidden layers.
    hf_overrides: Callable[[int], dict] = lambda n: {"num_hidden_layers": n}
class AttentionBackendCase(NamedTuple):
    """An attention backend plus the model kwargs it needs for fusion tests."""

    # Which attention backend implementation to run the model with.
    backend: AttentionBackendEnum
    model_kwargs: dict[str, Any] = {}
    """Additional args required for attn+quant fusion"""
def is_blackwell() -> bool:
    """Return True when running on Blackwell (device capability family 100).

    A lot of tests depend on this check.  Defined as a ``def`` rather than a
    lambda assignment (PEP 8 E731) so the string below it is a real docstring.
    """
    return current_platform.is_device_capability_family(100)
def custom_ops_combos(*custom_ops: str) -> Iterable[str]:
    """Yield every enabled/disabled combination of the given custom ops.

    Each op contributes a "-op" (native) and a "+op" (custom) toggle; the
    Cartesian product of all toggles is yielded as comma-joined strings,
    suitable for pytest parametrization of the custom_ops compile option.
    """
    toggles = (("-" + name, "+" + name) for name in custom_ops)
    yield from (",".join(combo) for combo in itertools.product(*toggles))


# Quick inline validation
assert list(custom_ops_combos("silu_and_mul")) == ["-silu_and_mul", "+silu_and_mul"]
assert list(custom_ops_combos("quant_fp8", "rms_norm")) == [
    "-quant_fp8,-rms_norm",
    "-quant_fp8,+rms_norm",
    "+quant_fp8,-rms_norm",
    "+quant_fp8,+rms_norm",
]
def has_cuda_graph_wrapper_metadata() -> bool:
    """Return True if torch._inductor.utils exposes CUDAGraphWrapperMetadata.

    Used to gate Inductor graph-partition tests on a new-enough torch.  Also
    catches ImportError so a torch build without the module (not just without
    the attribute) reports False instead of crashing at collection time.
    """
    from importlib import import_module

    try:
        module = import_module("torch._inductor.utils")
    except ImportError:
        return False
    return hasattr(module, "CUDAGraphWrapperMetadata")
# Parametrization covering both cudagraph partitioning strategies.  The
# Inductor-side partitioner needs CUDAGraphWrapperMetadata, which only newer
# torch versions provide, hence the skipif on that case.
INDUCTOR_GRAPH_PARTITION = [
    pytest.param(
        True,
        marks=pytest.mark.skipif(
            not has_cuda_graph_wrapper_metadata(),
            reason="torch version does not support Inductor partition",
        ),
        id="inductor_partition",
    ),
    pytest.param(False, id="dynamo_partition"),
]
# Maps each Matches field name to the log line its fusion pass emits; capture
# group 1 is the replacement/fusion count that e2e tests compare against the
# expected Matches values.
FUSION_LOG_PATTERNS: dict[str, re.Pattern] = {
    "rms_quant_fusion": re.compile(r"rms_quant_fusion.py:\d+] Replaced (\d+) patterns"),
    "act_quant_fusion": re.compile(r"act_quant_fusion.py:\d+] Replaced (\d+) patterns"),
    "norm_rope_fusion": re.compile(
        r"qk_norm_rope_fusion.py:\d+] Fused QK Norm\+RoPE on (\d+) sites"
    ),
    "attn_quant_fusion": re.compile(
        r"attn_quant_fusion.py:\d+] Fused quant onto (\d+) attention nodes"
    ),
    "ar_rms_fusion": re.compile(
        r"allreduce_rms_fusion.py:\d+] Replaced (\d+) patterns"
    ),
    "sequence_parallel": re.compile(
        r"sequence_parallelism.py:\d+] Replaced (\d+) patterns"
    ),
    "async_tp": re.compile(r"collective_fusion.py:\d+] Replaced (\d+) patterns"),
}
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/fusions_e2e/common.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/fusions_e2e/models.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.utils.flashinfer import has_flashinfer
from vllm.v1.attention.backends.registry import AttentionBackendEnum
from .common import AttentionBackendCase, Matches, ModelFusionInfo, is_blackwell
# Attn backends
# FlashInfer requires Blackwell hardware and the flashinfer package; it also
# runs with an fp8 KV cache so attn+quant fusion is exercised.
FLASHINFER_ATTN = pytest.param(
    AttentionBackendCase(
        backend=AttentionBackendEnum.FLASHINFER,
        model_kwargs=dict(kv_cache_dtype="fp8"),
    ),
    id="FLASHINFER",
    marks=pytest.mark.skipif(
        not is_blackwell() or not has_flashinfer(),
        reason="FI backend requires Blackwell and FlashInfer",
    ),
)
# Triton attention runs on any platform and needs no extra model kwargs.
TRITON_ATTN = pytest.param(
    AttentionBackendCase(backend=AttentionBackendEnum.TRITON_ATTN), id="TRITON_ATTN"
)
# Models
# Expected match counts are functions of the (reduced) number of hidden layers.
llama3_8b = ModelFusionInfo(
    model_name="meta-llama/Llama-3.1-8B-Instruct",
    matches=lambda n_layers: Matches(
        ar_rms_fusion=n_layers * 2 + 1,
        sequence_parallel=n_layers * 2 + 1,
        async_tp=n_layers * 4,
    ),
)

# FP8 checkpoint: additionally exercises rms+quant, act+quant and attn+quant.
llama3_8b_fp8 = ModelFusionInfo(
    model_name="RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8",
    matches=lambda n_layers: Matches(
        rms_quant_fusion=n_layers * 2,
        act_quant_fusion=n_layers,
        attn_quant_fusion=n_layers,
        ar_rms_fusion=n_layers * 2 + 1,
        sequence_parallel=n_layers * 2 + 1,
        async_tp=n_layers * 4,
    ),
)

# FP4 checkpoint: rms+quant fusion is expected to not fire at all (0 matches).
llama3_8b_fp4 = ModelFusionInfo(
    model_name="nvidia/Llama-3.1-8B-Instruct-FP4",
    matches=lambda n_layers: Matches(
        rms_quant_fusion=0,
        act_quant_fusion=n_layers,
        attn_quant_fusion=n_layers,
        ar_rms_fusion=n_layers * 2 + 1,
        sequence_parallel=n_layers * 2 + 1,
        async_tp=n_layers * 4,
    ),
)
# MoEs cannot do act+quant fusion because those ops are hidden from torch.compile.
# MoEs also only expose 1 rms+quant fusion because the quant for up_proj is hidden.
# TODO(luka): https://github.com/vllm-project/vllm/issues/31985
# Also, for MoEs, gemm+collective fusion only happens for dense GEMMs (o_proj/qkv proj)
llama4_scout_fp8 = ModelFusionInfo(
    model_name="nvidia/Llama-4-Scout-17B-16E-Instruct-FP8",
    # Llama-4 nests the language model config, so the layer-count override
    # has to go under "text_config".
    hf_overrides=lambda n_layers: {"text_config": {"num_hidden_layers": n_layers}},
    matches=lambda n_layers: Matches(
        rms_quant_fusion=n_layers,
        attn_quant_fusion=n_layers,
        ar_rms_fusion=n_layers * 2,
        sequence_parallel=n_layers * 2,
        async_tp=n_layers * 2 - 1,
    ),
)

# FP4 variant: same MoE restrictions, plus no rms+quant fusion expected.
llama4_scout_fp4 = ModelFusionInfo(
    model_name="nvidia/Llama-4-Scout-17B-16E-Instruct-NVFP4",
    hf_overrides=lambda n_layers: {"text_config": {"num_hidden_layers": n_layers}},
    matches=lambda n_layers: Matches(
        rms_quant_fusion=0,
        attn_quant_fusion=n_layers,
        ar_rms_fusion=n_layers * 2,
        sequence_parallel=n_layers * 2,
        async_tp=n_layers * 2 - 1,
    ),
)
# Qwen3 MoE: exercises QK-norm+RoPE fusion; the unquantized variant has no
# quantization fusions to count.
qwen3_a3b = ModelFusionInfo(
    model_name="Qwen/Qwen3-30B-A3B",
    matches=lambda n_layers: Matches(
        norm_rope_fusion=n_layers,
        ar_rms_fusion=n_layers * 2 + 1,
        sequence_parallel=n_layers * 2 + 1,
        async_tp=n_layers * 2,
    ),
)

qwen3_a3b_fp8 = ModelFusionInfo(
    model_name="Qwen/Qwen3-30B-A3B-FP8",
    matches=lambda n_layers: Matches(
        rms_quant_fusion=n_layers,
        norm_rope_fusion=n_layers,
        attn_quant_fusion=0,  # attn + group quant not supported
        ar_rms_fusion=n_layers * 2 + 1,
        sequence_parallel=n_layers * 2 + 1,
        async_tp=n_layers * 2,
    ),
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/fusions_e2e/models.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/fusions_e2e/test_tp1_quant.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import pytest
from vllm.config import PassConfig
from vllm.utils.flashinfer import is_flashinfer_fp8_blockscale_gemm_supported
from .common import (
INDUCTOR_GRAPH_PARTITION,
AttentionBackendCase,
Matches,
custom_ops_combos,
is_blackwell,
)
from .models import (
FLASHINFER_ATTN,
TRITON_ATTN,
llama3_8b_fp4,
llama3_8b_fp8,
llama4_scout_fp4,
llama4_scout_fp8,
qwen3_a3b_fp8,
)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides, use_deepgemm",
    [
        (*llama3_8b_fp8, False),
        (*llama4_scout_fp8, False),
        (*qwen3_a3b_fp8, False),
        (*qwen3_a3b_fp8, True),
    ],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN, FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [6])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("quant_fp8", "rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp1_fp8_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    use_deepgemm: bool,
    run_e2e_fusion_test,
    monkeypatch,
):
    """End-to-end TP=1 fusion counts for FP8 models (optionally via DeepGEMM)."""
    if use_deepgemm and is_flashinfer_fp8_blockscale_gemm_supported():
        # Flashinfer block FP8 GEMM has internal quantization, so it can't
        # be fused with other ops.
        pytest.skip("FlashInfer block FP8 GEMM not supported")
    if use_deepgemm and is_blackwell():
        # TODO(luka) DeepGEMM uses different quants, matching not supported
        # - on Blackwell, uses a special quant fp8, currently not supported
        pytest.skip("DeepGEMM & quant matching not currently supported")

    matches = matches_fn(n_layers)

    if "qwen" in model_name.lower() and "-quant_fp8" in custom_ops:
        # This is why config forces +quant_fp8 by default
        pytest.skip("native QuantFP8 matching not supported for group quant")

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_norm_quant=True,
            fuse_act_quant=True,
            fuse_attn_quant=True,
            enable_qk_norm_rope_fusion=True,
        ),
    )
    matches_check = [
        "rms_quant_fusion",
        "act_quant_fusion",
        "norm_rope_fusion",
        "attn_quant_fusion",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        use_deepgemm=use_deepgemm,
    )
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b_fp4, llama4_scout_fp4],
)
@pytest.mark.parametrize("attn_backend", [FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [6])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
@pytest.mark.skipif(not is_blackwell(), reason="Blackwell required for fp4")
def test_tp1_fp4_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
):
    """End-to-end TP=1 fusion counts for FP4 models (Blackwell only)."""
    matches = matches_fn(n_layers)

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_norm_quant=True,
            fuse_act_quant=True,
            fuse_attn_quant=True,
            enable_qk_norm_rope_fusion=True,
        ),
    )
    matches_check = ["act_quant_fusion", "attn_quant_fusion", "norm_rope_fusion"]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/fusions_e2e/test_tp1_quant.py",
"license": "Apache License 2.0",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/fusions_e2e/test_tp2_ar_rms.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import pytest
from vllm.config import PassConfig
from ...utils import multi_gpu_test
from .common import (
INDUCTOR_GRAPH_PARTITION,
AttentionBackendCase,
Matches,
custom_ops_combos,
is_blackwell,
)
from .models import (
FLASHINFER_ATTN,
TRITON_ATTN,
llama3_8b,
llama3_8b_fp4,
llama3_8b_fp8,
llama4_scout_fp4,
llama4_scout_fp8,
qwen3_a3b,
qwen3_a3b_fp8,
)
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    # qwen3-fp8 should still fuse AR+rms even though group quant is not yet supported
    [llama3_8b_fp8, llama4_scout_fp8, qwen3_a3b_fp8],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN, FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("quant_fp8", "rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_ar_rms_fp8_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
    monkeypatch,
):
    """TP=2 allreduce+RMSNorm fusion counts for FP8 models."""
    matches = matches_fn(n_layers)

    if "qwen" in model_name.lower() and "-quant_fp8" in custom_ops:
        # This is why config forces +quant_fp8 by default
        pytest.skip("native QuantFP8 matching not supported for group quant")

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_norm_quant=True,
            fuse_act_quant=True,
            fuse_attn_quant=True,
            enable_qk_norm_rope_fusion=True,
            fuse_allreduce_rms=True,
        ),
    )
    matches_check = [
        "rms_quant_fusion",
        "act_quant_fusion",
        "norm_rope_fusion",
        "attn_quant_fusion",
        "ar_rms_fusion",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b_fp4, llama4_scout_fp4],
)
@pytest.mark.parametrize("attn_backend", [FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
@pytest.mark.skipif(not is_blackwell(), reason="Blackwell required for fp4")
def test_tp2_ar_rms_fp4_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
    monkeypatch,
):
    """TP=2 allreduce+RMSNorm fusion counts for FP4 models (Blackwell only)."""
    matches = matches_fn(n_layers)

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_act_quant=True,
            fuse_attn_quant=True,
            fuse_allreduce_rms=True,
        ),
    )
    matches_check = [
        "act_quant_fusion",
        "attn_quant_fusion",
        "ar_rms_fusion",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b, qwen3_a3b],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_ar_rms_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
):
    """TP=2 allreduce+RMSNorm fusion counts for unquantized models."""
    matches = matches_fn(n_layers)

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            enable_qk_norm_rope_fusion=True,
            fuse_allreduce_rms=True,
        ),
    )
    matches_check = [
        "norm_rope_fusion",
        "ar_rms_fusion",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/fusions_e2e/test_tp2_ar_rms.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/fusions_e2e/test_tp2_async_tp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import pytest
from vllm.config import PassConfig
from ...utils import multi_gpu_test
from .common import (
INDUCTOR_GRAPH_PARTITION,
AttentionBackendCase,
Matches,
custom_ops_combos,
is_blackwell,
)
from .models import (
FLASHINFER_ATTN,
TRITON_ATTN,
llama3_8b,
llama3_8b_fp8,
llama4_scout_fp8,
qwen3_a3b,
)
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b_fp8, llama4_scout_fp8],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN, FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("quant_fp8", "rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_async_tp_fp8_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
    monkeypatch,
):
    """TP=2 sequence-parallel + async-TP fusion counts for FP8 models."""
    matches = matches_fn(n_layers)

    if is_blackwell():
        # Disable FlashInfer scaled_mm FP8 as it's not supported in async tp patterns
        monkeypatch.setenv("VLLM_DISABLED_KERNELS", "FlashInferFP8ScaledMMLinearKernel")

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_norm_quant=True,
            fuse_act_quant=True,
            fuse_attn_quant=True,
            enable_qk_norm_rope_fusion=True,
            enable_sp=True,
            fuse_gemm_comms=True,
            fuse_allreduce_rms=False,
            # Override threshold for testing (models have small hidden_size)
            sp_min_token_num=512,
        ),
    )
    matches_check = [
        "rms_quant_fusion",
        "act_quant_fusion",
        "norm_rope_fusion",
        "attn_quant_fusion",
        "sequence_parallel",
        "async_tp",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b, qwen3_a3b],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_async_tp_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
):
    """TP=2 sequence-parallel + async-TP fusion counts for unquantized models."""
    matches = matches_fn(n_layers)

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            enable_qk_norm_rope_fusion=True,
            enable_sp=True,
            fuse_gemm_comms=True,
            fuse_allreduce_rms=False,
            # Override threshold for testing (models have small hidden_size)
            sp_min_token_num=512,
        ),
    )
    matches_check = [
        "norm_rope_fusion",
        "sequence_parallel",
        "async_tp",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b_fp8, llama4_scout_fp8],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN, FLASHINFER_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("quant_fp8", "rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_sp_ar_rms_fp8_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
    monkeypatch,
):
    """TP=2 FP8: sequence-parallel, async-TP and allreduce+RMSNorm combined."""
    matches = matches_fn(n_layers)

    if is_blackwell():
        # Disable FlashInfer scaled_mm FP8 as it's not supported in async tp patterns
        monkeypatch.setenv("VLLM_DISABLED_KERNELS", "FlashInferFP8ScaledMMLinearKernel")

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            fuse_norm_quant=True,
            fuse_act_quant=True,
            fuse_attn_quant=True,
            enable_qk_norm_rope_fusion=True,
            enable_sp=True,
            fuse_gemm_comms=True,
            fuse_allreduce_rms=True,
            # Override threshold for testing (models have small hidden_size)
            sp_min_token_num=512,
        ),
    )
    matches_check = [
        "rms_quant_fusion",
        "act_quant_fusion",
        "norm_rope_fusion",
        "attn_quant_fusion",
        "ar_rms_fusion",
        "sequence_parallel",
        "async_tp",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
@multi_gpu_test(num_gpus=2)
@pytest.mark.parametrize(
    "model_name, matches_fn, model_kwargs, hf_overrides",
    [llama3_8b, qwen3_a3b],
)
@pytest.mark.parametrize("attn_backend", [TRITON_ATTN])
@pytest.mark.parametrize("n_layers", [4])
@pytest.mark.parametrize("custom_ops", custom_ops_combos("rms_norm"))
@pytest.mark.parametrize("inductor_graph_partition", INDUCTOR_GRAPH_PARTITION)
def test_tp2_sp_ar_rms_fusions(
    model_name: str,
    matches_fn: Callable[[int], Matches],
    model_kwargs: dict,
    hf_overrides: Callable[[int], dict],
    attn_backend: AttentionBackendCase,
    n_layers: int,
    custom_ops: str,
    inductor_graph_partition: bool,
    run_e2e_fusion_test,
):
    """TP=2 unquantized: sequence-parallel, async-TP and allreduce+RMSNorm."""
    matches = matches_fn(n_layers)

    # Copy rather than mutate: `model_kwargs` is the dict stored on the shared
    # module-level ModelFusionInfo tuple, so in-place writes would leak state
    # across parametrized runs and other test modules using the same model.
    # Reduce size of model and skip weight loading time.
    model_kwargs = {
        **model_kwargs,
        "hf_overrides": hf_overrides(n_layers),
        "load_format": "dummy",
        "max_model_len": 1024,
    }

    compilation_config = dict(
        use_inductor_graph_partition=inductor_graph_partition,
        custom_ops=custom_ops.split(","),
        pass_config=PassConfig(
            enable_qk_norm_rope_fusion=True,
            enable_sp=True,
            fuse_gemm_comms=True,
            fuse_allreduce_rms=True,
            # Override threshold for testing (models have small hidden_size)
            sp_min_token_num=512,
        ),
    )
    matches_check = [
        "norm_rope_fusion",
        "ar_rms_fusion",
        "sequence_parallel",
        "async_tp",
    ]
    run_e2e_fusion_test(
        model_name,
        matches,
        model_kwargs,
        attn_backend,
        compilation_config,
        matches_check,
        tp_size=2,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/fusions_e2e/test_tp2_async_tp.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/pooling/score/colbert_rerank_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Example of using ColBERT late interaction models for reranking and scoring.
ColBERT (Contextualized Late Interaction over BERT) uses per-token embeddings
and MaxSim scoring for document reranking, providing better accuracy than
single-vector models while being more efficient than cross-encoders.
vLLM supports ColBERT with multiple encoder backbones. Start the server
with one of the following:
# BERT backbone (works out of the box)
vllm serve answerdotai/answerai-colbert-small-v1
# ModernBERT backbone
vllm serve lightonai/GTE-ModernColBERT-v1 \
--hf-overrides '{"architectures": ["ColBERTModernBertModel"]}'
# Jina XLM-RoBERTa backbone
vllm serve jinaai/jina-colbert-v2 \
--hf-overrides '{"architectures": ["ColBERTJinaRobertaModel"]}' \
--trust-remote-code
Then run this script:
python colbert_rerank_online.py
"""
import json
import requests
# Change this to match the model you started the server with
MODEL = "answerdotai/answerai-colbert-small-v1"
# Default address of a locally launched `vllm serve` instance.
BASE_URL = "http://127.0.0.1:8000"

headers = {"accept": "application/json", "Content-Type": "application/json"}

# Small corpus to rerank; the last entry is deliberately off-topic.
documents = [
    "Machine learning is a subset of artificial intelligence.",
    "Python is a programming language.",
    "Deep learning uses neural networks for complex tasks.",
    "The weather today is sunny.",
]
def rerank_example():
    """Use the /rerank endpoint to rank documents by query relevance."""
    print("=== Rerank Example ===")
    data = {
        "model": MODEL,
        "query": "What is machine learning?",
        "documents": documents,
    }
    # Timeout guards against hanging forever if the server is not up;
    # raise_for_status surfaces HTTP errors instead of parsing an error body.
    response = requests.post(
        f"{BASE_URL}/rerank", headers=headers, json=data, timeout=60
    )
    response.raise_for_status()
    result = response.json()
    print(json.dumps(result, indent=2))

    print("\nRanked documents (most relevant first):")
    for item in result["results"]:
        doc_idx = item["index"]
        score = item["relevance_score"]
        print(f" Score {score:.4f}: {documents[doc_idx]}")
def score_example():
    """Use the /score endpoint for pairwise query-document scoring.

    Sends one query (``text_1``) against two candidate documents
    (``text_2``) and prints the raw JSON response.
    """
    print("\n=== Score Example ===")
    data = {
        "model": MODEL,
        "text_1": "What is machine learning?",
        "text_2": [
            "Machine learning is a subset of AI.",
            "The weather is sunny.",
        ],
    }
    response = requests.post(f"{BASE_URL}/score", headers=headers, json=data)
    # Surface HTTP-level failures immediately rather than failing later
    # inside .json().
    response.raise_for_status()
    result = response.json()
    print(json.dumps(result, indent=2))
def main():
    """Run both demos back to back against the running server."""
    for demo in (rerank_example, score_example):
        demo()


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/score/colbert_rerank_online.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/pooling/score/test_online_colbert.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Online API tests for ColBERT late interaction scoring."""
import pytest
import requests
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.pooling.score.protocol import RerankResponse, ScoreResponse
MODEL_NAME = "answerdotai/answerai-colbert-small-v1"
COLBERT_DIM = 96
MAX_MODEL_LEN = 512
@pytest.fixture(scope="module")
def server():
    """Start one OpenAI-compatible vLLM server for the whole test module."""
    args = [
        "--max-model-len",
        str(MAX_MODEL_LEN),
    ]
    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
        yield remote_server
class TestColBERTOnline:
    """Exercise rerank/score/pooling endpooints of a live ColBERT server."""

    def test_rerank(self, server: RemoteOpenAIServer):
        """Test ColBERT rerank endpoint."""
        query = "What is the capital of France?"
        documents = [
            "The capital of Brazil is Brasilia.",
            "The capital of France is Paris.",
        ]

        rerank_response = requests.post(
            server.url_for("rerank"),
            json={
                "model": MODEL_NAME,
                "query": query,
                "documents": documents,
            },
        )
        rerank_response.raise_for_status()
        rerank = RerankResponse.model_validate(rerank_response.json())

        assert rerank.id is not None
        assert rerank.results is not None
        assert len(rerank.results) == 2
        # The Paris document (index 1) answers the query and must outrank
        # the Brazil document (index 0).
        paris_result = next(r for r in rerank.results if r.index == 1)
        brazil_result = next(r for r in rerank.results if r.index == 0)
        assert paris_result.relevance_score > brazil_result.relevance_score

    def test_rerank_top_n(self, server: RemoteOpenAIServer):
        """Test ColBERT rerank with top_n parameter."""
        query = "What is the capital of France?"
        documents = [
            "The capital of Brazil is Brasilia.",
            "The capital of France is Paris.",
            "Machine learning is a field of AI.",
        ]

        rerank_response = requests.post(
            server.url_for("rerank"),
            json={
                "model": MODEL_NAME,
                "query": query,
                "documents": documents,
                "top_n": 2,
            },
        )
        rerank_response.raise_for_status()
        rerank = RerankResponse.model_validate(rerank_response.json())

        # top_n truncates to the two best matches; the Paris doc leads.
        assert len(rerank.results) == 2
        assert rerank.results[0].index == 1

    def test_score(self, server: RemoteOpenAIServer):
        """Test ColBERT score endpoint."""
        text_1 = "What is the capital of France?"
        text_2 = ["The capital of France is Paris.", "Python is a language."]

        score_response = requests.post(
            server.url_for("score"),
            json={
                "model": MODEL_NAME,
                "text_1": text_1,
                "text_2": text_2,
            },
        )
        score_response.raise_for_status()
        score = ScoreResponse.model_validate(score_response.json())

        assert score.id is not None
        assert score.data is not None
        assert len(score.data) == 2
        # The relevant pair must score above the unrelated one.
        assert score.data[0].score > score.data[1].score

    def test_token_embed(self, server: RemoteOpenAIServer):
        """Test ColBERT token_embed task via pooling endpoint."""
        text = "What is the capital of France?"

        pooling_response = requests.post(
            server.url_for("pooling"),
            json={
                "model": MODEL_NAME,
                "input": text,
                "task": "token_embed",
            },
        )
        pooling_response.raise_for_status()
        pooling = pooling_response.json()

        assert "data" in pooling
        assert len(pooling["data"]) == 1
        # One embedding vector per input token, each of width COLBERT_DIM.
        embeddings = pooling["data"][0]["data"]
        assert isinstance(embeddings, list)
        assert len(embeddings) > 0
        assert len(embeddings[0]) == COLBERT_DIM

    def test_embed_not_supported(self, server: RemoteOpenAIServer):
        """Test that ColBERT model does not support 'embed' task."""
        task = "embed"
        text = "What is the capital of France?"

        response = requests.post(
            server.url_for("pooling"),
            json={
                "model": MODEL_NAME,
                "input": text,
                "task": task,
            },
        )
        # Single-vector embedding is rejected for late-interaction models.
        assert response.json()["error"]["type"] == "BadRequestError"
        assert response.json()["error"]["message"].startswith(
            f"Unsupported task: {task!r}"
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/pooling/score/test_online_colbert.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/models/language/pooling/test_colbert.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for ColBERT late interaction scoring.
Tests are parametrized across multiple ColBERT backbones to ensure the
generic ColBERT support works with different encoder architectures.
"""
import pytest
import torch
from vllm.entrypoints.pooling.score.utils import compute_maxsim_score
# -----------------------------------------------------------------------
# Model definitions: (model_name, colbert_dim, extra vllm_runner kwargs)
# -----------------------------------------------------------------------
COLBERT_MODELS = {
    "bert": {
        "model": "answerdotai/answerai-colbert-small-v1",
        "colbert_dim": 96,
        "max_model_len": 512,
        "extra_kwargs": {},
        # How to load the HuggingFace reference for the parity test below.
        "hf_comparison": {
            "weights_file": "model.safetensors",
            "weights_key": "linear.weight",
            "trust_remote_code": False,
            "model_cls": "BertModel",
        },
    },
    "modernbert": {
        "model": "lightonai/GTE-ModernColBERT-v1",
        "colbert_dim": 128,
        "max_model_len": 299,
        "extra_kwargs": {
            # Explicit architecture override routes loading to vLLM's
            # ColBERT ModernBERT implementation.
            "hf_overrides": {
                "architectures": ["ColBERTModernBertModel"],
            },
        },
        "hf_comparison": {
            "weights_file": "1_Dense/model.safetensors",
            "weights_key": "linear.weight",
            "trust_remote_code": False,
            "model_cls": "AutoModel",
        },
    },
    "jina": {
        "model": "jinaai/jina-colbert-v2",
        "colbert_dim": 128,
        "max_model_len": 8192,
        "extra_kwargs": {
            "hf_overrides": {
                "architectures": ["ColBERTJinaRobertaModel"],
            },
        },
        "hf_comparison": {
            "weights_file": "model.safetensors",
            "weights_key": "linear.weight",
            # Jina's modeling code lives in the model repo.
            "trust_remote_code": True,
            "model_cls": "AutoModel",
        },
    },
}

# Paired query/document texts for the 1:1, 1:N and N:N scoring tests.
TEXTS_1 = [
    "What is the capital of France?",
    "What is the capital of Germany?",
]

TEXTS_2 = [
    "The capital of France is Paris.",
    "The capital of Germany is Berlin.",
]

DTYPE = "half"
def _load_hf_model(model_name: str, hf_spec: dict, device: torch.device):
    """Load HF model on the given device with a compatible attention impl."""
    from transformers import AutoModel, BertModel

    model_cls = BertModel if hf_spec["model_cls"] == "BertModel" else AutoModel
    load_kwargs = {}
    # Flash / Triton kernels require GPU tensors; fall back to eager on CPU.
    if device.type == "cpu":
        load_kwargs["attn_implementation"] = "eager"

    hf_model = model_cls.from_pretrained(
        model_name,
        trust_remote_code=hf_spec.get("trust_remote_code", False),
        **load_kwargs,
    ).to(device)
    hf_model.eval()
    return hf_model
def _load_projection_weight(model_name: str, hf_spec: dict, device: torch.device):
    """Download and return the ColBERT linear projection weight."""
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    local_path = hf_hub_download(model_name, filename=hf_spec["weights_file"])
    state = load_file(local_path)
    return state[hf_spec["weights_key"]].to(device)
def _compute_hf_colbert_embeddings(model, tokenizer, linear_weight, texts, device):
    """Run HF model + projection and return L2-normalised token embeddings."""
    import torch.nn.functional as F

    # The projection weight is the same for every text; convert once.
    weight = linear_weight.float()
    results = []
    for text in texts:
        encoded = tokenizer(text, return_tensors="pt").to(device)
        with torch.no_grad():
            hidden_states = model(**encoded).last_hidden_state.float()
        token_embs = F.normalize(F.linear(hidden_states, weight), p=2, dim=-1)
        results.append(token_embs.squeeze(0).cpu())
    return results
def _assert_embeddings_close(vllm_outputs, hf_embeddings):
"""Assert that vLLM and HuggingFace embeddings match."""
for i, (hf_emb, vllm_out) in enumerate(zip(hf_embeddings, vllm_outputs)):
vllm_emb = torch.as_tensor(vllm_out).float()
assert hf_emb.shape == vllm_emb.shape, (
f"Shape mismatch for text {i}: HF {hf_emb.shape} vs vLLM {vllm_emb.shape}"
)
torch.testing.assert_close(
vllm_emb,
hf_emb,
rtol=1e-2,
atol=1e-2,
msg=f"Embedding mismatch for text {i}",
)
@pytest.fixture(params=list(COLBERT_MODELS.keys()), scope="module")
def colbert_spec(request):
    """Return the model spec dict for the current parametrization."""
    return COLBERT_MODELS[request.param]


@pytest.fixture(scope="module")
def colbert_model_name(colbert_spec):
    """HF repo id of the parametrized ColBERT model."""
    return colbert_spec["model"]


@pytest.fixture(scope="module")
def colbert_dim(colbert_spec):
    """Width of the projected per-token embeddings."""
    return colbert_spec["colbert_dim"]


@pytest.fixture(scope="module")
def colbert_max_model_len(colbert_spec):
    """Context length to launch vLLM with for this model."""
    return colbert_spec["max_model_len"]


@pytest.fixture(scope="module")
def colbert_extra_kwargs(colbert_spec):
    """Extra vllm_runner kwargs (e.g. architecture overrides)."""
    return colbert_spec["extra_kwargs"]
def test_colbert_token_embed(
    vllm_runner,
    colbert_model_name,
    colbert_dim,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test that ColBERT model produces token embeddings."""
    with vllm_runner(
        colbert_model_name,
        runner="pooling",
        dtype=DTYPE,
        max_model_len=colbert_max_model_len,
        enforce_eager=True,
        **colbert_extra_kwargs,
    ) as vllm_model:
        outputs = vllm_model.token_embed([TEXTS_1[0]])

        assert len(outputs) == 1
        emb = torch.as_tensor(outputs[0])
        # Late interaction yields one vector per token: a 2-D
        # (num_tokens, colbert_dim) matrix with multiple rows.
        assert emb.dim() == 2
        assert emb.shape[1] == colbert_dim
        assert emb.shape[0] > 1
def test_colbert_late_interaction_1_to_1(
    vllm_runner,
    colbert_model_name,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test ColBERT late interaction scoring with 1:1 query-document pair."""
    with vllm_runner(
        colbert_model_name,
        runner="pooling",
        dtype=DTYPE,
        max_model_len=colbert_max_model_len,
        enforce_eager=True,
        **colbert_extra_kwargs,
    ) as vllm_model:
        q_outputs = vllm_model.token_embed([TEXTS_1[0]])
        d_outputs = vllm_model.token_embed([TEXTS_2[0]])

        q_emb = torch.as_tensor(q_outputs[0])
        d_emb = torch.as_tensor(d_outputs[0])
        # Reference score computed directly from the token embeddings.
        manual_score = compute_maxsim_score(q_emb, d_emb).item()

        vllm_scores = vllm_model.score(TEXTS_1[0], TEXTS_2[0])

        assert len(vllm_scores) == 1
        # The /score path must agree with manual MaxSim to within 1%.
        assert vllm_scores[0] == pytest.approx(manual_score, rel=0.01)
def test_colbert_late_interaction_1_to_N(
    vllm_runner,
    colbert_model_name,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test ColBERT late interaction scoring with 1:N query-documents."""
    with vllm_runner(
        colbert_model_name,
        runner="pooling",
        dtype=DTYPE,
        max_model_len=colbert_max_model_len,
        enforce_eager=True,
        **colbert_extra_kwargs,
    ) as vllm_model:
        q_outputs = vllm_model.token_embed([TEXTS_1[0]])
        d_outputs = vllm_model.token_embed(TEXTS_2)

        # One query scored against every document independently.
        q_emb = torch.as_tensor(q_outputs[0])
        manual_scores = []
        for d_out in d_outputs:
            d_emb = torch.as_tensor(d_out)
            manual_scores.append(compute_maxsim_score(q_emb, d_emb).item())

        vllm_scores = vllm_model.score(TEXTS_1[0], TEXTS_2)

        assert len(vllm_scores) == 2
        for i in range(2):
            assert vllm_scores[i] == pytest.approx(manual_scores[i], rel=0.01)
def test_colbert_late_interaction_N_to_N(
    vllm_runner,
    colbert_model_name,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test ColBERT late interaction scoring with N:N query-documents."""
    with vllm_runner(
        colbert_model_name,
        runner="pooling",
        dtype=DTYPE,
        max_model_len=colbert_max_model_len,
        enforce_eager=True,
        **colbert_extra_kwargs,
    ) as vllm_model:
        q_outputs = vllm_model.token_embed(TEXTS_1)
        d_outputs = vllm_model.token_embed(TEXTS_2)

        # Queries and documents are scored pairwise (q[i] vs d[i]),
        # not as a full cross product.
        manual_scores = []
        for q_out, d_out in zip(q_outputs, d_outputs):
            q_emb = torch.as_tensor(q_out)
            d_emb = torch.as_tensor(d_out)
            manual_scores.append(compute_maxsim_score(q_emb, d_emb).item())

        vllm_scores = vllm_model.score(TEXTS_1, TEXTS_2)

        assert len(vllm_scores) == 2
        for i in range(2):
            assert vllm_scores[i] == pytest.approx(manual_scores[i], rel=0.01)
def test_colbert_relevance_ordering(
    vllm_runner,
    colbert_model_name,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test that ColBERT scores relevant documents higher than irrelevant."""
    query = "What is machine learning?"
    documents = [
        "Machine learning is a subset of artificial intelligence.",
        "Python is a programming language.",
        "Deep learning uses neural networks.",
    ]

    with vllm_runner(
        colbert_model_name,
        runner="pooling",
        dtype=DTYPE,
        max_model_len=colbert_max_model_len,
        enforce_eager=True,
        **colbert_extra_kwargs,
    ) as vllm_model:
        scores = vllm_model.score(query, documents)

        assert len(scores) == 3
        # Both on-topic documents must beat the off-topic Python one.
        ml_score, py_score, dl_score = scores
        assert ml_score > py_score, "ML doc should score higher than Python doc"
        assert dl_score > py_score, "DL doc should score higher than Python doc"
def test_colbert_embed_not_supported(
    vllm_runner,
    colbert_model_name,
    colbert_max_model_len,
    colbert_extra_kwargs,
):
    """Test that ColBERT model does not support 'embed' task."""
    with (
        vllm_runner(
            colbert_model_name,
            runner="pooling",
            dtype=DTYPE,
            max_model_len=colbert_max_model_len,
            enforce_eager=True,
            **colbert_extra_kwargs,
        ) as vllm_model,
        # Single-vector embedding is rejected for late-interaction models.
        pytest.raises(ValueError, match="Embedding API is not supported"),
    ):
        vllm_model.embed([TEXTS_1[0]])
@pytest.mark.parametrize("backend", list(COLBERT_MODELS.keys()))
def test_colbert_hf_comparison(vllm_runner, backend):
    """Test that vLLM ColBERT embeddings match HuggingFace for each backend."""
    from transformers import AutoTokenizer

    spec = COLBERT_MODELS[backend]
    hf_spec = spec["hf_comparison"]
    model_name = spec["model"]
    assert isinstance(model_name, str)
    assert isinstance(hf_spec, dict)

    test_texts = [TEXTS_1[0], TEXTS_2[0]]

    # vLLM side: float32 keeps the numeric comparison tolerance tight.
    with vllm_runner(
        model_name,
        runner="pooling",
        dtype="float32",
        max_model_len=spec["max_model_len"],
        enforce_eager=True,
        **spec["extra_kwargs"],
    ) as vllm_model:
        vllm_outputs = vllm_model.token_embed(test_texts)

    # HF reference side: encoder forward + linear projection + L2 norm.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    hf_tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=hf_spec.get("trust_remote_code", False),
    )
    hf_model = _load_hf_model(model_name, hf_spec, device)
    linear_weight = _load_projection_weight(model_name, hf_spec, device)
    hf_embeddings = _compute_hf_colbert_embeddings(
        hf_model,
        hf_tokenizer,
        linear_weight,
        test_texts,
        device,
    )

    _assert_embeddings_close(vllm_outputs, hf_embeddings)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling/test_colbert.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/test_rotary_embedding_compile.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import vllm.envs as envs
from vllm.compilation.decorators import support_torch_compile
from vllm.config import (
CompilationConfig,
ModelConfig,
VllmConfig,
set_current_vllm_config,
)
from vllm.config.compilation import CompilationMode, CUDAGraphMode
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.platforms import current_platform
@support_torch_compile
class RotaryEmbeddingCompileModule(torch.nn.Module):
    """Minimal module wrapping a RoPE layer so it can be torch.compiled."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        # Small fixed-shape RoPE config; values chosen only to exercise
        # the op, not to match any real model.
        self.rotary_emb = get_rope(
            head_size=32,
            max_position=128,
            dtype=torch.float32,
            rope_parameters={"rope_type": "default", "rope_theta": 10000},
            is_neox_style=True,
        )

    def forward(
        self, positions: torch.Tensor, query: torch.Tensor, key: torch.Tensor
    ) -> torch.Tensor:
        # Sum the rotated tensors so the graph has a single tensor output.
        q_rot, k_rot = self.rotary_emb(positions, query, key)
        return q_rot + k_rot
@pytest.mark.skipif(current_platform.is_cpu(), reason="Requires GPU for torch.compile")
def test_rotary_embedding_torch_compile_with_custom_op(monkeypatch):
    """Compile the RoPE custom-op path and check no mutation leaks out.

    NOTE(review): the final assertion checks the compiled bytecode never
    names `update`, which the bytecode hook would record on an in-place
    buffer mutation escaping the compiled region.
    """
    # Ensure env toggles take effect for this test only.
    # The bytecode hook is required to detect buffer mutation in compiled code,
    # and AOT compile bypasses that hook entirely.
    envs.disable_envs_cache()
    monkeypatch.setenv("VLLM_USE_BYTECODE_HOOK", "1")
    monkeypatch.setenv("VLLM_USE_AOT_COMPILE", "0")

    device = "cuda"
    positions = torch.arange(16, device=device)
    query = torch.randn(16, 32, device=device, dtype=torch.bfloat16)
    key = torch.randn(16, 32, device=device, dtype=torch.bfloat16)

    vllm_config = VllmConfig(
        model_config=ModelConfig(dtype=torch.bfloat16),
        compilation_config=CompilationConfig(
            mode=CompilationMode.VLLM_COMPILE,
            backend="inductor",
            # Force the rotary_embedding custom op instead of the
            # pure-torch implementation.
            custom_ops=["+rotary_embedding"],
            cudagraph_mode=CUDAGraphMode.NONE,
            cudagraph_num_of_warmups=0,
        ),
    )
    with set_current_vllm_config(vllm_config):
        model = RotaryEmbeddingCompileModule(vllm_config=vllm_config)
        model(positions, query, key)
        assert model._compiled_bytecode is not None
        assert "update" not in model._compiled_bytecode.co_names
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/test_rotary_embedding_compile.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/xpu_fused_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEParallelConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.topk_weight_and_reduce import (
TopKWeightAndReduceNoOP,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
kFp8DynamicTensorSym,
kFp8StaticTensorSym,
)
from vllm.platforms import current_platform
if current_platform.is_xpu():
from vllm_xpu_kernels.fused_moe_interface import xpu_fused_moe
class XPUExperts(mk.FusedMoEPermuteExpertsUnpermute):
    """Fused-MoE expert execution backed by the vllm_xpu_kernels package."""

    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int | None = None,
        num_dispatchers: int | None = None,
    ):
        super().__init__(
            moe_config,
            quant_config,
            max_num_tokens,
            num_dispatchers,
        )
        # Unquantized path by default; XPUExpertsFp8 sets this to True.
        self.is_fp8 = False

    @property
    def expects_unquantized_inputs(self) -> bool:
        # The kernel receives raw hidden states (any activation
        # quantization happens inside xpu_fused_moe).
        return True

    @staticmethod
    def activation_format() -> mk.FusedMoEActivationFormat:
        return mk.FusedMoEActivationFormat.Standard

    @staticmethod
    def _supports_current_device() -> bool:
        return current_platform.is_xpu()

    @staticmethod
    def _supports_no_act_and_mul() -> bool:
        return False

    @staticmethod
    def _supports_activation(activation: MoEActivation) -> bool:
        return activation in [
            MoEActivation.SILU,
            MoEActivation.GELU,
            MoEActivation.SWIGLUOAI,
        ]

    @staticmethod
    def _supports_parallel_config(moe_parallel_config: FusedMoEParallelConfig) -> bool:
        return True

    @staticmethod
    def _supports_quant_scheme(
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
    ) -> bool:
        # Supported combinations: unquantized, FP8 weight-only, and
        # FP8 weight + dynamic per-tensor FP8 activation.
        SUPPORTED_W_A = [
            (None, None),
            (kFp8StaticTensorSym, None),
            (kFp8StaticTensorSym, kFp8DynamicTensorSym),
        ]
        return (weight_key, activation_key) in SUPPORTED_W_A

    def supports_chunking(self) -> bool:
        return False

    def supports_expert_map(self) -> bool:
        return True

    def finalize_weight_and_reduce_impl(self) -> mk.TopKWeightAndReduce:
        # No extra weighting/reduction step after apply(); the kernel's
        # output is final.
        return TopKWeightAndReduceNoOP()

    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        # No intermediate workspaces are requested; the fused kernel
        # writes the (M, K) result directly into the output buffer.
        workspace1 = (0,)
        workspace2 = (0,)
        output = (M, K)
        return (workspace1, workspace2, output)

    def apply(
        self,
        output: torch.Tensor,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        a1q_scale: torch.Tensor | None,
        a2_scale: torch.Tensor | None,
        workspace13: torch.Tensor,
        workspace2: torch.Tensor,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        apply_router_weight_on_input: bool,
    ):
        topk = topk_ids.size(-1)
        # Delegate the whole permute -> expert GEMMs -> unpermute pipeline
        # to the XPU fused kernel; `output` is written in place.
        xpu_fused_moe(
            hidden_states=hidden_states,
            w13=w1,
            w13_scales=self.w1_scale,
            w13_bias=self.w1_bias,
            w2=w2,
            w2_scales=self.w2_scale,
            w2_bias=self.w2_bias,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            n_experts_per_token=topk,
            activation=activation.value,
            num_experts=self.moe_config.num_local_experts,
            ep_rank=self.moe_config.ep_rank,
            ep_size=self.moe_config.ep_size,
            output=output,
            is_fp8=self.is_fp8,
        )
class XPUExpertsFp8(XPUExperts):
    """XPU fused-MoE experts variant that enables the FP8 kernel path."""

    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int | None = None,
        num_dispatchers: int | None = None,
    ):
        # Reuse all base-class wiring, then flip the flag that apply()
        # forwards to xpu_fused_moe().
        super().__init__(
            moe_config=moe_config,
            quant_config=quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=num_dispatchers,
        )
        self.is_fp8 = True
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/xpu_fused_moe.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:csrc/cpu/generate_cpu_attn_dispatch.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Generate CPU attention dispatch switch cases and kernel instantiations.
"""
import os
# Head dimensions divisible by 32 (support all ISAs)
HEAD_DIMS_32 = [32, 64, 96, 128, 160, 192, 224, 256]

# Head dimensions divisible by 16 but not 32 (VEC16 only)
HEAD_DIMS_16 = [80, 112]

# ISA types
ISA_TYPES = {
    "AMX": 0,
    "VEC": 1,
    "VEC16": 2,
    "NEON": 3,
    "VXE": 4,
}

# ISAs supported for head_dims divisible by 32
ISA_FOR_32 = ["AMX", "NEON", "VEC", "VEC16", "VXE"]

# ISAs supported for head_dims divisible by 16 only
ISA_FOR_16 = ["VEC16"]


def encode_params(head_dim: int, isa_type: str) -> int:
    """Encode head_dim and ISA type into a single int64_t."""
    # Layout: low byte carries the ISA id, the remaining bits carry the
    # head dimension — room for 256 ISA types and very large head dims.
    return (head_dim << 8) | ISA_TYPES[isa_type]
def generate_cases_for_isa_group(isa_list: list[str]) -> str:
    """Generate switch cases for a specific ISA group.

    Each emitted case matches one encoded (head_dim, ISA) key, aliases
    `attn_impl` to the matching AttentionImpl instantiation, then invokes
    the macro's trailing callable via `__VA_ARGS__()`.
    """
    cases = []

    # Generate cases for head_dims divisible by 32
    for head_dim in HEAD_DIMS_32:
        for isa in isa_list:
            if isa not in ISA_FOR_32:
                continue
            encoded = encode_params(head_dim, isa)
            case_str = (
                f"""    case {encoded}LL: {{ """
                f"""/* head_dim={head_dim}, isa={isa} */ \\"""
                f"""
      constexpr size_t head_dim = {head_dim}; \\"""
                f"""
      using attn_impl = cpu_attention::AttentionImpl<"""
                f"""cpu_attention::ISA::{isa}, \\"""
                f"""
          """
                f"""scalar_t, head_dim>; \\"""
                f"""
      return __VA_ARGS__(); \\"""
                f"""
    }} \\"""
            )
            cases.append(case_str)

    # Generate cases for head_dims divisible by 16 only.
    # NOTE: every ISA in the group maps to the VEC16 implementation here,
    # so e.g. (80, AMX) still dispatches to VEC16.
    for head_dim in HEAD_DIMS_16:
        for isa in isa_list:
            encoded = encode_params(head_dim, isa)
            case_str = (
                f"""    case {encoded}LL: {{ """
                f"""/* head_dim={head_dim}, isa={isa} """
                f"""(using VEC16) */ \\"""
                f"""
      constexpr size_t head_dim = {head_dim}; \\"""
                f"""
      using attn_impl = cpu_attention::AttentionImpl<"""
                f"""cpu_attention::ISA::VEC16, \\"""
                f"""
          """
                f"""scalar_t, head_dim>; \\"""
                f"""
      return __VA_ARGS__(); \\"""
                f"""
    }} \\"""
            )
            cases.append(case_str)

    return "\n".join(cases)
def generate_helper_function() -> str:
    """Generate helper function to encode parameters."""
    # Mirrors encode_params() so C++ callers build the same keys the
    # generated switch cases were emitted for.
    helper_src = """
inline int64_t encode_cpu_attn_params(int64_t head_dim, cpu_attention::ISA isa) {
  return (head_dim << 8) | static_cast<int64_t>(isa);
}
"""
    return helper_src
def generate_header_file() -> str:
    """Generate the complete header file content.

    The header defines one CPU_ATTN_DISPATCH macro whose set of switch
    cases depends on compile-time platform macros (AMX / aarch64 / s390x
    / generic fallback).
    """
    header = """// auto generated by generate_cpu_attn_dispatch.py
// clang-format off
#ifndef CPU_ATTN_DISPATCH_GENERATED_H
#define CPU_ATTN_DISPATCH_GENERATED_H

#include "cpu_attn_vec.hpp"
#include "cpu_attn_vec16.hpp"

#ifdef CPU_CAPABILITY_AMXBF16
#include "cpu_attn_amx.hpp"
#endif

#ifdef __aarch64__
#include "cpu_attn_neon.hpp"
#endif

#ifdef __s390x__
#include "cpu_attn_vxe.hpp"
#endif
"""
    header += generate_helper_function()

    # Generate dispatch macro with conditional compilation for different ISA sets
    header += """
// Dispatch macro using encoded parameters
"""

    # x86_64 with AMX
    header += """#if defined(CPU_CAPABILITY_AMXBF16)
#define CPU_ATTN_DISPATCH(HEAD_DIM, ISA_TYPE, ...) \\
  [&] { \\
    int64_t encoded_params = encode_cpu_attn_params(HEAD_DIM, ISA_TYPE); \\
    switch (encoded_params) { \\
"""
    header += generate_cases_for_isa_group(["AMX", "VEC", "VEC16"])
    header += """
      default: { \\
        TORCH_CHECK(false, "Unsupported CPU attention configuration: head_dim=" + \\
                    std::to_string(HEAD_DIM) + " isa=" + \\
                    std::to_string(static_cast<int>(ISA_TYPE))); \\
      } \\
    } \\
  }()
"""

    # ARM64 with NEON
    header += """#elif defined(__aarch64__)
#define CPU_ATTN_DISPATCH(HEAD_DIM, ISA_TYPE, ...) \\
  [&] { \\
    int64_t encoded_params = encode_cpu_attn_params(HEAD_DIM, ISA_TYPE); \\
    switch (encoded_params) { \\
"""
    header += generate_cases_for_isa_group(["NEON", "VEC", "VEC16"])
    header += """
      default: { \\
        TORCH_CHECK(false, "Unsupported CPU attention configuration: head_dim=" + \\
                    std::to_string(HEAD_DIM) + " isa=" + \\
                    std::to_string(static_cast<int>(ISA_TYPE))); \\
      } \\
    } \\
  }()
"""

    # s390x with VXE
    header += """#elif defined(__s390x__)
#define CPU_ATTN_DISPATCH(HEAD_DIM, ISA_TYPE, ...) \\
  [&] { \\
    int64_t encoded_params = encode_cpu_attn_params(HEAD_DIM, ISA_TYPE); \\
    switch (encoded_params) { \\
"""
    header += generate_cases_for_isa_group(["VXE", "VEC", "VEC16"])
    header += """
      default: { \\
        TORCH_CHECK(false, "Unsupported CPU attention configuration: head_dim=" + \\
                    std::to_string(HEAD_DIM) + " isa=" + \\
                    std::to_string(static_cast<int>(ISA_TYPE))); \\
      } \\
    } \\
  }()
"""

    # Fallback: VEC and VEC16 only
    header += """#else
#define CPU_ATTN_DISPATCH(HEAD_DIM, ISA_TYPE, ...) \\
  [&] { \\
    int64_t encoded_params = encode_cpu_attn_params(HEAD_DIM, ISA_TYPE); \\
    switch (encoded_params) { \\
"""
    header += generate_cases_for_isa_group(["VEC", "VEC16"])
    header += """
      default: { \\
        TORCH_CHECK(false, "Unsupported CPU attention configuration: head_dim=" + \\
                    std::to_string(HEAD_DIM) + " isa=" + \\
                    std::to_string(static_cast<int>(ISA_TYPE))); \\
      } \\
    } \\
  }()
#endif /* CPU_CAPABILITY_AMXBF16 / __aarch64__ / __s390x__ */

#endif  // CPU_ATTN_DISPATCH_GENERATED_H
"""
    return header
def main():
    """Write the generated dispatch header next to this script."""
    content = generate_header_file()
    output_path = os.path.join(
        os.path.dirname(__file__), "cpu_attn_dispatch_generated.h"
    )
    with open(output_path, "w") as out:
        out.write(content)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "csrc/cpu/generate_cpu_attn_dispatch.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/parser/abstract_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from abc import abstractmethod
from collections.abc import Sequence
from functools import cached_property
from openai.types.responses import (
ResponseFunctionToolCall,
ResponseOutputItem,
ResponseOutputMessage,
ResponseOutputText,
ResponseReasoningItem,
ToolChoiceFunction,
)
from openai.types.responses.response_output_text import Logprob
from openai.types.responses.response_reasoning_item import (
Content as ResponseReasoningTextContent,
)
from pydantic import TypeAdapter
from vllm.entrypoints.chat_utils import make_tool_call_id
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionNamedToolChoiceParam,
ChatCompletionRequest,
)
from vllm.entrypoints.openai.engine.protocol import (
DeltaMessage,
ExtractedToolCallInformation,
FunctionCall,
FunctionDefinition,
)
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
)
from vllm.logger import init_logger
from vllm.reasoning.abs_reasoning_parsers import ReasoningParser
from vllm.tokenizers import TokenizerLike
from vllm.tool_parsers.abstract_tool_parser import ToolParser
from vllm.utils import random_uuid
logger = init_logger(__name__)
class Parser:
"""
Abstract Parser class that unifies ReasoningParser and ToolParser into
a single interface for parsing model output.
This class provides a unified way to handle both reasoning extraction
(e.g., chain-of-thought content in <think> tags) and tool call extraction
(e.g., function calls in XML/JSON format) from model outputs.
Subclasses can either:
1. Override the abstract methods directly for custom parsing logic
2. Set `reasoning_parser` and `tool_parser` properties to delegate to
existing parser implementations
Class Attributes:
reasoning_parser_cls: The ReasoningParser class to use (for compatibility
with code that needs the class, not instance).
tool_parser_cls: The ToolParser class to use (for compatibility with
code that needs the class, not instance).
"""
# Class-level parser classes for compatibility with existing patterns
# Subclasses should override these if they use specific parser classes
reasoning_parser_cls: type[ReasoningParser] | None = None
tool_parser_cls: type[ToolParser] | None = None
    def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
        """
        Initialize the Parser.

        Args:
            tokenizer: The tokenizer used by the model. This is required for
                token-based parsing operations.
        """
        self.model_tokenizer = tokenizer
        # Optional delegate parsers; subclasses may assign these (via the
        # properties below) to reuse existing implementations.
        self._reasoning_parser: ReasoningParser | None = None
        self._tool_parser: ToolParser | None = None

    @cached_property
    def vocab(self) -> dict[str, int]:
        """Get the vocabulary mapping from tokens to IDs."""
        # Cached: the vocabulary does not change over the parser lifetime.
        return self.model_tokenizer.get_vocab()

    @property
    def reasoning_parser(self) -> ReasoningParser | None:
        """The underlying reasoning parser, if any."""
        return self._reasoning_parser

    @reasoning_parser.setter
    def reasoning_parser(self, parser: ReasoningParser | None) -> None:
        self._reasoning_parser = parser

    @property
    def tool_parser(self) -> ToolParser | None:
        """The underlying tool parser, if any."""
        return self._tool_parser

    @tool_parser.setter
    def tool_parser(self, parser: ToolParser | None) -> None:
        self._tool_parser = parser
# ========== Reasoning Parser Methods ==========
@abstractmethod
def is_reasoning_end(self, input_ids: list[int]) -> bool:
"""
Check if the reasoning content ends in the input_ids.
Used by structured engines like `xgrammar` to check if the
reasoning content ends in the model output.
Args:
input_ids: The token IDs of the model output.
Returns:
True if the reasoning content ends in the input_ids.
"""
def is_reasoning_end_streaming(
self, input_ids: list[int], delta_ids: list[int]
) -> bool:
"""
Check if the reasoning content ends during a decode step.
Args:
input_ids: The entire model output token IDs.
delta_ids: The last few computed tokens at the current decode step.
Returns:
True if the reasoning content ends in the delta_ids.
"""
return self.is_reasoning_end(input_ids)
@abstractmethod
def extract_content_ids(self, input_ids: list[int]) -> list[int]:
"""
Extract content token IDs from the input_ids.
This extracts the non-reasoning content (e.g., everything after
the </think> tag).
Args:
input_ids: The token IDs of the model output.
Returns:
The extracted content token IDs.
"""
@abstractmethod
def extract_response_outputs(
self,
model_output: str,
request: ResponsesRequest,
enable_auto_tools: bool = False,
tool_call_id_type: str = "random",
logprobs: list[Logprob] | None = None,
) -> list[ResponseOutputItem]:
"""
Extract reasoning, content, and tool calls from a complete
model-generated string and return as ResponseOutputItem objects.
Used for non-streaming responses where we have the entire model
response available before sending to the client.
Args:
model_output: The complete model-generated string.
request: The request object used to generate the output.
enable_auto_tools: Whether to enable automatic tool call parsing.
tool_call_id_type: Type of tool call ID generation ("random", etc).
logprobs: Pre-computed logprobs for the output text, if any.
Returns:
A list of ResponseOutputItem objects.
"""
    @abstractmethod
    def extract_reasoning(
        self,
        model_output: str,
        request: ChatCompletionRequest | ResponsesRequest,
    ) -> tuple[str | None, str | None]:
        """
        Extract reasoning content from a complete model-generated string.
        Used for non-streaming responses where we have the entire model
        response available before sending to the client.
        Args:
            model_output: The complete model-generated string.
            request: The request object used to generate the output.
        Returns:
            A tuple of (reasoning_content, response_content).
            Either element may be None when the corresponding part is absent.
        """
    @abstractmethod
    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """
        Extract reasoning content from a streaming delta message.
        Must be implemented by concrete parsers.
        Args:
            previous_text: Text from all previous tokens.
            current_text: Text including the current delta.
            delta_text: The new text in this delta.
            previous_token_ids: Token IDs from previous generation.
            current_token_ids: All token IDs including current.
            delta_token_ids: The new token IDs in this delta.
        Returns:
            A DeltaMessage with reasoning and/or content fields, or None.
        """
    # ========== Tool Parser Methods ==========
    def adjust_request(self, request: ChatCompletionRequest) -> ChatCompletionRequest:
        """
        Adjust the request parameters for tool calling.
        Can be overridden by subclasses to modify request parameters
        (e.g., setting structured output schemas for tool calling).
        Args:
            request: The original request.
        Returns:
            The adjusted request.
        """
        # Default implementation is a no-op passthrough.
        return request
    @abstractmethod
    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:
        """
        Extract tool calls from a complete model-generated string.
        Used for non-streaming responses.
        Must be implemented by concrete parsers.
        Args:
            model_output: The complete model-generated string.
            request: The request object used to generate the output.
        Returns:
            ExtractedToolCallInformation containing the tool calls.
        """
    @abstractmethod
    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> DeltaMessage | None:
        """
        Extract tool calls from a streaming delta message.
        Must be implemented by concrete parsers.
        Args:
            previous_text: Text from all previous tokens.
            current_text: Text including the current delta.
            delta_text: The new text in this delta.
            previous_token_ids: Token IDs from previous generation.
            current_token_ids: All token IDs including current.
            delta_token_ids: The new token IDs in this delta.
            request: The request object.
        Returns:
            A DeltaMessage with tool_calls field, or None.
        """
class DelegatingParser(Parser):
    """
    A Parser implementation that delegates to separate ReasoningParser and
    ToolParser instances.
    This is the recommended base class for creating model-specific parsers
    that combine existing reasoning and tool parser implementations.
    Subclasses should set `self._reasoning_parser` and `self._tool_parser`
    in their `__init__` method.
    If either parser is None, the corresponding methods will return default
    values (no reasoning extraction, no tool calls).
    """
    def extract_reasoning(
        self,
        model_output: str,
        request: ChatCompletionRequest | ResponsesRequest,
    ) -> tuple[str | None, str | None]:
        # Without a reasoning parser, the entire output is plain content.
        if self._reasoning_parser is None:
            return None, model_output
        return self._reasoning_parser.extract_reasoning(model_output, request)
    def extract_response_outputs(
        self,
        model_output: str,
        request: ResponsesRequest,
        enable_auto_tools: bool = False,
        tool_call_id_type: str = "random",
        logprobs: list[Logprob] | None = None,
    ) -> list[ResponseOutputItem]:
        # First extract reasoning
        reasoning, content = self.extract_reasoning(model_output, request)
        # Then parse tool calls from the content
        tool_calls, content = self._parse_tool_calls(
            request=request,
            content=content,
            enable_auto_tools=enable_auto_tools,
        )
        # Build output items
        outputs: list[ResponseOutputItem] = []
        # Add reasoning item if present
        if reasoning:
            reasoning_item = ResponseReasoningItem(
                id=f"rs_{random_uuid()}",
                summary=[],
                type="reasoning",
                content=[
                    ResponseReasoningTextContent(text=reasoning, type="reasoning_text")
                ],
                status=None,  # NOTE: Only the last output item has status.
            )
            outputs.append(reasoning_item)
        # Add message item if there's content
        # NOTE(review): logprobs cover the whole output text; when reasoning
        # is stripped they are attached to the content part as-is — confirm
        # downstream consumers expect that.
        if content:
            res_text_part = ResponseOutputText(
                text=content,
                annotations=[],
                type="output_text",
                logprobs=logprobs,
            )
            message_item = ResponseOutputMessage(
                id=f"msg_{random_uuid()}",
                content=[res_text_part],
                role="assistant",
                status="completed",
                type="message",
            )
            outputs.append(message_item)
        if tool_calls:
            # We use a simple counter for history_tool_call_count because
            # we don't track the history of tool calls in the Responses API yet.
            # This means that the tool call index will start from 0 for each
            # request.
            for history_tool_call_cnt, tool_call in enumerate(tool_calls):
                tool_call_item = ResponseFunctionToolCall(
                    id=f"fc_{random_uuid()}",
                    call_id=tool_call.id
                    if tool_call.id
                    else make_tool_call_id(
                        id_type=tool_call_id_type,
                        func_name=tool_call.name,
                        idx=history_tool_call_cnt,
                    ),
                    type="function_call",
                    status="completed",
                    name=tool_call.name,
                    arguments=tool_call.arguments,
                )
                outputs.append(tool_call_item)
        return outputs
    def _parse_tool_calls(
        self,
        request: ResponsesRequest,
        content: str | None,
        enable_auto_tools: bool,
    ) -> tuple[list[FunctionCall], str | None]:
        """
        TODO(qandrew): merge _parse_tool_calls_from_content
        for ChatCompletions into this function
        Parse tool calls from content based on request tool_choice settings.
        Returns:
            A tuple of (function_calls, remaining_content) if tool calls
            were parsed
        """
        function_calls: list[FunctionCall] = []
        if request.tool_choice and isinstance(request.tool_choice, ToolChoiceFunction):
            # Forced Function Call (Responses API style): the entire content
            # is the arguments for the single named tool.
            assert content is not None
            function_calls.append(
                FunctionCall(name=request.tool_choice.name, arguments=content)
            )
            return function_calls, None  # Clear content since tool is called.
        if request.tool_choice and isinstance(
            request.tool_choice, ChatCompletionNamedToolChoiceParam
        ):
            # Forced Function Call (Chat Completion API style)
            assert content is not None
            function_calls.append(
                FunctionCall(name=request.tool_choice.function.name, arguments=content)
            )
            return function_calls, None  # Clear content since tool is called.
        if request.tool_choice == "required":
            # Required tool calls - parse JSON
            assert content is not None
            tool_calls = TypeAdapter(list[FunctionDefinition]).validate_json(content)
            function_calls.extend(
                FunctionCall(
                    name=tool_call.name,
                    arguments=json.dumps(tool_call.parameters, ensure_ascii=False),
                )
                for tool_call in tool_calls
            )
            return function_calls, None  # Clear content since tool is called.
        if (
            self._tool_parser is not None
            and enable_auto_tools
            and (request.tool_choice == "auto" or request.tool_choice is None)
        ):
            # Automatic Tool Call Parsing
            tool_call_info = self._tool_parser.extract_tool_calls(
                content if content is not None else "",
                request=request,  # type: ignore
            )
            if tool_call_info is not None and tool_call_info.tools_called:
                function_calls.extend(
                    FunctionCall(
                        id=tool_call.id,
                        name=tool_call.function.name,
                        arguments=tool_call.function.arguments,
                    )
                    for tool_call in tool_call_info.tool_calls
                )
                remaining_content = tool_call_info.content
                # Normalize whitespace-only leftovers to None.
                if remaining_content and remaining_content.strip() == "":
                    remaining_content = None
                return function_calls, remaining_content
        # No tool calls
        return [], content
    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        # Without a reasoning parser, pass the delta through as content.
        if self._reasoning_parser is None:
            return DeltaMessage(content=delta_text)
        return self._reasoning_parser.extract_reasoning_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
        )
    def extract_tool_calls(
        self,
        model_output: str,
        request: ChatCompletionRequest,
    ) -> ExtractedToolCallInformation:
        # Without a tool parser, report "no tools called" with untouched content.
        if self._tool_parser is None:
            return ExtractedToolCallInformation(
                tools_called=False, tool_calls=[], content=model_output
            )
        return self._tool_parser.extract_tool_calls(model_output, request)
    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> DeltaMessage | None:
        if self._tool_parser is None:
            return None
        return self._tool_parser.extract_tool_calls_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
            request,
        )
class _WrappedParser(DelegatingParser):
    """
    A DelegatingParser that builds its delegate parsers from class attributes.
    Set `reasoning_parser_cls` and `tool_parser_cls` on the class before
    instantiation; `__init__` then constructs an instance of each configured
    class with the supplied tokenizer.
    Usage:
        _WrappedParser.reasoning_parser_cls = MyReasoningParser
        _WrappedParser.tool_parser_cls = MyToolParser
        parser = _WrappedParser(tokenizer)
    """
    reasoning_parser_cls: type[ReasoningParser] | None = None
    tool_parser_cls: type[ToolParser] | None = None
    def __init__(self, tokenizer: TokenizerLike):
        super().__init__(tokenizer)
        cls = type(self)
        # Instantiate whichever delegate parsers were configured on the class.
        if cls.reasoning_parser_cls is not None:
            self._reasoning_parser = cls.reasoning_parser_cls(tokenizer)
        if cls.tool_parser_cls is not None:
            self._tool_parser = cls.tool_parser_cls(tokenizer)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/parser/abstract_parser.py",
"license": "Apache License 2.0",
"lines": 461,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/parser/minimax_m2_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
MiniMax M2 Parser - A unified parser for MiniMax M2 models.
This parser combines the existing MiniMaxM2ReasoningParser and
MinimaxM2ToolParser into a single unified interface by delegating
to those implementations.
"""
from vllm.logger import init_logger
from vllm.parser.abstract_parser import DelegatingParser
from vllm.reasoning.minimax_m2_reasoning_parser import MiniMaxM2ReasoningParser
from vllm.tokenizers import TokenizerLike
from vllm.tool_parsers.minimax_m2_tool_parser import MinimaxM2ToolParser
logger = init_logger(__name__)
class MiniMaxM2Parser(DelegatingParser):
    """
    Unified parser for MiniMax M2 models covering both reasoning extraction
    and tool-call parsing.
    All work is delegated to the existing single-purpose implementations:
    - MiniMaxM2ReasoningParser: MiniMax M2 emits no <think> opener, only a
      </think> closer, so everything before </think> is reasoning and the
      remainder is the actual response.
    - MinimaxM2ToolParser: tool calls use <minimax:tool_call>...</minimax:tool_call>
      blocks containing <invoke name="...">...</invoke> and
      <parameter name="...">...</parameter> elements.
    """
    # Class-level parser classes for compatibility
    reasoning_parser_cls = MiniMaxM2ReasoningParser
    tool_parser_cls = MinimaxM2ToolParser
    def __init__(self, tokenizer: TokenizerLike):
        super().__init__(tokenizer)
        # Construct the delegate parsers that DelegatingParser dispatches to.
        self._reasoning_parser = MiniMaxM2ReasoningParser(tokenizer)
        self._tool_parser = MinimaxM2ToolParser(tokenizer)
        logger.debug(
            "vLLM Successfully initialized parser %s!", self.__class__.__name__
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/parser/minimax_m2_parser.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/parser/parser_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
import importlib
import os
from collections.abc import Callable
from typing import TYPE_CHECKING
from vllm.logger import init_logger
from vllm.utils.collection_utils import is_list_of
from vllm.utils.import_utils import import_from_path
if TYPE_CHECKING:
from vllm.parser.abstract_parser import Parser
from vllm.reasoning import ReasoningParser
from vllm.tool_parsers import ToolParser
logger = init_logger(__name__)
class ParserManager:
    """
    Central registry for Parser implementations.
    Supports two registration modes:
    - Eager registration via `register_module`
    - Lazy registration via `register_lazy_module`
    """
    # Eagerly registered (or already-imported lazy) parser classes.
    parsers: dict[str, type[Parser]] = {}
    lazy_parsers: dict[str, tuple[str, str]] = {}  # name -> (module_path, class_name)
    @classmethod
    def get_parser_internal(cls, name: str) -> type[Parser]:
        """
        Retrieve a registered or lazily registered Parser class.
        Args:
            name: The registered name of the parser.
        Returns:
            The Parser class.
        Raises:
            KeyError: If no parser is found under the given name.
        """
        if name in cls.parsers:
            return cls.parsers[name]
        if name in cls.lazy_parsers:
            return cls._load_lazy_parser(name)
        registered = ", ".join(cls.list_registered())
        raise KeyError(f"Parser '{name}' not found. Available parsers: {registered}")
    @classmethod
    def _load_lazy_parser(cls, name: str) -> type[Parser]:
        """Import and register a lazily loaded parser."""
        from vllm.parser.abstract_parser import Parser
        module_path, class_name = cls.lazy_parsers[name]
        try:
            mod = importlib.import_module(module_path)
            parser_cls = getattr(mod, class_name)
            if not issubclass(parser_cls, Parser):
                raise TypeError(
                    f"{class_name} in {module_path} is not a Parser subclass."
                )
            cls.parsers[name] = parser_cls  # cache
            return parser_cls
        except Exception as e:
            logger.exception(
                "Failed to import lazy parser '%s' from %s: %s",
                name,
                module_path,
                e,
            )
            raise
    @classmethod
    def _register_module(
        cls,
        module: type[Parser],
        module_name: str | list[str] | None = None,
        force: bool = True,
    ) -> None:
        """Register a Parser class immediately."""
        from vllm.parser.abstract_parser import Parser
        if not issubclass(module, Parser):
            raise TypeError(
                f"module must be subclass of Parser, but got {type(module)}"
            )
        # Default to the class name when no explicit name(s) given.
        if module_name is None:
            module_names = [module.__name__]
        elif isinstance(module_name, str):
            module_names = [module_name]
        elif is_list_of(module_name, str):
            module_names = module_name
        else:
            raise TypeError("module_name must be str, list[str], or None.")
        for name in module_names:
            if not force and name in cls.parsers:
                existed = cls.parsers[name]
                raise KeyError(f"{name} is already registered at {existed.__module__}")
            cls.parsers[name] = module
    @classmethod
    def register_lazy_module(cls, name: str, module_path: str, class_name: str) -> None:
        """
        Register a lazy module mapping for delayed import.
        Example:
            ParserManager.register_lazy_module(
                name="minimax_m2",
                module_path="vllm.parser.minimax_m2_parser",
                class_name="MiniMaxM2Parser",
            )
        """
        cls.lazy_parsers[name] = (module_path, class_name)
    @classmethod
    def register_module(
        cls,
        name: str | list[str] | None = None,
        force: bool = True,
        module: type[Parser] | None = None,
    ) -> type[Parser] | Callable[[type[Parser]], type[Parser]]:
        """
        Register a Parser class.
        Can be used as a decorator or called directly.
        Usage:
            @ParserManager.register_module("my_parser")
            class MyParser(Parser):
                ...
        Or:
            ParserManager.register_module(module=MyParser)
        """
        if not isinstance(force, bool):
            raise TypeError(f"force must be a boolean, but got {type(force)}")
        # Immediate registration
        if module is not None:
            cls._register_module(module=module, module_name=name, force=force)
            return module
        # Decorator usage
        # NOTE: the decorator path records a lazy (module, class) mapping
        # rather than registering eagerly, so decorated parsers import
        # on first use.
        def _decorator(obj: type[Parser]) -> type[Parser]:
            module_path = obj.__module__
            class_name = obj.__name__
            if isinstance(name, str):
                names = [name]
            elif is_list_of(name, str):
                names = name
            else:
                names = [class_name]
            for n in names:
                cls.lazy_parsers[n] = (module_path, class_name)
            return obj
        return _decorator
    @classmethod
    def list_registered(cls) -> list[str]:
        """Return names of all registered parsers."""
        return sorted(set(cls.parsers.keys()) | set(cls.lazy_parsers.keys()))
    @classmethod
    def import_parser(cls, plugin_path: str) -> None:
        """Import a user-defined parser from an arbitrary path."""
        module_name = os.path.splitext(os.path.basename(plugin_path))[0]
        try:
            import_from_path(module_name, plugin_path)
        except Exception:
            # Best-effort: a broken plugin is logged, not fatal.
            logger.exception(
                "Failed to load module '%s' from %s.", module_name, plugin_path
            )
    @classmethod
    def get_tool_parser(
        cls,
        tool_parser_name: str | None = None,
        enable_auto_tools: bool = False,
        model_name: str | None = None,
    ) -> type[ToolParser] | None:
        """Get the tool parser based on the name.

        Returns None when auto tool choice is disabled or no name is given;
        raises TypeError when the named parser is not registered.
        """
        from vllm.tool_parsers import ToolParserManager
        parser: type[ToolParser] | None = None
        if not enable_auto_tools or tool_parser_name is None:
            return parser
        logger.info('"auto" tool choice has been enabled.')
        try:
            if (
                tool_parser_name == "pythonic"
                and model_name
                and model_name.startswith("meta-llama/Llama-3.2")
            ):
                logger.warning(
                    "Llama3.2 models may struggle to emit valid pythonic tool calls"
                )
            parser = ToolParserManager.get_tool_parser(tool_parser_name)
        except Exception as e:
            raise TypeError(
                "Error: --enable-auto-tool-choice requires "
                f"tool_parser:'{tool_parser_name}' which has not "
                "been registered"
            ) from e
        return parser
    @classmethod
    def get_reasoning_parser(
        cls,
        reasoning_parser_name: str | None,
    ) -> type[ReasoningParser] | None:
        """Get the reasoning parser based on the name.

        Returns None for an empty name; raises TypeError when the named
        parser is not registered.
        """
        from vllm.reasoning import ReasoningParserManager
        parser: type[ReasoningParser] | None = None
        if not reasoning_parser_name:
            return None
        try:
            parser = ReasoningParserManager.get_reasoning_parser(reasoning_parser_name)
            assert parser is not None
        except Exception as e:
            raise TypeError(f"{reasoning_parser_name=} has not been registered") from e
        return parser
    @classmethod
    def get_parser(
        cls,
        tool_parser_name: str | None = None,
        reasoning_parser_name: str | None = None,
        enable_auto_tools: bool = False,
        model_name: str | None = None,
    ) -> type[Parser] | None:
        """
        Get a unified Parser that handles both reasoning and tool parsing.
        This method checks if a unified Parser exists that can handle both
        reasoning extraction and tool call parsing. If no unified parser
        exists, it creates a DelegatingParser that wraps the individual
        reasoning and tool parsers.
        Args:
            tool_parser_name: The name of the tool parser.
            reasoning_parser_name: The name of the reasoning parser.
            enable_auto_tools: Whether auto tool choice is enabled.
            model_name: The model name for parser-specific warnings.
        Returns:
            A Parser class, or None if neither parser is specified.
        """
        from vllm.parser.abstract_parser import _WrappedParser
        if not tool_parser_name and not reasoning_parser_name:
            return None
        # Strategy 1: If both names match, check for a unified parser with that name
        if tool_parser_name and tool_parser_name == reasoning_parser_name:
            try:
                parser = cls.get_parser_internal(tool_parser_name)
                logger.info(
                    "Using unified parser '%s' for both reasoning and tool parsing.",
                    tool_parser_name,
                )
                return parser
            except KeyError:
                pass  # No unified parser with this name
        # Strategy 2: Check for parser with either name
        for name in [tool_parser_name, reasoning_parser_name]:
            if name:
                try:
                    parser = cls.get_parser_internal(name)
                    logger.info(
                        "Using unified parser '%s' for reasoning and tool parsing.",
                        name,
                    )
                    return parser
                except KeyError:
                    pass
        # Strategy 3: Create a DelegatingParser with the individual parser classes
        reasoning_parser_cls = cls.get_reasoning_parser(reasoning_parser_name)
        tool_parser_cls = cls.get_tool_parser(
            tool_parser_name, enable_auto_tools, model_name
        )
        if reasoning_parser_cls is None and tool_parser_cls is None:
            return None
        # Set the class-level attributes on the imported _WrappedParser
        # NOTE(review): this mutates shared, module-level class attributes;
        # a second call with different parsers reconfigures any previously
        # returned _WrappedParser class. Confirm callers never hold two
        # differently-configured results at the same time.
        _WrappedParser.reasoning_parser_cls = reasoning_parser_cls
        _WrappedParser.tool_parser_cls = tool_parser_cls
        return _WrappedParser
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/parser/parser_manager.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/disaggregated_serving/mooncake_connector/mooncake_connector_proxy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import asyncio
import ipaddress
import itertools
import os
import urllib
import uuid
from contextlib import asynccontextmanager
from typing import Any
import httpx
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import StreamingResponse
def maybe_wrap_ipv6_address(address: str) -> str:
    """Wrap a bare IPv6 literal in brackets for use inside a URL.

    Anything that is not a valid IPv6 address (IPv4, hostname, ...) is
    returned unchanged.
    """
    try:
        ipaddress.IPv6Address(address)
    except ValueError:
        return address
    return f"[{address}]"
def make_http_path(host: str, port: int) -> str:
    """Build a plain-HTTP base URL string from a host and port."""
    return "http://{}:{}".format(host, port)
def prefiller_cycle(prefill_clients: list[Any]):
    """Endlessly yield (client, dp_rank) pairs.

    Round-robins over the clients and, within each client, over its
    data-parallel ranks. ``dp_size`` is re-read on every pass, so late
    updates to a client's dp_size take effect on the next cycle.
    """
    while True:
        for client in prefill_clients:
            yield from ((client, rank) for rank in range(client["dp_size"]))
async def get_prefiller_info(prefill_clients: list, ready: asyncio.Event):
    """Poll each prefiller until healthy, then record its per-dp-rank engine
    ids and dp_size. Sets *ready* only after every prefiller has answered."""
    for prefill_client in prefill_clients:
        while True:
            try:
                # Wait for prefill service to be ready
                response = await prefill_client["client"].get("/health")
                response.raise_for_status()
            except Exception:
                # Not up yet — retry once per second.
                await asyncio.sleep(1)
                continue
            response = await prefill_client["client"].get(
                prefill_client["bootstrap_addr"] + "/query"
            )
            response.raise_for_status()
            data = response.json()
            break
        # JSON object keys arrive as strings; store dp ranks as ints.
        for dp_rank, dp_entry in data.items():
            prefill_client["dp_engine_id"][int(dp_rank)] = dp_entry["engine_id"]
        dp_size = len(data)
        prefill_client["dp_size"] = dp_size
        print(f"Inited prefiller {prefill_client['url']} with dp_size={dp_size}")
    ready.set()
    print("All prefiller instances are ready.")
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Lifespan context manager to handle startup and shutdown events.

    Startup builds one persistent httpx client per prefill/decode instance,
    kicks off background discovery of prefiller engine info, and sets up the
    round-robin iterators; shutdown closes every client.
    """
    # Startup: Initialize client pools for prefiller and decoder services
    app.state.prefill_clients = []
    app.state.decode_clients = []
    app.state.ready = asyncio.Event()
    # Create prefill clients
    for i, (url, bootstrap_port) in enumerate(global_args.prefill):
        parsed_url = urllib.parse.urlparse(url)
        hostname = maybe_wrap_ipv6_address(parsed_url.hostname)
        app.state.prefill_clients.append(
            {
                "client": httpx.AsyncClient(
                    timeout=None,
                    base_url=url,
                    limits=httpx.Limits(
                        max_connections=None,
                        max_keepalive_connections=None,
                    ),
                ),
                "url": url,
                # Bootstrap port defaults to 8998 when not provided.
                "bootstrap_addr": make_http_path(hostname, bootstrap_port or 8998),
                "dp_engine_id": {},
            }
        )
    # Create decode clients
    for i, url in enumerate(global_args.decode):
        parsed_url = urllib.parse.urlparse(url)
        hostname = maybe_wrap_ipv6_address(parsed_url.hostname)
        app.state.decode_clients.append(
            {
                "client": httpx.AsyncClient(
                    timeout=None,
                    base_url=url,
                    limits=httpx.Limits(
                        max_connections=None,
                        max_keepalive_connections=None,
                    ),
                ),
            }
        )
    # Background task fills in dp_size/engine ids and sets app.state.ready.
    asyncio.create_task(get_prefiller_info(app.state.prefill_clients, app.state.ready))
    # Initialize round-robin iterators
    app.state.prefill_iterator = prefiller_cycle(app.state.prefill_clients)
    app.state.decode_iterator = itertools.cycle(range(len(app.state.decode_clients)))
    print(
        f"Got {len(app.state.prefill_clients)} prefill clients "
        f"and {len(app.state.decode_clients)} decode clients."
    )
    yield
    # Shutdown: Close all clients
    for client_info in app.state.prefill_clients:
        await client_info["client"].aclose()
    for client_info in app.state.decode_clients:
        await client_info["client"].aclose()
# Update FastAPI app initialization to use lifespan
# Module-level ASGI application; served by uvicorn in the __main__ guard.
app = FastAPI(lifespan=lifespan)
def parse_args():
    """Parse proxy CLI arguments.

    Post-processes the raw --prefill/--decode occurrences into
    ``args.prefill`` (list of (url, bootstrap_port) tuples) and
    ``args.decode`` (list of URL strings).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=8000)
    # Always use 127.0.0.1 as localhost binds to IPv6 which is blocked on CI
    parser.add_argument("--host", type=str, default="127.0.0.1")
    # For prefiller instances
    parser.add_argument(
        "--prefill",
        nargs="+",
        action="append",
        dest="prefill_raw",
        metavar=("URL", "bootstrap_port"),
        help=(
            "Prefill server URL and optional bootstrap port. "
            "Can be specified multiple times. "
            "Format: --prefill URL [BOOTSTRAP_PORT]. "
            "BOOTSTRAP_PORT can be a port number, "
            "'none', or omitted (defaults to none)."
        ),
    )
    # For decoder instances
    parser.add_argument(
        "--decode",
        nargs=1,
        action="append",
        dest="decode_raw",
        metavar=("URL",),
        help="Decode server URL. Can be specified multiple times.",
    )
    args = parser.parse_args()
    args.prefill = _parse_prefill_urls(args.prefill_raw)
    args.decode = _parse_decode_urls(args.decode_raw)
    return args
# From sglang router_args.py
def _parse_prefill_urls(prefill_list):
"""Parse prefill URLs from --prefill arguments.
Format: --prefill URL [BOOTSTRAP_PORT]
Example:
--prefill http://prefill1:8080 9000 # With bootstrap port
--prefill http://prefill2:8080 none # Explicitly no bootstrap port
--prefill http://prefill3:8080 # Defaults to no bootstrap port
"""
if not prefill_list:
return []
prefill_urls = []
for prefill_args in prefill_list:
url = prefill_args[0]
# Handle optional bootstrap port
if len(prefill_args) >= 2:
bootstrap_port_str = prefill_args[1]
# Handle 'none' as None
if bootstrap_port_str.lower() == "none":
bootstrap_port = None
else:
try:
bootstrap_port = int(bootstrap_port_str)
except ValueError as e:
raise ValueError(
f"Invalid bootstrap port: {bootstrap_port_str}. Must be a number or 'none'" # noqa: E501
) from e
else:
# No bootstrap port specified, default to None
bootstrap_port = None
prefill_urls.append((url, bootstrap_port))
return prefill_urls
def _parse_decode_urls(decode_list):
"""Parse decode URLs from --decode arguments.
Format: --decode URL
Example: --decode http://decode1:8081 --decode http://decode2:8081
"""
if not decode_list:
return []
# decode_list is a list of single-element lists due to nargs=1
return [url[0] for url in decode_list]
def get_next_client(app, service_type: str):
    """
    Return the next client for *service_type* in round-robin order.
    Args:
        app: The FastAPI app instance (iterators live in ``app.state``).
        service_type: Either 'prefill' or 'decode'.
    Returns:
        For 'prefill', a (client_info, dp_rank) pair; for 'decode',
        a decode client dict.
    Raises:
        ValueError: For an unknown service type.
    """
    if service_type == "prefill":
        return next(app.state.prefill_iterator)
    if service_type == "decode":
        idx = next(app.state.decode_iterator)
        return app.state.decode_clients[idx]
    raise ValueError(f"Unknown service type: {service_type}")
async def send_request_to_service(
    client_info: dict, dp_rank: int, endpoint: str, req_data: dict, request_id: str
):
    """
    Send a request to a service using a client from the pool.

    The payload is rewritten into a prefill-only request: generation is
    capped at one token, streaming is disabled, and kv_transfer_params
    marks the KV cache for remote decode.
    """
    # Copy so the caller's payload (reused for the decode request) is untouched.
    req_data = req_data.copy()
    req_data["kv_transfer_params"] = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "transfer_id": f"xfer-{request_id}",
    }
    req_data["stream"] = False
    req_data["max_tokens"] = 1
    if "max_completion_tokens" in req_data:
        req_data["max_completion_tokens"] = 1
    if "stream_options" in req_data:
        del req_data["stream_options"]
    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request_id,
        "X-data-parallel-rank": str(dp_rank),
    }
    response = await client_info["client"].post(
        endpoint, json=req_data, headers=headers
    )
    response.raise_for_status()
    # CRITICAL: Release connection back to pool
    await response.aclose()
async def stream_service_response(
    prefill_client_info: dict,
    prefill_dp_rank: int,
    decode_client_info: dict,
    endpoint: str,
    req_data: dict,
    request_id: str,
):
    """
    Asynchronously stream response from a service using a client from the pool.

    kv_transfer_params tells the decode instance where to pull the KV cache
    from: the prefiller's bootstrap address and the engine id of the chosen
    data-parallel rank.
    """
    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request_id,
    }
    req_data["kv_transfer_params"] = {
        "do_remote_decode": False,
        "do_remote_prefill": True,
        "remote_bootstrap_addr": prefill_client_info["bootstrap_addr"],
        "remote_engine_id": prefill_client_info["dp_engine_id"][prefill_dp_rank],
        "transfer_id": f"xfer-{request_id}",
    }
    async with decode_client_info["client"].stream(
        "POST", endpoint, json=req_data, headers=headers
    ) as response:
        response.raise_for_status()
        # Forward raw bytes unchanged to the caller.
        async for chunk in response.aiter_bytes():
            yield chunk
async def _handle_completions(api: str, request: Request):
    """Proxy one completion request: fire a prefill request in the
    background, then stream tokens from a decode instance to the client."""
    if not app.state.ready.is_set():
        # Prefiller bootstrap info has not been fetched yet (see lifespan).
        raise HTTPException(status_code=503, detail="Service Unavailable")
    try:
        req_data = await request.json()
        request_id = str(uuid.uuid4())
        # Get the next prefill client in round-robin fashion
        prefill_client_info, prefill_dp_rank = get_next_client(request.app, "prefill")
        # Send request to prefill service
        # Fire-and-forget: the decode stream below starts immediately; the
        # KV connector synchronizes the actual cache transfer.
        asyncio.create_task(
            send_request_to_service(
                prefill_client_info, prefill_dp_rank, api, req_data, request_id
            )
        )
        decode_client_info = get_next_client(request.app, "decode")
        # Stream response from decode service
        async def generate_stream():
            async for chunk in stream_service_response(
                prefill_client_info,
                prefill_dp_rank,
                decode_client_info,
                api,
                req_data,
                request_id=request_id,
            ):
                yield chunk
        return StreamingResponse(generate_stream(), media_type="application/json")
    except Exception as e:
        import sys
        import traceback
        # Log the full traceback before re-raising so the failure is visible
        # in the proxy's stdout.
        exc_info = sys.exc_info()
        print(f"Error occurred in disagg prefill proxy server - {api} endpoint")
        print(e)
        print("".join(traceback.format_exception(*exc_info)))
        raise
@app.post("/v1/completions")
async def handle_completions(request: Request):
    # Thin route wrapper; all logic lives in _handle_completions.
    return await _handle_completions("/v1/completions", request)
@app.post("/v1/chat/completions")
async def handle_chat_completions(request: Request):
    # Thin route wrapper; all logic lives in _handle_completions.
    return await _handle_completions("/v1/chat/completions", request)
if __name__ == "__main__":
    # A `global` statement is meaningless at module scope, so simply assign:
    # request handlers read this module-level variable directly.
    global_args = parse_args()
    import uvicorn
    uvicorn.run(app, host=global_args.host, port=global_args.port)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/disaggregated_serving/mooncake_connector/mooncake_connector_proxy.py",
"license": "Apache License 2.0",
"lines": 303,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/mooncake/mooncake_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import threading
import time
from dataclasses import dataclass
import uvicorn
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.utils import EngineId
from vllm.logger import init_logger
# Type alias: a worker address is an opaque string understood by the
# transfer engine (registered and served verbatim by the bootstrap server).
WorkerAddr = str
logger = init_logger(__name__)
class RegisterWorkerPayload(BaseModel):
    """Request body a prefiller worker POSTs to /register: its engine id,
    data/tensor/pipeline-parallel ranks, and reachable address."""
    engine_id: EngineId
    dp_rank: int
    tp_rank: int
    pp_rank: int
    addr: WorkerAddr
@dataclass
class EngineEntry:
    """Registration record for one data-parallel engine replica."""
    engine_id: EngineId
    # {tp_rank: {pp_rank: worker_addr}}
    worker_addr: dict[int, dict[int, WorkerAddr]]
class MooncakeBootstrapServer:
    """
    A centralized server running on the global rank 0 prefiller worker.
    Prefiller workers register their connection info (IP, port, ranks) here.
    """
    def __init__(self, vllm_config: VllmConfig, host: str, port: int):
        # NOTE: vllm_config is currently unused here; kept for interface
        # stability with the connector that constructs this server.
        # {dp_rank: EngineEntry}
        self.workers: dict[int, EngineEntry] = {}
        self.host = host
        self.port = port
        self.app = FastAPI()
        self._register_routes()
        self.server_thread: threading.Thread | None = None
        self.server: uvicorn.Server | None = None
    def __del__(self):
        # Best-effort cleanup; shutdown() is a no-op if never started.
        self.shutdown()
    def _register_routes(self):
        # All methods are async. No need to use lock to protect data.
        self.app.post("/register")(self.register_worker)
        self.app.get("/query", response_model=dict[int, EngineEntry])(self.query)
    def start(self):
        """Start the uvicorn server in a daemon thread (idempotent)."""
        if self.server_thread:
            return
        config = uvicorn.Config(app=self.app, host=self.host, port=self.port)
        self.server = uvicorn.Server(config=config)
        self.server_thread = threading.Thread(
            target=self.server.run, name="mooncake_bootstrap_server", daemon=True
        )
        self.server_thread.start()
        while not self.server.started:
            time.sleep(0.1)  # Wait for the server to start
        logger.info("Mooncake Bootstrap Server started at %s:%d", self.host, self.port)
    def shutdown(self):
        """Stop the server thread if it is running (idempotent)."""
        if self.server_thread is None or self.server is None or not self.server.started:
            return
        # Ask uvicorn to exit and wait for its thread to finish.
        self.server.should_exit = True
        self.server_thread.join()
        logger.info("Mooncake Bootstrap Server stopped.")
    async def register_worker(self, payload: RegisterWorkerPayload):
        """Handles registration of a prefiller worker.

        Rejects with HTTP 400 when the engine id for a dp_rank does not
        match an earlier registration, or when the exact
        (dp_rank, tp_rank, pp_rank) slot is already taken.
        """
        if payload.dp_rank not in self.workers:
            self.workers[payload.dp_rank] = EngineEntry(
                engine_id=payload.engine_id,
                worker_addr={},
            )
        dp_entry = self.workers[payload.dp_rank]
        if dp_entry.engine_id != payload.engine_id:
            raise HTTPException(
                status_code=400,
                detail=(
                    f"Engine ID mismatch for dp_rank={payload.dp_rank}: "
                    f"expected {dp_entry.engine_id}, got {payload.engine_id}"
                ),
            )
        if payload.tp_rank not in dp_entry.worker_addr:
            dp_entry.worker_addr[payload.tp_rank] = {}
        tp_entry = dp_entry.worker_addr[payload.tp_rank]
        if payload.pp_rank in tp_entry:
            raise HTTPException(
                status_code=400,
                detail=(
                    f"Worker with dp_rank={payload.dp_rank}, "
                    f"tp_rank={payload.tp_rank}, pp_rank={payload.pp_rank} "
                    f"is already registered at "
                    f"{tp_entry[payload.pp_rank]}, "
                    f"but still want to register at {payload.addr}"
                ),
            )
        tp_entry[payload.pp_rank] = payload.addr
        logger.debug(
            "Registered worker: engine_id=%s, dp_rank=%d, tp_rank=%d, pp_rank=%d at %s",
            payload.engine_id,
            payload.dp_rank,
            payload.tp_rank,
            payload.pp_rank,
            payload.addr,
        )
        return {"status": "ok"}
    async def query(self) -> dict[int, EngineEntry]:
        """Return the full registration table, keyed by dp_rank."""
        return self.workers
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/mooncake/mooncake_utils.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/rotary_embedding/fope.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn.functional as F
from torch import nn
from vllm.distributed import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from .base import RotaryEmbedding
from .common import rotate_neox
class FourierRotaryEmbedding(RotaryEmbedding):
    """Fourier Position Embedding (FoPE) rotary embedding.

    The standard cos/sin basis is mixed through learned coefficient matrices
    (``cos_coef`` / ``sin_coef``) — per KV head when ``fope_sep_head`` —
    before the Neox-style rotation. Because the coefficients are checkpoint
    weights, the cos/sin cache is recomputed once during the first forward
    pass, after weight loading has populated them.
    """

    def __init__(
        self,
        head_size: int,
        rotary_dim: int,
        max_position_embeddings: int,
        base: float,
        is_neox_style: bool,
        dtype: torch.dtype,
        init_cache: bool,
        # extra parameters for FoPE
        num_key_value_heads: int,
        num_inv_freq: int,
        fope_sep_head: bool,
        fope_init_factor: float,
    ):
        # fope related parameters
        self.num_key_value_heads = num_key_value_heads
        # Number of inverse frequencies to keep; when None, frequencies are
        # filtered by period length instead (see _compute_inv_freq).
        self.num_inv_freq = num_inv_freq
        # Whether the cache is computed with a separate mixing per KV head.
        self.fope_sep_head = fope_sep_head
        self.fope_init_factor = fope_init_factor
        super().__init__(
            head_size=head_size,
            rotary_dim=rotary_dim,
            max_position_embeddings=max_position_embeddings,
            base=base,
            is_neox_style=is_neox_style,
            dtype=dtype,
            init_cache=init_cache,
        )
        # setup buffers and parameters
        self.inv_freq: torch.Tensor
        self.register_buffer(
            "inv_freq", self._compute_inv_freq(self.base), persistent=False
        )
        # Square mixing matrices over the (truncated) frequency dimension.
        self.input_dim = self.inv_freq.shape[-1]
        self.output_dim = self.inv_freq.shape[-1]
        # Inference-only parameters: loaded from the checkpoint, not trained.
        self.cos_coef = nn.Parameter(
            torch.empty(num_key_value_heads, self.input_dim, self.output_dim),
            requires_grad=False,
        )
        self.sin_coef = nn.Parameter(
            torch.empty(num_key_value_heads, self.input_dim, self.output_dim),
            requires_grad=False,
        )
        # Custom loader shards/replicates the coefficients across TP ranks.
        self.sin_coef.weight_loader = self.weight_loader
        self.cos_coef.weight_loader = self.weight_loader
        self.cos_sin_cache: torch.Tensor
        cache = self._compute_cos_sin_cache().to(dtype)
        self.register_buffer("cos_sin_cache", cache, persistent=False)
        # update cache in the first forward, where sin/cos_coef weights are ready
        self.update_cache = True

    def _compute_inv_freq(self, base: float) -> torch.Tensor:
        """Compute the inverse frequency.

        Unlike standard RoPE, only a truncated subset is returned: the first
        ``num_inv_freq`` entries when that is set, otherwise all frequencies
        whose full period is shorter than ``max_position_embeddings``.
        """
        inv_freq = 1.0 / (
            base
            ** (
                torch.arange(0, self.rotary_dim, 2, dtype=torch.float) / self.rotary_dim
            )
        )
        inv_freq_idx_selected = torch.ones_like(inv_freq, dtype=torch.bool)
        if self.num_inv_freq is not None:
            inv_freq_idx_selected[self.num_inv_freq :] = False
        else:
            # Keep frequencies with wavelength 2*pi/f < max_position_embeddings.
            inv_freq_idx_selected = inv_freq > (
                2.0 * torch.pi / self.max_position_embeddings
            )
        inv_freq = inv_freq[inv_freq_idx_selected]
        return inv_freq

    def _compute_cos_sin_cache(self) -> torch.Tensor:
        """Compute the cos and sin cache by mixing the raw basis with the
        learned FoPE coefficients (cos in the first half of the last dim,
        sin in the second half)."""
        device = self.inv_freq.device
        t = torch.arange(self.max_position_embeddings, dtype=torch.float, device=device)
        freqs = torch.einsum("j,i -> ji", t, self.inv_freq)
        if self.fope_sep_head:
            # Broadcast the shared basis to every KV head.
            pos_cos = freqs.cos().unsqueeze(0).expand(self.num_key_value_heads, -1, -1)
            pos_sin = freqs.sin().unsqueeze(0).expand(self.num_key_value_heads, -1, -1)
        else:
            pos_cos = freqs.cos()
            pos_sin = freqs.sin()
        # Mix the basis with the learned coefficients in float32.
        # NOTE(review): the non-sep-head branch applies a 2-D einsum spec
        # ("tD, Dd") to coefficients created as 3-D (heads, D, d) above —
        # looks like it would only work with 2-D coefs; verify whether this
        # path is reachable in practice.
        if self.fope_sep_head:
            sin = torch.einsum("htD, hDd -> thd", pos_sin, self.sin_coef.float())
            cos = torch.einsum("htD, hDd -> thd", pos_cos, self.cos_coef.float())
        else:
            sin = torch.einsum("tD, Dd -> td", pos_sin, self.sin_coef.float())
            cos = torch.einsum("tD, Dd -> td", pos_cos, self.cos_coef.float())
        # Pad the non-FoPE remainder of each half-dim.
        # NOTE(review): both sin and cos are padded with 1 (not cos=1/sin=0);
        # presumably intended by the FoPE formulation — confirm against the
        # reference implementation.
        sin = F.pad(
            input=sin,
            pad=(0, self.head_size // 2 - sin.size(-1)),
            mode="constant",
            value=1,
        )
        cos = F.pad(
            input=cos,
            pad=(0, self.head_size // 2 - cos.size(-1)),
            mode="constant",
            value=1,
        )
        # Duplicate for the two rotated halves (Neox layout).
        sin = torch.cat((sin, sin), dim=-1)
        cos = torch.cat((cos, cos), dim=-1)
        # cache: (T, num_kv_heads, 2 * head_size) if fope_sep_head
        # else (T, 2 * head_size)
        cache = torch.cat((cos, sin), dim=-1)
        return cache

    def forward_native(
        self,
        positions: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor | None = None,
        offsets: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Apply FoPE rotary embedding to ``query`` and ``key``.

        ``offsets`` is accepted for interface compatibility but unused here.
        """
        # update cos/sin cache in the first forward, where sin/cos_coef
        # weights have been loaded
        if self.update_cache:
            cache = self._compute_cos_sin_cache().to(self.dtype)
            self.cos_sin_cache.copy_(cache)
            self.update_cache = False
        positions = positions.flatten()
        cos_sin = self.cos_sin_cache.index_select(0, positions)
        cos, sin = cos_sin.chunk(2, dim=-1)
        # apply rotary embedding
        # query: (seq_len, num_heads, head_size)
        # key: (seq_len, num_kv_heads, head_size)
        query = query.unflatten(-1, (-1, self.head_size))
        assert key is not None, "Key tensor is required for FoPE."
        key = key.unflatten(-1, (-1, self.head_size))
        assert query.dim() == key.dim() == 3, (
            "Expected query key (seq_len, heads, head_dim)"
        )
        assert cos.dim() <= 3 and sin.dim() <= 3
        need_reshape = False
        if cos.dim() == 3:
            # for fope: the cache carries a per-head dim; fold it into the
            # token dim so cos/sin broadcast against query/key below.
            need_reshape = True
            query_shape = query.shape
            key_shape = key.shape
            cos = cos.flatten(0, 1)
            sin = sin.flatten(0, 1)
            seq_len = cos.size(0)
            query = query.view(seq_len, -1, query.size(-1))
            key = key.view(seq_len, -1, key.size(-1))
        # native implementation of apply rope for neox style
        cos = cos.unsqueeze(1)
        sin = sin.unsqueeze(1)
        query = (query * cos) + (rotate_neox(query) * sin)
        key = (key * cos) + (rotate_neox(key) * sin)
        if need_reshape:
            # Restore the original (seq_len, heads, head_dim) layout.
            query = query.view(query_shape)
            key = key.view(key_shape)
        return query, key

    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor):
        """Load FoPE coefficient weights, sharding over KV heads for TP.

        When there are fewer KV heads than TP ranks, each head's slice is
        replicated across ``world_size // num_kv_heads`` consecutive ranks.
        """
        world_size = get_tensor_model_parallel_world_size()
        rank = get_tensor_model_parallel_rank()
        num_key_value_heads = loaded_weight.size(0)
        if num_key_value_heads < world_size:
            n_replicate = world_size // num_key_value_heads
            world_size = num_key_value_heads
            rank = rank // n_replicate
        loaded_weight = loaded_weight.chunk(world_size, dim=0)[rank]
        param.data.copy_(loaded_weight)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/rotary_embedding/fope.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/interns1_pro.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The vLLM team.
# Copyright 2025 The Qwen Team.
# Copyright 2025 The HuggingFace Inc. team.
# All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only InternS1Pro model compatible with HuggingFace weights."""
import functools
from collections.abc import Iterable
from typing import Any
import torch
from torch import nn
from transformers import AutoProcessor, PretrainedConfig
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import (
get_ep_group,
get_tensor_model_parallel_world_size,
tensor_model_parallel_all_gather,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
)
from vllm.model_executor.models.utils import sequence_parallel_chunk
from vllm.multimodal import MULTIMODAL_REGISTRY
from .interfaces import MixtureOfExperts
from .qwen3_moe import (
Qwen3MoeForCausalLM,
)
from .qwen3_vl import (
Qwen3_VisionTransformer,
Qwen3VLDummyInputsBuilder,
Qwen3VLForConditionalGeneration,
Qwen3VLMultiModalProcessor,
Qwen3VLProcessingInfo,
)
from .qwen3_vl_moe import Qwen3MoeLLMModel
from .utils import (
AutoWeightsLoader,
WeightsMapper,
extract_layer_index,
maybe_prefix,
)
logger = init_logger(__name__)
class InternS1ProProcessingInfo(Qwen3VLProcessingInfo):
    """Processing info for InternS1Pro; resolves config and processor via the
    input-processing context instead of Qwen3-VL's specialized lookups."""

    def get_hf_config(self):
        """Return the HF config resolved by the input-processing context."""
        return self.ctx.get_hf_config()

    def get_hf_processor(self, **kwargs: object) -> AutoProcessor:
        """Return the HF processor resolved by the input-processing context."""
        return self.ctx.get_hf_processor(**kwargs)
class InternS1ProMoeMLP(nn.Module):
    """Dense (non-MoE) feed-forward block: merged gate/up projection,
    SiLU-and-mul activation, then a down projection.

    Args:
        hidden_size: Model hidden dimension (input/output width).
        intermediate_size: FFN inner width of each gate/up branch.
        hidden_act: Activation name; only ``"silu"`` is supported.
        quant_config: Optional quantization configuration for the linears.
        reduce_results: Whether the down projection reduces partial results
            across tensor-parallel ranks.
        prefix: Module name prefix used for weight loading.

    Raises:
        ValueError: If ``hidden_act`` is not ``"silu"``.
    """

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        reduce_results: bool = True,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Fail fast: validate before allocating the tensor-parallel layers
        # (previously the check ran after both linears were constructed).
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=reduce_results,
            prefix=f"{prefix}.down_proj",
        )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        """Apply gate/up projection, SiLU-and-mul, then down projection."""
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x
class InternS1ProMoeSparseMoeBlock(nn.Module):
    """Sparse MoE FFN block with optional grouped top-k routing.

    Routing is a softmax over the gate logits; when ``router_n_groups`` > 0
    the experts are partitioned into groups and ``topk // n_groups`` experts
    are selected independently within each group.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ):
        super().__init__()
        config = vllm_config.model_config.hf_text_config
        parallel_config = vllm_config.parallel_config
        quant_config = vllm_config.quant_config
        self.tp_size = get_tensor_model_parallel_world_size()
        self.ep_group = get_ep_group().device_group
        self.ep_rank = get_ep_group().rank_in_group
        self.ep_size = self.ep_group.size()
        self.n_routed_experts = config.num_experts
        self.is_sequence_parallel = parallel_config.use_sequence_parallel_moe
        if self.tp_size > config.num_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {config.num_experts}."
            )
        # Load balancing settings.
        eplb_config = vllm_config.parallel_config.eplb_config
        self.enable_eplb = parallel_config.enable_eplb
        self.n_logical_experts = self.n_routed_experts
        self.n_redundant_experts = eplb_config.num_redundant_experts
        # Physical experts = logical experts + EPLB redundant replicas,
        # partitioned evenly across the expert-parallel group.
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        # For custom routing function; -1 disables grouped routing.
        self.n_groups = getattr(config, "router_n_groups", -1)
        self.experts = FusedMoE(
            num_experts=self.n_routed_experts,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=True,
            renormalize=config.norm_topk_prob,
            quant_config=quant_config,
            prefix=f"{prefix}.experts",
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
            is_sequence_parallel=self.is_sequence_parallel,
            custom_routing_function=self._custom_routing_function,
        )
        # The router gate is small, so it is replicated rather than sharded.
        self.gate = ReplicatedLinear(
            config.hidden_size,
            config.num_experts,
            bias=False,
            prefix=f"{prefix}.gate",
        )

    @staticmethod
    @functools.lru_cache
    def get_group_offsets(
        n_groups: int, group_size: int, device: torch.device
    ) -> torch.Tensor:
        """Cached [1, n_groups, 1] tensor of each group's first expert id."""
        group_offsets = (torch.arange(n_groups, device=device) * group_size).view(
            1, -1, 1
        )  # [1, n_groups, 1]
        return group_offsets

    # TODO: zhouxinyu, use vllm routing functions
    def _custom_routing_function(
        self,
        hidden_states: torch.Tensor,
        gating_output: torch.Tensor,
        topk: int,
        renormalize: bool,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Softmax routing with optional per-group top-k.

        Returns ``(topk_weights, topk_ids)``. ``hidden_states`` is unused
        but required by the FusedMoE custom-routing interface.
        """
        routing_weights = torch.softmax(gating_output, dim=-1, dtype=torch.float32)
        if self.n_groups > 0:
            assert routing_weights.shape[-1] % self.n_groups == 0, (
                f"{routing_weights.shape[-1]} cannot be divided by {self.n_groups}"
            )
            # Select topk // n_groups experts independently in each group.
            per_group_top_k = topk // self.n_groups
            group_size = routing_weights.shape[-1] // self.n_groups
            group_offsets = self.get_group_offsets(
                self.n_groups, group_size, routing_weights.device
            )
            routing_weights = routing_weights.unflatten(-1, (self.n_groups, group_size))
            topk_weights, topk_ids = torch.topk(
                routing_weights, per_group_top_k, dim=-1
            )
            # Convert group-local ids back to global expert ids.
            topk_ids = (topk_ids + group_offsets).flatten(-2, -1)
            topk_weights = topk_weights.flatten(-2, -1)
        else:
            topk_weights, topk_ids = torch.topk(routing_weights, topk, dim=-1)
        if renormalize:
            topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
        return topk_weights, topk_ids

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route tokens through the experts.

        Accepts (num_tokens, hidden) or a single 1-D (hidden,) input and
        preserves the input's dimensionality in the output.
        """
        assert hidden_states.dim() <= 2, (
            "InternS1ProMoeSparseMoeBlock only supports 1D or 2D inputs"
        )
        is_input_1d = hidden_states.dim() == 1
        # Normalize to 2-D before reading the token count: the previous
        # tuple-unpacking of .shape raised ValueError for the 1-D inputs
        # this block explicitly supports.
        hidden_dim = hidden_states.shape[-1]
        hidden_states = hidden_states.view(-1, hidden_dim)
        num_tokens = hidden_states.shape[0]
        if self.is_sequence_parallel:
            hidden_states = sequence_parallel_chunk(hidden_states)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = self.experts(
            hidden_states=hidden_states, router_logits=router_logits
        )
        if self.is_sequence_parallel:
            # Re-gather the sequence-parallel shards, then drop padding.
            final_hidden_states = tensor_model_parallel_all_gather(
                final_hidden_states, 0
            )
            final_hidden_states = final_hidden_states[:num_tokens]
        # return to 1d if input is 1d
        return final_hidden_states.squeeze(0) if is_input_1d else final_hidden_states
class InternS1ProMoeAttention(nn.Module):
    """Tensor-parallel multi-head attention with per-head QK-norm and FoPE
    rotary embeddings."""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_parameters: dict[str, Any],
        max_position_embeddings: int = 32768,
        head_dim: int | None = None,
        rms_norm_eps: float = 1e-06,
        qkv_bias: bool = False,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        dual_chunk_attention_config: dict[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim or (hidden_size // self.total_num_heads)
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        self.dual_chunk_attention_config = dual_chunk_attention_config
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=qkv_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # NOTE(review): mutates the caller-provided rope_parameters dict in
        # place so get_rope() sees the per-rank KV head count.
        rope_parameters["num_key_value_heads"] = self.num_kv_heads
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position_embeddings,
            rope_parameters=rope_parameters,
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            # Extra kwargs only apply when dual-chunk attention is enabled.
            **{
                "layer_idx": extract_layer_index(prefix),
                "dual_chunk_attention_config": dual_chunk_attention_config,
            }
            if dual_chunk_attention_config
            else {},
        )
        self.q_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)
        self.k_norm = RMSNorm(self.head_dim, eps=rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Project to QKV, apply per-head QK-norm and FoPE rope, attend,
        then project the output back to the hidden size."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # Add qk-norm: RMSNorm is applied per head before rope.
        q_by_head = q.view(*q.shape[:-1], q.shape[-1] // self.head_dim, self.head_dim)
        q_by_head = self.q_norm(q_by_head)
        q = q_by_head.view(q.shape)
        k_by_head = k.view(*k.shape[:-1], k.shape[-1] // self.head_dim, self.head_dim)
        k_by_head = self.k_norm(k_by_head)
        k = k_by_head.view(k.shape)
        # Call the native rope path explicitly (the FoPE implementation
        # provides forward_native).
        q, k = self.rotary_emb.forward_native(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class InternS1ProMoeDecoderLayer(nn.Module):
    """Pre-norm decoder layer: FoPE attention + (sparse MoE or dense) MLP."""

    def __init__(self, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_text_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 32768)
        dual_chunk_attention_config = getattr(
            config, "dual_chunk_attention_config", None
        )
        # update rope related parameters: copy the FoPE knobs from
        # rope_scaling into rope_parameters so get_rope() can see them.
        # NOTE(review): config.rope_parameters is shared across layers, so
        # every layer rewrites the same keys with the same values — harmless
        # but redundant.
        rope_scaling = config.rope_scaling
        fope_keys = {"fope_init_factor", "fope_sep_head", "num_inv_freq"}
        use_fope = any(rope_scaling.get(key) is not None for key in fope_keys)
        fope_init_factor = rope_scaling.get("fope_init_factor", None)
        fope_sep_head = rope_scaling.get("fope_sep_head", None)
        num_inv_freq = rope_scaling.get("num_inv_freq", None)
        config.rope_parameters["use_fope"] = use_fope
        config.rope_parameters["fope_init_factor"] = fope_init_factor
        config.rope_parameters["fope_sep_head"] = fope_sep_head
        config.rope_parameters["num_inv_freq"] = num_inv_freq
        assert use_fope, "should use FOPE for InternS1Pro model"
        self.self_attn = InternS1ProMoeAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rope_parameters=config.rope_parameters,
            max_position_embeddings=max_position_embeddings,
            rms_norm_eps=config.rms_norm_eps,
            qkv_bias=getattr(config, "attention_bias", False),
            head_dim=getattr(config, "head_dim", None),
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
            dual_chunk_attention_config=dual_chunk_attention_config,
        )
        # Use a sparse MoE block at every `decoder_sparse_step`-th layer,
        # unless this layer index is listed in `mlp_only_layers` in the config.
        layer_idx = extract_layer_index(prefix)
        mlp_only_layers = (
            [] if not hasattr(config, "mlp_only_layers") else config.mlp_only_layers
        )
        if (layer_idx not in mlp_only_layers) and (
            config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
        ):
            self.mlp = InternS1ProMoeSparseMoeBlock(
                vllm_config=vllm_config, prefix=f"{prefix}.mlp"
            )
        else:
            self.mlp = InternS1ProMoeMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Pre-norm residual block; returns (hidden_states, residual)."""
        # Self Attention
        if residual is None:
            # First layer: seed the residual stream from the raw input.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            # Two-arg RMSNorm call performs the fused add-and-norm.
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
class InternS1ProMoeLLMModel(Qwen3MoeLLMModel):
    """Qwen3-MoE LLM backbone specialized with InternS1Pro decoder layers."""

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        decoder_layer_type: type[torch.nn.Module] = InternS1ProMoeDecoderLayer,
    ):
        # Reuse the Qwen3-MoE wiring; only the decoder layer class differs.
        super().__init__(
            vllm_config=vllm_config,
            prefix=prefix,
            decoder_layer_type=decoder_layer_type,
        )
class InternS1ProMoeLLMForCausalLM(Qwen3MoeForCausalLM):
    """Causal-LM wrapper over the InternS1Pro MoE backbone."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        # Skip Qwen3MoeForCausalLM.__init__ (run the next class in the MRO
        # instead) so this class can build its own backbone below.
        super(Qwen3MoeForCausalLM, self).__init__()
        self.config = vllm_config.model_config.hf_config.text_config
        self.quant_config = vllm_config.quant_config
        self.model = InternS1ProMoeLLMModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.lm_head = ParallelLMHead(
            self.config.vocab_size,
            self.config.hidden_size,
            quant_config=self.quant_config,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        # Share the embedding matrix with the output head when configured.
        if self.config.tie_word_embeddings:
            self.lm_head.weight = self.model.embed_tokens.weight
        self.logits_processor = LogitsProcessor(self.config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )
class InternS1ProMoeMixtureOfExperts(MixtureOfExperts):
    """MixtureOfExperts interface implementation for InternS1Pro (EPLB
    bookkeeping over the sparse MoE layers)."""

    def update_physical_experts_metadata(
        self,
        num_physical_experts: int,
        num_local_physical_experts: int,
    ) -> None:
        """Propagate a new physical-expert layout to every sparse MoE layer.

        The per-rank expert count must remain unchanged (asserted below).
        """
        assert self.num_local_physical_experts == num_local_physical_experts
        self.num_physical_experts = num_physical_experts
        self.num_local_physical_experts = num_local_physical_experts
        self.num_redundant_experts = num_physical_experts - self.num_logical_experts
        for layer in self.language_model.model.layers:
            if isinstance(layer.mlp, InternS1ProMoeSparseMoeBlock):
                moe = layer.mlp
                moe.n_local_physical_experts = num_local_physical_experts
                moe.n_physical_experts = num_physical_experts
                moe.n_redundant_experts = self.num_redundant_experts
                moe.experts.update_expert_map()

    def set_moe_parameters(self):
        """Collect the MoE layers and copy expert counts from a sample layer.

        Raises:
            RuntimeError: if the language model contains no sparse MoE layer.
        """
        self.expert_weights = []
        self.moe_layers = []
        example_moe = None
        for layer in self.language_model.model.layers:
            if hasattr(layer, "mlp") and isinstance(
                layer.mlp, InternS1ProMoeSparseMoeBlock
            ):
                # All MoE layers share the same hyperparameters; any one
                # serves as the example.
                example_moe = layer.mlp
                self.moe_layers.append(layer.mlp.experts)
        if example_moe is None:
            raise RuntimeError("No InternS1ProMoe layer found in the language_model.")
        # Set MoE hyperparameters
        self.num_moe_layers = len(self.moe_layers)
        self.num_expert_groups = 1
        self.num_shared_experts = 0
        self.num_logical_experts = example_moe.n_logical_experts
        self.num_physical_experts = example_moe.n_physical_experts
        self.num_local_physical_experts = example_moe.n_local_physical_experts
        self.num_routed_experts = example_moe.n_routed_experts
        self.num_redundant_experts = example_moe.n_redundant_experts
@MULTIMODAL_REGISTRY.register_processor(
    Qwen3VLMultiModalProcessor,
    info=InternS1ProProcessingInfo,
    dummy_inputs=Qwen3VLDummyInputsBuilder,
)
class InternS1ProForConditionalGeneration(
    Qwen3VLForConditionalGeneration, InternS1ProMoeMixtureOfExperts
):
    """InternS1Pro multimodal model: Qwen3-VL vision tower plus an MoE
    language model with FoPE rotary embeddings."""

    # NOTE(review): flag consumed by the weight-loading machinery;
    # presumably marks that MoE expert weights are stored 3-D — confirm.
    is_3d_moe_weight: bool = True
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    }
    # To ensure correct weight loading and mapping.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "model.visual.": "visual.",
            "lm_head.": "language_model.lm_head.",
            "model.language_model.": "language_model.model.",
        },
    )

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        # Skip Qwen3VLForConditionalGeneration.__init__ (run the next class
        # in the MRO instead) so the language model can be swapped for the
        # InternS1Pro MoE variant below.
        super(Qwen3VLForConditionalGeneration, self).__init__()
        config: PretrainedConfig = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        self.video_pruning_rate = multimodal_config.video_pruning_rate
        self.is_multimodal_pruning_enabled = (
            multimodal_config.is_multimodal_pruning_enabled()
        )
        # Skip building the vision tower for text-only deployments.
        if not multimodal_config.get_limit_per_prompt(
            "image"
        ) and not multimodal_config.get_limit_per_prompt("video"):
            self.visual = None
        else:
            self.visual = Qwen3_VisionTransformer(
                config.vision_config,
                norm_eps=getattr(config, "rms_norm_eps", 1e-6),
                prefix=maybe_prefix(prefix, "visual"),
            )
        self.language_model = InternS1ProMoeLLMForCausalLM(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "language_model")
        )
        # Whether to include the gate_up_proj mapping is determined by
        # the language model.
        self.packed_modules_mapping = (
            self.packed_modules_mapping | self.language_model.packed_modules_mapping
        )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        self.use_deepstack = hasattr(config.vision_config, "deepstack_visual_indexes")
        self.deepstack_num_level = (
            len(config.vision_config.deepstack_visual_indexes)
            if self.use_deepstack
            else 0
        )
        self.visual_dim = config.vision_config.out_hidden_size
        self.multiscale_dim = self.visual_dim * self.deepstack_num_level
        # Set MoE hyperparameters
        self.set_moe_parameters()

    def get_frope_params_map(self) -> dict[str, str]:
        """Map the checkpoint's shared FoPE coefficient names to per-layer
        parameter names of this model.

        NOTE(review): every matching layer overwrites the same two keys, so
        the last layer wins — presumably all layers share one checkpoint
        tensor; confirm against the checkpoint layout.
        """
        mapper = {}
        for name, params in self.language_model.model.named_parameters():
            if "rotary_emb.sin_coef" in name:
                mapper["language_model.model.rotary_emb.sin_coef"] = (
                    f"language_model.model.{name}"
                )
            if "rotary_emb.cos_coef" in name:
                mapper["language_model.model.rotary_emb.cos_coef"] = (
                    f"language_model.model.{name}"
                )
        return mapper

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Load checkpoint weights, remapping HF prefixes and FoPE names.

        Skips time-series weights, and vision weights when no vision tower
        was built.
        """
        skip_prefixes = ["model.time_series."]
        if self.visual is None:
            skip_prefixes.append("visual.")
        # FIXME(Isotr0py): See if we can avoid tighing FoPE to PP layers
        weights_mapper = WeightsMapper(
            orig_to_new_prefix={
                "model.visual.": "visual.",
                "lm_head.": "language_model.lm_head.",
                "model.language_model.": "language_model.model.",
            },
            orig_to_new_suffix=self.get_frope_params_map(),
        )
        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
        return loader.load_weights(weights, mapper=weights_mapper)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/interns1_pro.py",
"license": "Apache License 2.0",
"lines": 567,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/pooling/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import math
from dataclasses import dataclass
from typing import Any
import pybase64
import torch
from vllm.outputs import PoolingRequestOutput
from vllm.utils.serial_utils import (
EMBED_DTYPES,
EmbedDType,
Endianness,
binary2tensor,
tensor2binary,
)
@dataclass
class MetadataItem:
    """Location and decoding info for one tensor packed into a shared
    binary buffer."""

    # Position of this tensor in the original request order.
    index: int
    embed_dtype: EmbedDType
    endianness: Endianness
    # Byte offsets [start, end) of this tensor's slice in the buffer.
    start: int
    end: int
    shape: tuple[int, ...]
def build_metadata_items(
    embed_dtype: EmbedDType,
    endianness: Endianness,
    shape: tuple[int, ...],
    n_request: int,
) -> list[MetadataItem]:
    """Build one MetadataItem per request, laying out equally-sized tensors
    of ``shape`` back to back in a shared binary buffer."""
    # Every tensor occupies the same number of bytes.
    stride = math.prod(shape) * EMBED_DTYPES[embed_dtype].nbytes
    items: list[MetadataItem] = []
    for idx in range(n_request):
        items.append(
            MetadataItem(
                index=idx,
                embed_dtype=embed_dtype,
                endianness=endianness,
                start=idx * stride,
                end=(idx + 1) * stride,
                shape=shape,
            )
        )
    return items
def encode_pooling_output_float(output: PoolingRequestOutput) -> list[float]:
    """Return the pooled tensor as a plain Python list of floats.

    NOTE(review): ``tolist()`` yields nested lists for multi-dimensional
    tensors; the annotation assumes a 1-D embedding — confirm with callers.
    """
    return output.outputs.data.tolist()
def encode_pooling_output_binary(
    output: PoolingRequestOutput,
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> bytes:
    """Serialize the pooled tensor to raw bytes in the requested dtype and
    byte order."""
    return tensor2binary(output.outputs.data, embed_dtype, endianness)
def encode_pooling_output_base64(
    output: PoolingRequestOutput,
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> str:
    """Serialize the pooled tensor to raw bytes, then base64-encode them as
    an ASCII string."""
    embedding_bytes = tensor2binary(output.outputs.data, embed_dtype, endianness)
    return pybase64.b64encode(embedding_bytes).decode("utf-8")
def encode_pooling_bytes(
    pooling_outputs: list[PoolingRequestOutput],
    embed_dtype: EmbedDType,
    endianness: Endianness,
) -> tuple[list[bytes], list[dict[str, Any]], dict[str, Any]]:
    """Serialize pooled tensors into a packed binary layout.

    Returns the binary chunks, one metadata dict per chunk (MetadataItem
    form, with byte offsets into the concatenated body), and a usage dict
    (UsageInfo form) totalling the prompt tokens.
    """
    chunks: list[bytes] = []
    metadata: list[dict[str, Any]] = []
    cursor = 0
    token_total = 0
    for position, request_output in enumerate(pooling_outputs):
        data = request_output.outputs.data
        blob = tensor2binary(
            tensor=data,
            embed_dtype=embed_dtype,
            endianness=endianness,
        )
        next_cursor = cursor + len(blob)
        # Dictionary form of MetadataItem
        metadata.append(
            {
                "index": position,
                "embed_dtype": embed_dtype,
                "endianness": endianness,
                "start": cursor,
                "end": next_cursor,
                "shape": data.shape,
            }
        )
        chunks.append(blob)
        token_total += len(request_output.prompt_token_ids)
        cursor = next_cursor
    # Dictionary form of UsageInfo
    usage = {
        "prompt_tokens": token_total,
        "total_tokens": token_total,
    }
    return chunks, metadata, usage
def decode_pooling_output(items: list[MetadataItem], body: bytes) -> list[torch.Tensor]:
    """Inverse of the binary encoding: slice ``body`` per item (ordered by
    index) and decode each slice back into a tensor."""
    ordered = sorted(items, key=lambda item: item.index)
    tensors: list[torch.Tensor] = []
    for item in ordered:
        chunk = body[item.start : item.end]
        tensors.append(
            binary2tensor(chunk, item.shape, item.embed_dtype, item.endianness)
        )
    return tensors
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/pooling/utils.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/responses/test_sampling_params.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for ResponsesRequest.to_sampling_params() parameter mapping."""
import pytest
import torch
from openai.types.responses.response_format_text_json_schema_config import (
ResponseFormatTextJSONSchemaConfig,
)
from pydantic import ValidationError
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
ResponseTextConfig,
)
from vllm.sampling_params import StructuredOutputsParams
class TestResponsesRequestSamplingParams:
    """Test that ResponsesRequest correctly maps parameters to SamplingParams."""

    def test_basic_sampling_params(self):
        """Test basic sampling parameters are correctly mapped."""
        request = ResponsesRequest(
            model="test-model",
            input="test input",
            temperature=0.8,
            top_p=0.95,
            top_k=50,
            max_output_tokens=100,
        )
        sampling_params = request.to_sampling_params(default_max_tokens=1000)
        assert sampling_params.temperature == 0.8
        assert sampling_params.top_p == 0.95
        assert sampling_params.top_k == 50
        # max_output_tokens on the request maps to max_tokens on SamplingParams.
        assert sampling_params.max_tokens == 100

    def test_extra_sampling_params(self):
        """Test extra sampling parameters are correctly mapped."""
        request = ResponsesRequest(
            model="test-model",
            input="test input",
            repetition_penalty=1.2,
            seed=42,
            stop=["END", "STOP"],
            ignore_eos=True,
            vllm_xargs={"custom": "value"},
        )
        sampling_params = request.to_sampling_params(default_max_tokens=1000)
        assert sampling_params.repetition_penalty == 1.2
        assert sampling_params.seed == 42
        assert sampling_params.stop == ["END", "STOP"]
        assert sampling_params.ignore_eos is True
        # vllm_xargs is forwarded verbatim as extra_args.
        assert sampling_params.extra_args == {"custom": "value"}

    def test_stop_string_conversion(self):
        """Test that single stop string is converted to list."""
        request = ResponsesRequest(
            model="test-model",
            input="test input",
            stop="STOP",
        )
        sampling_params = request.to_sampling_params(default_max_tokens=1000)
        assert sampling_params.stop == ["STOP"]

    def test_default_values(self):
        """Test default values for optional parameters."""
        request = ResponsesRequest(
            model="test-model",
            input="test input",
        )
        sampling_params = request.to_sampling_params(default_max_tokens=1000)
        assert sampling_params.repetition_penalty == 1.0  # None → 1.0
        assert sampling_params.stop == []  # Empty list
        assert sampling_params.extra_args == {}  # Empty dict

    def test_seed_bounds_validation(self):
        """Test that seed values outside torch.long bounds are rejected."""
        # Test seed below minimum
        with pytest.raises(ValidationError) as exc_info:
            ResponsesRequest(
                model="test-model",
                input="test input",
                seed=torch.iinfo(torch.long).min - 1,
            )
        assert "greater_than_equal" in str(exc_info.value).lower()
        # Test seed above maximum
        with pytest.raises(ValidationError) as exc_info:
            ResponsesRequest(
                model="test-model",
                input="test input",
                seed=torch.iinfo(torch.long).max + 1,
            )
        assert "less_than_equal" in str(exc_info.value).lower()
        # Test valid seed at boundaries (inclusive: min and max must be accepted)
        request_min = ResponsesRequest(
            model="test-model",
            input="test input",
            seed=torch.iinfo(torch.long).min,
        )
        assert request_min.seed == torch.iinfo(torch.long).min
        request_max = ResponsesRequest(
            model="test-model",
            input="test input",
            seed=torch.iinfo(torch.long).max,
        )
        assert request_max.seed == torch.iinfo(torch.long).max

    def test_structured_outputs_passed_through(self):
        """Test that structured_outputs field is passed to SamplingParams."""
        structured_outputs = StructuredOutputsParams(grammar="root ::= 'hello'")
        request = ResponsesRequest(
            model="test-model",
            input="test input",
            structured_outputs=structured_outputs,
        )
        sampling_params = request.to_sampling_params(default_max_tokens=1000)
        assert sampling_params.structured_outputs is not None
        assert sampling_params.structured_outputs.grammar == "root ::= 'hello'"

    def test_structured_outputs_and_json_schema_conflict(self):
        """Test that specifying both structured_outputs and json_schema raises."""
        structured_outputs = StructuredOutputsParams(grammar="root ::= 'hello'")
        text_config = ResponseTextConfig()
        text_config.format = ResponseFormatTextJSONSchemaConfig(
            type="json_schema",
            name="test",
            schema={"type": "object"},
        )
        request = ResponsesRequest(
            model="test-model",
            input="test input",
            structured_outputs=structured_outputs,
            text=text_config,
        )
        with pytest.raises(ValueError) as exc_info:
            request.to_sampling_params(default_max_tokens=1000)
        assert "Cannot specify both structured_outputs and text.format" in str(
            exc_info.value
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/responses/test_sampling_params.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/moe/test_shared_fused_moe_routed_transform.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for SharedFusedMoE with routed_input_transform.
Verifies that applying routed_input_transform inside SharedFusedMoE
produces the same results as applying the transform manually outside.
"""
import pytest
import torch
import torch.nn as nn
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.forward_context import set_forward_context
from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE
from vllm.utils.torch_utils import is_torch_equal_or_newer
class SimpleLinear(nn.Module):
    """A simple linear transform mimicking latent projection in latent MoE."""

    def __init__(self, in_features: int, out_features: int, dtype: torch.dtype):
        super().__init__()
        # Random init scaled down so activations stay in a sane range.
        init = torch.randn(out_features, in_features, device="cuda", dtype=dtype)
        self.weight = nn.Parameter(init / 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply ``x @ weight.T`` (no bias)."""
        return nn.functional.linear(x, self.weight)
class SimpleSharedExperts(nn.Module):
    """A simple 2-layer gated MLP mimicking shared experts."""

    def __init__(self, hidden_size: int, intermediate_size: int, dtype: torch.dtype):
        super().__init__()
        # Fused gate+up projection (hence the *2 on the output width).
        self.up = nn.Linear(
            hidden_size, intermediate_size * 2, bias=False, device="cuda", dtype=dtype
        )
        self.down = nn.Linear(
            intermediate_size, hidden_size, bias=False, device="cuda", dtype=dtype
        )
        # Shrink default init so outputs stay numerically small.
        with torch.no_grad():
            for layer in (self.up, self.down):
                layer.weight.div_(10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """SwiGLU-style MLP: down(silu(gate) * up)."""
        gate, up = self.up(x).chunk(2, dim=-1)
        return self.down(nn.functional.silu(gate) * up)
@pytest.fixture(autouse=True)
def setup_cuda():
    """Skip the test on CPU-only hosts; otherwise default all tensors to GPU."""
    if not torch.cuda.is_available():
        pytest.skip("CUDA not available")
    # Keeps the test bodies terse: every tensor created below lands on CUDA.
    torch.set_default_device("cuda")
@pytest.mark.parametrize("num_tokens", [1, 32])
@pytest.mark.parametrize("hidden_size,latent_size", [(256, 128), (128, 64)])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.skipif(
    is_torch_equal_or_newer("2.10.0"),
    reason="Test fails with PyTorch 2.10.0 see: https://github.com/vllm-project/vllm/issues/33995",
)
def test_routed_input_transform_inside_vs_outside(
    num_tokens: int,
    hidden_size: int,
    latent_size: int,
    dtype: torch.dtype,
    dist_init,
    workspace_init,
):
    """Compare SharedFusedMoE with transform inside vs manually applying outside.

    Method A (inside): SharedFusedMoE with routed_input_transform
    Method B (outside): Manually transform, then SharedFusedMoE without transform
    """
    torch.manual_seed(42)
    num_experts = 8
    top_k = 2
    intermediate_size = hidden_size * 2
    vllm_config = VllmConfig()
    vllm_config.compilation_config.static_forward_context = dict()
    shared_experts = SimpleSharedExperts(hidden_size, intermediate_size, dtype)
    routed_transform = SimpleLinear(hidden_size, latent_size, dtype)
    # Layer construction must happen under the active vLLM config.
    with set_current_vllm_config(vllm_config):
        # Method A: SharedFusedMoE WITH routed_input_transform.
        # hidden_size is the *latent* size: the transform projects
        # hidden_size -> latent_size before expert dispatch.
        moe_with_transform = SharedFusedMoE(
            shared_experts=shared_experts,
            routed_input_transform=routed_transform,
            num_experts=num_experts,
            top_k=top_k,
            hidden_size=latent_size,
            intermediate_size=intermediate_size,
            reduce_results=False,
            renormalize=True,
            params_dtype=dtype,
            tp_size=1,
            dp_size=1,
            pcp_size=1,
            prefix="moe_with_transform",
        )
        # Method B: SharedFusedMoE WITHOUT routed_input_transform.
        # shared_experts=None: only the routed path (fed with manually
        # pre-transformed input) is compared against method A.
        moe_without_transform = SharedFusedMoE(
            shared_experts=None,
            routed_input_transform=None,
            num_experts=num_experts,
            top_k=top_k,
            hidden_size=latent_size,
            intermediate_size=intermediate_size,
            reduce_results=False,
            renormalize=True,
            params_dtype=dtype,
            tp_size=1,
            dp_size=1,
            pcp_size=1,
            prefix="moe_without_transform",
        )
    # Both layers must share identical expert weights for a fair comparison.
    with torch.no_grad():
        moe_without_transform.w13_weight.copy_(moe_with_transform.w13_weight)
        moe_without_transform.w2_weight.copy_(moe_with_transform.w2_weight)
    moe_with_transform.quant_method.process_weights_after_loading(
        moe_with_transform
    )
    moe_without_transform.quant_method.process_weights_after_loading(
        moe_without_transform
    )
    hidden_states = torch.randn(num_tokens, hidden_size, device="cuda", dtype=dtype)
    router_logits = torch.randn(num_tokens, num_experts, device="cuda", dtype=dtype)
    with set_forward_context(None, vllm_config, num_tokens=num_tokens):
        shared_out_A, routed_out_A = moe_with_transform(
            hidden_states, router_logits
        )
        # Method B: apply the projection manually, then run the plain MoE.
        transformed_hidden = routed_transform(hidden_states)
        shared_out_B, routed_out_B = moe_without_transform(
            transformed_hidden, router_logits
        )
    torch.testing.assert_close(
        routed_out_A,
        routed_out_B,
        atol=1e-3,
        rtol=1e-3,
        msg="Routed output should match: transform inside vs outside",
    )
    # Shared-expert output must equal running the shared experts directly
    # on the *untransformed* hidden states.
    expected_shared_out = shared_experts(hidden_states)
    torch.testing.assert_close(
        shared_out_A,
        expected_shared_out,
        atol=1e-3,
        rtol=1e-3,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_shared_fused_moe_routed_transform.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/deepencoder2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# adapted from
# https://github.com/deepseek-ai/DeepSeek-OCR-2/blob/main/DeepSeek-OCR2-master/DeepSeek-OCR2-vllm/deepencoderv2/qwen2_d2e.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import transformers
class CustomQwen2Decoder(nn.Module):
    """Qwen2 stack used as a visual encoder with a mixed attention mask.

    ``token_type_ids`` selects the attention pattern per position:
    0 = non-causal (image tokens attend bidirectionally among themselves),
    1 = causal (query tokens attend to all image tokens and causally to
    earlier query tokens).
    """

    def __init__(
        self,
        decoder_layer: int = 24,
        max_position_embeddings: int = 131072,
        hidden_dimension: int = 896,
        num_attention_heads: int = 14,
        num_key_value_heads: int = 2,
        intermediate_size: int = 4864,
        vocab_size: int = 151936,
        attn_implementation: str = "sdpa",
        rms_norm_eps: float = 1e-06,
        rope_theta: float = 1000000.0,
        attention_dropout: float = 0.0,
        hidden_act: str = "silu",
        initializer_range: float = 0.02,
    ):
        super().__init__()
        # Resolve the HF classes from the installed transformers package.
        Qwen2Model = transformers.models.qwen2.modeling_qwen2.Qwen2Model
        Qwen2Config = transformers.Qwen2Config
        # Build the HF config from the explicit architecture hyperparameters.
        config = Qwen2Config(
            hidden_size=hidden_dimension,
            num_hidden_layers=decoder_layer,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            intermediate_size=intermediate_size,
            max_position_embeddings=max_position_embeddings,
            vocab_size=vocab_size,
            rms_norm_eps=rms_norm_eps,
            rope_theta=rope_theta,
            attention_dropout=attention_dropout,
            hidden_act=hidden_act,
            initializer_range=initializer_range,
            # Private field: forces the attention backend (e.g. "sdpa").
            _attn_implementation=attn_implementation,
        )
        # Subclass Qwen2Model so the custom 4D attention mask can be injected.
        self.model = self._create_custom_model(Qwen2Model, config)
        # Inputs are always pre-embedded image features; the token embedding
        # table is never used, so drop it to save memory.
        del self.model.embed_tokens

    def _create_custom_model(self, Qwen2Model, config):
        """Return a ``Qwen2Model`` subclass instance with a custom mask."""

        class CustomQwen2ModelInner(Qwen2Model):
            def forward(
                self,
                input_ids=None,
                attention_mask=None,
                position_ids=None,
                past_key_values=None,
                inputs_embeds=None,
                token_type_ids=None,  # 0 = non-causal, 1 = causal per position
                use_cache=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                cache_position=None,
            ):
                # Stash token_type_ids so _update_causal_mask can read them.
                self._current_token_type_ids = token_type_ids
                # NOTE(review): passing a {layer_type: mask} mapping as
                # attention_mask relies on recent transformers behavior —
                # confirm against the pinned transformers version.
                causal_mask_mapping = {
                    "full_attention": self._update_causal_mask(
                        attention_mask,
                        inputs_embeds,
                        cache_position,
                        past_key_values,
                        output_attentions,
                    )
                }
                outputs = super().forward(
                    input_ids=input_ids,
                    attention_mask=causal_mask_mapping,
                    position_ids=position_ids,
                    past_key_values=past_key_values,
                    inputs_embeds=inputs_embeds,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                    output_hidden_states=output_hidden_states,
                    return_dict=return_dict,
                    cache_position=cache_position,
                )
                return outputs

            def _update_causal_mask(
                self,
                attention_mask,
                input_tensor,
                cache_position,
                past_key_values,
                output_attentions,
            ):
                # Build an additive float mask: 0 where attention is allowed,
                # dtype-min where it is blocked.
                dtype, device = input_tensor.dtype, input_tensor.device
                min_dtype = torch.finfo(dtype).min
                batch_size, sequence_length = (
                    input_tensor.shape[0],
                    input_tensor.shape[1],
                )
                token_type_ids = self._current_token_type_ids
                # Per-sample mixed causal / non-causal attention mask.
                causal_mask = self._create_custom_4d_mask(
                    sequence_length=sequence_length,
                    dtype=dtype,
                    device=device,
                    batch_size=batch_size,
                    token_type_ids=token_type_ids,
                )
                # Fold in the 2D padding mask, if one was provided.
                if attention_mask is not None and attention_mask.dim() == 2:
                    padding_mask = attention_mask[:, None, None, :].to(dtype=dtype)
                    padding_mask = (1.0 - padding_mask) * min_dtype
                    causal_mask = causal_mask + padding_mask
                return causal_mask

            def _create_custom_4d_mask(
                self,
                sequence_length,
                dtype,
                device,
                batch_size,
                token_type_ids,
            ):
                min_dtype = torch.finfo(dtype).min
                masks = []
                for b in range(batch_size):
                    # Start fully blocked; selectively open positions below.
                    mask = torch.full(
                        (sequence_length, sequence_length),
                        fill_value=min_dtype,
                        dtype=dtype,
                        device=device,
                    )
                    type_ids = token_type_ids[b]
                    image_positions = (type_ids == 0).nonzero(as_tuple=True)[0]
                    text_positions = (type_ids == 1).nonzero(as_tuple=True)[0]
                    # Image (type 0) tokens attend bidirectionally to each other.
                    if len(image_positions) > 0:
                        mask[image_positions[:, None], image_positions] = 0.0
                    # Query (type 1) tokens: full access to image tokens,
                    # causal among themselves (each sees itself and earlier).
                    for i, text_pos in enumerate(text_positions):
                        if len(image_positions) > 0:
                            mask[text_pos, image_positions] = 0.0
                        mask[text_pos, text_positions[: i + 1]] = 0.0
                    masks.append(mask)
                # Shape (batch, 1, seq, seq): additive attention mask.
                mask = torch.stack(masks, dim=0).unsqueeze(1)
                return mask

        return CustomQwen2ModelInner(config)

    def forward(
        self,
        inputs_embeds: torch.Tensor,
        token_type_ids: torch.Tensor,
        attention_mask: torch.Tensor = None,
        **kwargs,
    ):
        """
        Args:
            inputs_embeds: [batch_size, seq_len, hidden_dim]
            token_type_ids: [batch_size, seq_len], 0=non-causal, 1=causal
            attention_mask: [batch_size, seq_len], optional
        """
        return self.model(
            inputs_embeds=inputs_embeds,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            **kwargs,
        )
class Qwen2Decoder2Encoder(nn.Module):
    """Wraps a Qwen2 decoder stack so it can act as a visual encoder.

    Flattened image features attend non-causally among themselves while a
    bank of learned query embeddings attends causally; only the query
    outputs are returned. Two query banks are supported: 144 queries for
    144-token inputs and 256 queries for 256-token inputs.

    (The previous docstring described a BART/Nougat decoder and was a
    copy-paste leftover.)
    """

    def __init__(
        self,
        decoder_layer: int,
        hidden_dimension: int,
        num_attention_heads: int,
        num_key_value_heads: int,
        intermediate_size: int,
    ):
        super().__init__()
        self.model = CustomQwen2Decoder(
            decoder_layer=decoder_layer,
            hidden_dimension=hidden_dimension,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            intermediate_size=intermediate_size,
            attn_implementation="sdpa",
        )
        # Learned query banks, one per supported input token count.
        self.query_768 = nn.Embedding(144, hidden_dimension)
        self.query_1024 = nn.Embedding(256, hidden_dimension)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Encode spatial features into query embeddings.

        Args:
            x: [batch, hidden_dim, H, W] feature map whose H*W must be
               144 or 256.

        Returns:
            [batch, num_queries, hidden_dim] query outputs.

        Raises:
            ValueError: if H*W is neither 144 nor 256.
        """
        x = x.flatten(2).transpose(1, 2)
        bs, n_query, _ = x.shape
        if n_query == 144:
            param_img = self.query_768.weight
        elif n_query == 256:
            param_img = self.query_1024.weight
        else:
            # Previously this fell through and crashed later with an
            # UnboundLocalError; fail fast with an actionable message.
            raise ValueError(
                f"Unsupported number of image tokens: {n_query} "
                "(expected 144 or 256)"
            )
        batch_query_imgs = param_img.unsqueeze(0).expand(
            bs, -1, -1
        )  # (batch_size, num_queries, hidden_size)
        x_combined = torch.cat([x, batch_query_imgs], dim=1)
        # Image tokens are type 0 (non-causal), query tokens type 1 (causal).
        token_type_ids = torch.cat(
            [
                torch.zeros(bs, n_query, dtype=torch.long),
                torch.ones(bs, n_query, dtype=torch.long),
            ],
            dim=1,
        )
        y = self.model(x_combined, token_type_ids)[0]
        y = y[:, n_query:, :]  # keep only the causal query outputs
        return y
def build_qwen2_decoder_as_encoder(
    decoder_layer=24,
    hidden_dimension=896,
    num_attention_heads=14,
    num_key_value_heads=2,
    intermediate_size=4864,
):
    """Factory for a ``Qwen2Decoder2Encoder`` with DeepSeek-OCR2 defaults."""
    return Qwen2Decoder2Encoder(
        decoder_layer=decoder_layer,
        hidden_dimension=hidden_dimension,
        num_attention_heads=num_attention_heads,
        num_key_value_heads=num_key_value_heads,
        intermediate_size=intermediate_size,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/deepencoder2.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/deepseek_ocr2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only Deepseek-OCR model compatible with HuggingFace weights."""
import math
from collections.abc import Iterable, Mapping, Sequence
from functools import partial
import torch
import torch.nn as nn
from transformers import BatchFeature
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.model_executor.models.interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
)
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
NestedTensors,
)
from vllm.multimodal.parse import (
ImageEmbeddingItems,
ImageProcessorItems,
ImageSize,
MultiModalDataItems,
)
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
)
from vllm.sequence import IntermediateTensors
from vllm.tokenizers import cached_tokenizer_from_config
from vllm.transformers_utils.configs.deepseek_vl2 import DeepseekVLV2Config
from vllm.transformers_utils.processors.deepseek_ocr import (
BASE_SIZE,
CROP_MODE,
DeepseekOCRProcessor,
)
from ...transformers_utils.processors.deepseek_ocr import count_tiles
from .deepencoder import ImageEncoderViT
from .deepencoder2 import build_qwen2_decoder_as_encoder
from .deepseek_ocr import DeepseekOCRImagePixelInputs
from .deepseek_vl2 import MlpProjector
# The image token id may be various
IMAGE_SIZE = 768 # different from deepseek-ocr
_IMAGE_TOKEN = "<image>"
class DeepseekOCR2ProcessingInfo(BaseProcessingInfo):
    """Processing metadata for DeepSeek-OCR2: processor, limits, token counts."""

    def get_hf_config(self):
        return self.ctx.get_hf_config(DeepseekVLV2Config)

    def get_hf_processor(self, **kwargs: object):
        # Force the v2 processing strategy and the module-level tiling
        # parameters; these override any identically-named caller kwargs.
        v2_processor_config = dict(
            image_size=IMAGE_SIZE,
            base_size=BASE_SIZE,
            crop_mode=CROP_MODE,
            strategy="v2",
        )
        return self.ctx.get_hf_processor(
            DeepseekOCRProcessor, **{**kwargs, **v2_processor_config}
        )

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No hard cap on the number of images per prompt.
        return {"image": None}

    def get_num_image_tokens(
        self, *, image_width: int, image_height: int, cropping: bool = True
    ) -> int:
        """Return the number of placeholder tokens one image expands to.

        NOTE(review): the ``cropping`` parameter is currently ignored; the
        module-level ``CROP_MODE`` constant decides tiling instead. Confirm
        whether the parameter should take precedence.
        """
        image_size = IMAGE_SIZE
        base_size = BASE_SIZE
        patch_size = 16
        downsample_ratio = 4
        if CROP_MODE:
            if image_width <= 768 and image_height <= 768:
                # Small images get a single global view, no local tiles.
                crop_ratio = [1, 1]
            else:
                # find the closest aspect ratio to the target
                crop_ratio = count_tiles(
                    image_width, image_height, image_size=IMAGE_SIZE
                )
            num_width_tiles, num_height_tiles = crop_ratio
        else:
            num_width_tiles = num_height_tiles = 1
        # Tokens per side after 16px patching and 4x projector downsampling.
        h = w = math.ceil((base_size // patch_size) / downsample_ratio)
        h2 = w2 = math.ceil((image_size // patch_size) / downsample_ratio)
        global_views_tokens = h * w
        if num_width_tiles > 1 or num_height_tiles > 1:
            local_views_tokens = (num_height_tiles * h2) * (num_width_tiles * w2)
        else:
            local_views_tokens = 0
        # +1 for the view separator embedding appended per image.
        return global_views_tokens + local_views_tokens + 1

    def get_image_size_with_most_features(self) -> ImageSize:
        # Worst-case (most image tokens) input resolution, used for profiling.
        if IMAGE_SIZE == 1024 and BASE_SIZE == 1280:
            return ImageSize(width=1024 * 2, height=1024 * 2)
        return ImageSize(width=768 * 2, height=768 * 2)
class DeepseekOCR2DummyInputsBuilder(
    BaseDummyInputsBuilder[DeepseekOCR2ProcessingInfo]
):
    """Builds profiling dummy inputs (text + images) for DeepSeek-OCR2."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """Return one image placeholder token per requested image."""
        image_token = self.info.get_hf_processor().image_token
        return image_token * mm_counts.get("image", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Return dummy images at the most token-expensive resolution."""
        target = self.info.get_image_size_with_most_features()
        images = self._get_dummy_images(
            width=target.width,
            height=target.height,
            num_images=mm_counts.get("image", 0),
        )
        return {"image": images}
class DeepseekOCR2MultiModalProcessor(
    BaseMultiModalProcessor[DeepseekOCR2ProcessingInfo]
):
    """Turns raw prompt + image data into model inputs for DeepSeek-OCR2."""

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        if mm_data:
            processed_outputs = self.info.ctx.call_hf_processor(
                self.info.get_hf_processor(**mm_kwargs),
                dict(prompt=prompt, **mm_data),
                mm_kwargs,
            )
        else:
            # Text-only prompt: tokenize directly, skipping image processing.
            tokenizer = self.info.get_tokenizer()
            processed_outputs = tokenizer(
                prompt, add_special_tokens=True, return_tensors="pt"
            )
        return processed_outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        images_spatial_crop = hf_inputs.get("images_spatial_crop", torch.empty((0, 2)))
        # An image contributes local crops only when it was actually tiled
        # (more than one tile along either axis).
        is_tiled = (images_spatial_crop[:, 0] > 1) | (images_spatial_crop[:, 1] > 1)
        patches_per_image = torch.where(is_tiled, images_spatial_crop.prod(dim=-1), 0)
        return dict(
            pixel_values=MultiModalFieldConfig.batched("image"),
            images_spatial_crop=MultiModalFieldConfig.batched("image"),
            # images_crop is flattened over all images; it is split back per
            # image using each image's crop count.
            images_crop=MultiModalFieldConfig.flat_from_sizes(
                "image", patches_per_image
            ),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_token_id = hf_processor.image_token_id
        assert isinstance(image_token_id, int)

        def get_replacement_deepseek_vl2(item_idx: int):
            # Expand the single <image> token into as many placeholder
            # tokens as this image's features occupy.
            images = mm_items.get_items(
                "image", (ImageEmbeddingItems, ImageProcessorItems)
            )
            if isinstance(images, ImageEmbeddingItems):
                num_image_tokens = images.get_feature_size(item_idx)
            else:
                size = images.get_image_size(item_idx)
                num_image_tokens = self.info.get_num_image_tokens(
                    image_width=size.width,
                    image_height=size.height,
                    cropping=CROP_MODE,
                )
            return [image_token_id] * num_image_tokens

        return [
            PromptReplacement(
                modality="image",
                target=[image_token_id],
                replacement=get_replacement_deepseek_vl2,
            )
        ]
@MULTIMODAL_REGISTRY.register_processor(
    DeepseekOCR2MultiModalProcessor,
    info=DeepseekOCR2ProcessingInfo,
    dummy_inputs=DeepseekOCR2DummyInputsBuilder,
)
class DeepseekOCR2ForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA):
    """DeepSeek-OCR2: SAM ViT + Qwen2-as-encoder vision towers, MLP projector,
    and a vLLM-registered language backbone."""

    # Remap HF checkpoint prefixes to this module layout.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            # map prefix for language backbone
            "model.embed_tokens.": "language_model.model.embed_tokens.",
            "model.layers.": "language_model.model.layers.",
            "model.norm.": "language_model.model.norm.",
            "lm_head.": "language_model.lm_head.",
            # remove "model." prefix for other components
            "model.": "",
        }
    )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        if modality.startswith("image"):
            return "<image>"
        raise ValueError("Only image modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: DeepseekVLV2Config = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.vision_config = config.vision_config
        self.projector_config = config.projector_config
        self.text_config = config.text_config
        model_config = vllm_config.model_config
        tokenizer = cached_tokenizer_from_config(model_config)
        self.image_token_id = tokenizer.vocab[_IMAGE_TOKEN]
        # Both vision towers are built under the tower-model marker; the
        # projector is the connector (see get_mm_mapping) — confirm scope.
        with self._mark_tower_model(vllm_config, "image"):
            self.sam_model = ImageEncoderViT(
                depth=12,
                embed_dim=768,
                img_size=1024,
                mlp_ratio=4,
                norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
                num_heads=12,
                patch_size=16,
                qkv_bias=True,
                use_rel_pos=True,
                global_attn_indexes=[2, 5, 8, 11],
                window_size=14,
                out_chans=256,
                last_conv_output=896,
            )
            self.qwen2_model = build_qwen2_decoder_as_encoder()
        self.projector = MlpProjector(self.projector_config)
        self.tile_tag = config.tile_tag
        self.global_view_pos = config.global_view_pos
        # special token for image token sequence format
        n_embed = self.projector_config.n_embed
        embed_std = 1 / torch.sqrt(torch.tensor(n_embed, dtype=torch.float32))
        if self.tile_tag == "2D":
            # This is a typo in original implementation ("seperator")
            self.view_seperator = nn.Parameter(torch.randn(n_embed) * embed_std)
        else:
            raise ValueError(
                f"Only 2D tile_tag is supported currently, got: {self.tile_tag}"
            )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=self.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> DeepseekOCRImagePixelInputs | None:
        """Pull image tensors out of kwargs; return None for text-only input."""
        pixel_values = kwargs.pop("pixel_values", None)
        images_spatial_crop = kwargs.pop("images_spatial_crop", None)
        images_crop = kwargs.pop("images_crop", None)
        # An all-zero pixel tensor is treated the same as no image input.
        if pixel_values is None or torch.sum(pixel_values).item() == 0:
            return None
        base_size = self.vision_config.image_size
        return DeepseekOCRImagePixelInputs(
            type="pixel_values",
            data=pixel_values,
            images_crop=images_crop,
            images_spatial_crop=images_spatial_crop,
            resolve_bindings={
                "base_size": base_size,
            },
        )

    def _encode_global_features(self, image_tensor: torch.Tensor) -> torch.Tensor:
        # SAM ViT -> Qwen2 encoder -> projector; flatten to (tokens, dim).
        global_features_1 = self.sam_model(image_tensor)
        global_features_2 = self.qwen2_model(global_features_1)
        features = self.projector(global_features_2)
        _, hw, dim = features.shape
        return features.view(-1, dim)

    def _encode_local_features(self, patches: torch.Tensor) -> torch.Tensor | None:
        # All-zero patches mean the image was not tiled; no local features.
        if torch.sum(patches).item() == 0:
            return None
        local_features = self.sam_model(patches)
        local_features = self.qwen2_model(local_features)
        features = self.projector(local_features)
        _, _, dim = features.shape
        return features.view(-1, dim)

    def _pixel_values_to_embedding(
        self,
        pixel_values: torch.Tensor,
        images_crop: torch.Tensor,
        images_spatial_crop: torch.Tensor,
    ) -> NestedTensors:
        """Encode each image into one flat embedding sequence.

        Per-image layout: [local tile features (if tiled), global view
        features, view separator embedding].
        """
        images_in_this_batch = []
        is_tiled = (images_spatial_crop[:, 0] > 1) | (images_spatial_crop[:, 1] > 1)
        patches_per_image = torch.where(is_tiled, images_spatial_crop.prod(dim=-1), 0)
        # Split the flattened crop tensor back into per-image chunks.
        images_crop = images_crop.split(patches_per_image.tolist())
        for jdx in range(images_spatial_crop.size(0)):
            patches = images_crop[jdx]
            # [[jdx]] keeps the batch dimension (shape (1, C, H, W)).
            image_ori = pixel_values[[jdx]]
            global_features = self._encode_global_features(image_ori)
            local_features = self._encode_local_features(patches)
            if local_features is not None:
                combined = torch.cat(
                    [local_features, global_features, self.view_seperator[None, :]],
                    dim=0,
                )
            else:
                combined = torch.cat(
                    [global_features, self.view_seperator[None, :]], dim=0
                )
            images_in_this_batch.append(combined)
        return images_in_this_batch

    def _process_image_input(
        self, image_input: DeepseekOCRImagePixelInputs
    ) -> torch.Tensor:
        pixel_values = image_input.data
        images_crop = image_input.images_crop
        images_spatial_crop = image_input.images_spatial_crop.to(dtype=torch.long)
        vision_features = self._pixel_values_to_embedding(
            pixel_values=pixel_values,
            images_crop=images_crop,
            images_spatial_crop=images_spatial_crop,
        )
        return vision_features

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        """Return per-image embedding sequences, or None for text-only."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return None
        vision_embeddings = self._process_image_input(image_input)
        return vision_embeddings

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ):
        # Under pipeline parallelism, non-first ranks consume
        # intermediate_tensors instead of input embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model(
            input_ids, positions, intermediate_tensors, inputs_embeds=inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        autoloaded_weights = loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
        return autoloaded_weights

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="projector",
            tower_model=["sam_model", "qwen2_model"],
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/deepseek_ocr2.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/step3p5.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only Jurassic model."""
import typing
from collections.abc import Callable, Iterable
from typing import Any
import torch
from torch import nn
from torch.nn.parameter import Parameter
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ModelConfig, VllmConfig
from vllm.distributed import (
get_dp_group,
get_ep_group,
get_pp_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
get_tp_group,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import SiluAndMul, SwigluStepAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.fused_moe.shared_fused_moe import SharedFusedMoE
from vllm.model_executor.layers.layernorm import GemmaRMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization.base_config import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
from .interfaces import MixtureOfExperts, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
WeightsMapper,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)
class FP32ReplicatedLinear(ReplicatedLinear):
    """Replicated linear layer that promotes its input to FP32.

    The layer's parameters must already be stored in float32; the input is
    upcast before delegating to ``ReplicatedLinear.forward``.
    """

    def forward(
        self,
        x: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, Parameter | None]:
        # Only valid when the parameters themselves live in float32.
        assert self.params_dtype == torch.float32
        upcast = x.to(torch.float32)
        return super().forward(upcast)
class Step3p5MLP(nn.Module):
    """Gated SiLU MLP for Step3p5, with an optional per-layer swiglu clamp."""

    def __init__(
        self,
        config: ModelConfig,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        reduce_results: bool = True,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Fused gate+up projection (two intermediate_size outputs).
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            reduce_results=reduce_results,
            prefix=f"{prefix}.down_proj",
        )
        if hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {hidden_act}. Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()
        self.prefix = prefix
        self.hidden_size = hidden_size
        self.limit = None
        layer_idx = extract_layer_index(prefix)
        limits = config.swiglu_limits_shared
        # Honor the per-layer swiglu clamp only when configured and nonzero.
        if limits and limits[layer_idx] not in (None, 0):
            self.limit = limits[layer_idx]
            self.act_fn = SwigluStepAndMul(limit=self.limit)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """gate_up -> gated activation -> down projection."""
        projected, _ = self.gate_up_proj(hidden_states)
        activated = self.act_fn(projected)
        out, _ = self.down_proj(activated)
        return out
class Step3p5Attention(nn.Module):
    """Self-attention module for Step3p5 with per-layer behaviour overrides.

    The layer index parsed from ``prefix`` selects entries from several
    parallel per-layer config lists:
      * sliding-window vs full attention (``layer_types``, or layer parity
        as a fallback), optionally with a different head count
        (``swa_num_attention_heads``),
      * per-layer RoPE theta (``rope_theta`` may be a list),
      * disabling RoPE entirely (``use_rope_layers``),
      * restricting ``rope_scaling`` (YaRN) to listed layer types
        (``yarn_only_types``),
      * optional head-wise sigmoid gating of the attention output
        (``use_head_wise_attn_gate``).
    """
    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        max_position: int = 4096 * 32,
        head_dim: int | None = None,
        rms_norm_eps: float = 1e-06,
        qkv_bias: bool = False,
        rope_theta: float | list[float] | None = 10000,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        rope_scaling: dict[str, Any] | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
        # Step3p5 specific args
        sliding_window: int | None = None,
        use_head_wise_attn_gate: bool = False,
        layer_types: list = None,
        use_rope_layers: list = None,
        yarn_only_types: list = None,
        swa_num_attention_heads: int | None = None,
        partial_rotary_factor: float = 1.0,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.total_num_heads = num_heads
        tp_size = get_tensor_model_parallel_world_size()
        self.layer_idx = extract_layer_index(prefix)
        # Prefer the explicit per-layer list; otherwise alternate sliding
        # window by layer parity (even layers use the window).
        if layer_types:
            enable_sliding_window = layer_types[self.layer_idx] == "sliding_attention"
        else:
            enable_sliding_window = self.layer_idx % 2 == 0
        # Drop rope_scaling (YaRN) on layer types not listed in
        # ``yarn_only_types``.
        # NOTE(review): this indexes ``layer_types`` without a None-check, so
        # passing ``yarn_only_types`` while ``layer_types`` is None would
        # raise — presumably callers always provide both; confirm upstream.
        if yarn_only_types and layer_types[self.layer_idx] not in yarn_only_types:
            rope_scaling = None
        if sliding_window is not None and enable_sliding_window:
            # NOTE(review): this self-assignment is a no-op, kept for clarity
            # that the window stays enabled on this branch.
            sliding_window = sliding_window
            # Sliding-window layers may use an alternate head count.
            if swa_num_attention_heads is not None:
                num_heads = swa_num_attention_heads
                self.total_num_heads = swa_num_attention_heads
        else:
            sliding_window = None
        # Per-layer rope theta: a list is indexed by layer, a scalar is shared.
        if isinstance(rope_theta, list):
            rope_theta = rope_theta[self.layer_idx]
        self.rank = get_tensor_model_parallel_rank()
        self.partial_rotary_factor = partial_rotary_factor
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim or hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=qkv_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        if rope_scaling is not None and not isinstance(rope_scaling, dict):
            raise ValueError("rope_scaling must be a dict for Step3p5Attention.")
        # Assemble the rope parameter dict expected by get_rope(), folding in
        # theta and the partial-rotary factor.
        rope_parameters: dict[str, Any] = (
            dict(rope_scaling) if rope_scaling is not None else {}
        )
        rope_parameters.setdefault("rope_type", "default")
        rope_parameters["rope_theta"] = self.rope_theta
        rope_parameters["partial_rotary_factor"] = partial_rotary_factor
        self.rotary_emb = get_rope(
            head_size=self.head_dim,
            max_position=max_position,
            rope_parameters=rope_parameters,
        )
        # Per-head RMS norms applied head-wise to q and k in forward().
        self.q_norm = GemmaRMSNorm(self.head_dim, rms_norm_eps)
        self.k_norm = GemmaRMSNorm(self.head_dim, rms_norm_eps)
        self.use_head_wise_attn_gate = use_head_wise_attn_gate
        if use_head_wise_attn_gate:
            # Produces one scalar gate per attention head from the input.
            self.g_proj = ColumnParallelLinear(
                hidden_size,
                self.total_num_heads,
                bias=False,
                quant_config=quant_config,
                prefix=f"{prefix}.g_proj",
            )
        self.use_rope = True
        if use_rope_layers:
            self.use_rope = use_rope_layers[self.layer_idx]
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            per_layer_sliding_window=sliding_window,
            attn_type=attn_type,
        )
        self.max_position_embeddings = max_position
        # Only full or half rotary embedding is supported.
        assert self.partial_rotary_factor == 1 or self.partial_rotary_factor == 0.5
        self.rotary_dim = (
            self.head_dim if self.partial_rotary_factor == 1 else self.head_dim // 2
        )
    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Compute attention output for ``hidden_states`` at ``positions``."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # Add qk-norm inline similar to Qwen3 MOE attention
        q_by_head = q.view(*q.shape[:-1], q.shape[-1] // self.head_dim, self.head_dim)
        q_by_head = self.q_norm(q_by_head.contiguous())
        q = q_by_head.view(q.shape)
        k_by_head = k.view(*k.shape[:-1], k.shape[-1] // self.head_dim, self.head_dim)
        k_by_head = self.k_norm(k_by_head.contiguous())
        k = k_by_head.view(k.shape)
        if self.use_rope:
            q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        if self.use_head_wise_attn_gate:
            # Scale each head's output by a sigmoid gate computed from the
            # (pre-attention) input hidden states.
            extra_dims, _ = self.g_proj(hidden_states)
            output = (
                attn_output.view(*attn_output.shape[:-1], self.num_heads, self.head_dim)
                * extra_dims.unsqueeze(-1).sigmoid()
            )
            attn_output = output.view(*attn_output.shape)
        output, _ = self.o_proj(attn_output)
        return output
class FusedMoEBlock(nn.Module):
    """Mixture-of-experts block for Step3p5.

    Combines an FP32 router (with a non-trainable score-correction bias),
    routed experts executed via ``SharedFusedMoE``, and a shared-expert MLP
    that runs on every token.  Tensor-parallel reduction is deferred until
    after the shared-expert output has been added.
    """
    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ):
        super().__init__()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.layer_idx = extract_layer_index(prefix)
        self.ep_size = get_ep_group().device_group.size()
        self.ep_rank = get_ep_group().device_group.rank()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        parallel_config = vllm_config.parallel_config
        self.hidden_size = config.hidden_size
        # EPLB bookkeeping: physical experts = logical experts + redundant
        # replicas, partitioned evenly across expert-parallel ranks.
        self.enable_eplb = parallel_config.enable_eplb
        self.n_routed_experts = config.moe_num_experts
        self.n_logical_experts = self.n_routed_experts
        self.n_redundant_experts = parallel_config.eplb_config.num_redundant_experts
        self.n_physical_experts = self.n_logical_experts + self.n_redundant_experts
        self.n_local_physical_experts = self.n_physical_experts // self.ep_size
        self.physical_expert_start = self.ep_rank * self.n_local_physical_experts
        self.physical_expert_end = (
            self.physical_expert_start + self.n_local_physical_experts
        )
        if self.tp_size > config.moe_num_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {config.moe_num_experts}."
            )
        # Router gate kept in FP32 (see FP32ReplicatedLinear) for stability.
        self.gate = FP32ReplicatedLinear(
            config.hidden_size,
            config.moe_num_experts,
            bias=False,
            quant_config=None,
            params_dtype=torch.float32,  # Use FP32 for higher precision.
            prefix=f"{prefix}.gate",
        )
        self.use_moe_router_bias = config.use_moe_router_bias
        assert self.use_moe_router_bias, "Only support use_moe_router_bias is true."
        self.routed_scaling_factor = config.moe_router_scaling_factor
        # Non-trainable per-expert score-correction bias; values are loaded
        # from the checkpoint.
        self.router_bias = nn.Parameter(
            torch.zeros(config.moe_num_experts, dtype=torch.float32),
            requires_grad=False,
        )
        self.need_fp32_gate = config.need_fp32_gate
        assert self.need_fp32_gate, (
            "Router logits must use FP32 precision for numerical stability."
        )
        # Select the expert activation: plain SiLU, or the clipped
        # "swiglustep" variant when a per-layer swiglu limit is configured.
        activation = "silu"
        swiglu_limits = config.swiglu_limits or []
        swiglu_limit = (
            swiglu_limits[self.layer_idx]
            if self.layer_idx < len(swiglu_limits)
            else None
        )
        if swiglu_limit not in (None, 0):
            swiglu_limit = float(swiglu_limit)
            assert swiglu_limit == 7.0, (
                "Swiglu limit in fused moe block only suport 7.0 now."
            )
            activation = "swiglustep"
        logger.debug(
            "step3p5 layer_idx: %s, activation: %s, limit: %s",
            self.layer_idx,
            activation,
            swiglu_limit,
        )
        # Shared expert runs on all tokens; its reduce is deferred
        # (reduce_results=False) so it can be summed with routed output first.
        self.share_expert = Step3p5MLP(
            config=config,
            hidden_size=self.hidden_size,
            intermediate_size=config.share_expert_dim,
            hidden_act="silu",
            reduce_results=False,
            quant_config=quant_config,
            prefix=f"{prefix}.share_expert",
        )
        self.experts = SharedFusedMoE(
            shared_experts=self.share_expert,
            gate=self.gate,
            num_experts=config.moe_num_experts,
            top_k=config.moe_top_k,
            hidden_size=config.hidden_size,
            intermediate_size=config.moe_intermediate_size,
            reduce_results=False,
            renormalize=config.norm_expert_weight,
            quant_config=quant_config,
            activation=activation,
            prefix=f"{prefix}.experts",
            scoring_func=getattr(config, "moe_router_activation", "sigmoid"),
            e_score_correction_bias=self.router_bias,
            routed_scaling_factor=config.moe_router_scaling_factor,
            enable_eplb=self.enable_eplb,
            num_redundant_experts=self.n_redundant_experts,
            router_logits_dtype=torch.float32,
        )
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Run MoE over ``hidden_states``; output has the same shape."""
        num_tokens, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        if self.experts.is_internal_router:
            # In this case, the gate/router runs inside the FusedMoE class
            fused_moe_out = self.experts(
                hidden_states=hidden_states, router_logits=hidden_states
            )
        else:
            # router_logits: (num_tokens, n_experts)
            router_logits, _ = self.gate(hidden_states)
            fused_moe_out = self.experts(
                hidden_states=hidden_states, router_logits=router_logits
            )
        shared_output, final_hidden_states = fused_moe_out
        if self.share_expert is None:
            assert shared_output is None
        if self.share_expert is not None:
            assert shared_output is not None
            final_hidden_states += shared_output
        if self.tp_size > 1:
            # Combine partial sums across TP ranks (reduction was deferred
            # above so shared + routed outputs are reduced together).
            final_hidden_states = self.experts.maybe_all_reduce_tensor_model_parallel(
                final_hidden_states
            )
        return final_hidden_states.view(num_tokens, hidden_dim)
class Step3p5DecoderLayer(nn.Module):
    """One Step3p5 transformer layer: attention + (MoE or dense MLP),
    each wrapped in a pre-norm residual connection.

    Whether the layer gets a ``FusedMoEBlock`` or a dense ``Step3p5MLP`` is
    decided by ``config.moe_layers_enum`` (comma-separated layer indices) or,
    when absent, by defaulting every layer except layer 0 to MoE.
    """
    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.hidden_size = config.hidden_size
        layer_idx = extract_layer_index(prefix)
        self.layer_idx = layer_idx
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        # Sliding window is handled per-layer inside Step3p5Attention, so the
        # global cache-config value is cleared here.
        if cache_config is not None:
            cache_config.sliding_window = None
        if config.att_impl_type == "GQA":
            num_attention_heads = None
            num_attention_groups = None
            head_dim = None
            # ``attention_other_setting`` optionally overrides head counts /
            # head_dim for layers whose type matches its "attention_type".
            if (
                getattr(config, "attention_other_setting", None)
                and getattr(config, "layer_types", [])
                and config.layer_types[layer_idx]
                == config.attention_other_setting["attention_type"]
            ):
                num_attention_heads = config.attention_other_setting[
                    "num_attention_heads"
                ]
                num_attention_groups = config.attention_other_setting[
                    "num_attention_groups"
                ]
                head_dim = config.attention_other_setting["head_dim"]
            partial_rotary_factors = getattr(config, "partial_rotary_factors", [])
            self.self_attn = Step3p5Attention(
                hidden_size=self.hidden_size,
                num_heads=num_attention_heads
                if num_attention_heads
                else config.num_attention_heads,
                max_position=config.max_position_embeddings,
                num_kv_heads=num_attention_groups
                if num_attention_groups
                else config.num_attention_groups,
                rope_theta=config.rope_theta,
                rms_norm_eps=config.rms_norm_eps,
                qkv_bias=getattr(config, "attention_bias", False),
                head_dim=head_dim if head_dim else getattr(config, "head_dim", None),
                cache_config=cache_config,
                quant_config=quant_config,
                rope_scaling=getattr(config, "rope_scaling", None),
                sliding_window=getattr(config, "sliding_window", None),
                use_head_wise_attn_gate=getattr(
                    config, "use_head_wise_attn_gate", False
                ),
                layer_types=getattr(config, "layer_types", []),
                use_rope_layers=getattr(config, "use_rope_layers", []),
                yarn_only_types=getattr(config, "yarn_only_types", []),
                partial_rotary_factor=partial_rotary_factors[layer_idx]
                if partial_rotary_factors
                else 1.0,
                prefix=f"{prefix}.self_attn",
            )
        else:
            raise ValueError(
                f"Unsupported attention implementation: {config.att_impl_type}"
            )
        self.use_moe = False
        self.tp_group = get_tp_group()
        # Custom fused all-reduce only applies with TP > 1 and no data
        # parallelism.
        self.use_fused_all_reduce = (
            get_tensor_model_parallel_world_size() > 1
            and get_dp_group().world_size == 1
        )
        if self.use_fused_all_reduce:
            logger.warning_once("Enable custom fused all reduce...")
        else:
            logger.warning_once("Disable custom fused all reduce...")
        moe_layers_enum = getattr(config, "moe_layers_enum", None)
        if moe_layers_enum is not None:
            moe_layers_idx = [int(i) for i in moe_layers_enum.strip().split(",")]
        else:
            # Default: every layer except layer 0 is a MoE layer.
            moe_layers_idx = [i for i in range(1, config.num_hidden_layers)]
        if layer_idx in moe_layers_idx:
            self.moe = FusedMoEBlock(
                vllm_config,
                prefix=f"{prefix}.moe",
            )
            self.use_moe = True
        else:
            self.mlp = Step3p5MLP(
                config=config,
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act="silu",
                quant_config=quant_config,
                reduce_results=True,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.post_attention_layernorm = GemmaRMSNorm(
            config.hidden_size, config.rms_norm_eps
        )
        self.prefix = prefix
    def add_and_maybe_inplace_all_reduce(
        self, in1: torch.Tensor, in2: torch.Tensor
    ) -> torch.Tensor:
        # Sum two tensors, all-reducing the result across TP ranks when the
        # fused path is enabled.
        # NOTE(review): not called from forward() in this file — presumably
        # used by a compiled/fused code path elsewhere; confirm before removal.
        if not self.use_fused_all_reduce:
            return in1 + in2
        return self.tp_group.all_reduce(in1 + in2)
    def forward(
        self, positions: torch.Tensor, hidden_states: torch.Tensor
    ) -> torch.Tensor:
        """Pre-norm residual attention followed by pre-norm residual FFN."""
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states += residual
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        if self.use_moe:
            ffn_output = self.moe(hidden_states)
        else:
            ffn_output = self.mlp(hidden_states)
        hidden_states = ffn_output + residual
        return hidden_states
@support_torch_compile
class Step3p5Model(nn.Module):
    """Step3p5 decoder stack with pipeline-parallel support.

    Handles embeddings (first PP rank, plus last rank when word embeddings
    are tied), the layer range owned by this rank, the final norm (last PP
    rank), and checkpoint weight loading for both the stacked-3D and
    per-expert MoE checkpoint formats.
    """
    def __init__(self, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        self.vllm_config = vllm_config
        config = vllm_config.model_config.hf_config
        self.vocab_size = config.vocab_size
        self.config = config
        self.moe_num_experts = config.moe_num_experts
        # Embeddings live on the first PP rank; with tied word embeddings the
        # last rank also needs them for the LM head.
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                config.hidden_size,
            )
        else:
            self.embed_tokens = PPMissingLayer()
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: Step3p5DecoderLayer(
                vllm_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states"], config.hidden_size
        )
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings."""
        return self.embed_tokens(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run this rank's layer range; returns hidden states on the last PP
        rank and ``IntermediateTensors`` on earlier ranks."""
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
        for i in range(self.start_layer, self.end_layer):
            layer = self.layers[i]
            hidden_states = layer(positions, hidden_states)
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {
                    "hidden_states": hidden_states,
                }
            )
        return hidden_states
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights into this module.

        Handles, in order of precedence per weight name:
          1. per-expert MoE weights (new LLM-Compressor format),
          2. stacked q/k/v and gate/up projections,
          3. packed-3D MoE expert tensors (old format),
          4. everything else via the default loader.
        Spec-decode (MTP) weights and layers beyond the main depth are
        skipped here; they are loaded by Step3p5MTP.
        """
        config = self.config
        assert config.num_attention_groups > 1, "Only support GQA"
        # NOTE(review): intentionally empty — kept so the fractional-slice
        # loading branch below stays available; confirm before removing.
        qkv_params_mapping = []
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        # Old packed 3D format: .moe.gate_proj.weight [num_experts, out, in]
        expert_params_mapping = [
            (".moe.experts.w13_weight", ".moe.gate_proj.weight", "w1"),
            (".moe.experts.w13_weight", ".moe.up_proj.weight", "w3"),
            (".moe.experts.w2_weight", ".moe.down_proj.weight", "w2"),
        ]
        # New per-expert format: .moe.experts.E.gate_proj.weight_packed [out, in]
        per_expert_mapping = FusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.moe_num_experts,
        )
        disable_moe_stacked_params = [data[1] for data in expert_params_mapping]
        for name, loaded_weight in weights:
            # Normalize names: local_name has no "model." prefix, full_name
            # always does, so both checkpoint layouts are accepted.
            if name.startswith("model."):
                local_name = name[len("model.") :]
                full_name = name
            else:
                local_name = name
                full_name = f"model.{name}" if name else "model"
            spec_layer = get_spec_layer_idx_from_weight_name(config, full_name)
            if spec_layer is not None:
                continue  # skip spec decode layers for main model
            # Skip any layers beyond the main model's depth (e.g., MTP layers)
            if full_name.startswith("model.layers."):
                parts = full_name.split(".")
                if len(parts) > 2 and parts[2].isdigit():
                    layer_idx = int(parts[2])
                    if layer_idx >= config.num_hidden_layers:
                        continue
            # Per-expert MoE weights (new format from LLM Compressor):
            # .moe.experts.{E}.{gate,up,down}_proj.{weight_packed,scale,...}
            # Each weight is individual per-expert, not stacked 3D.
            if ".moe.experts." in local_name:
                is_expert_weight = False
                for mapping in per_expert_mapping:
                    param_name, weight_name, expert_id, shard_id = mapping
                    if weight_name not in local_name:
                        continue
                    is_expert_weight = True
                    name_mapped = local_name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name_mapped, self):
                        continue
                    if name_mapped not in params_dict:
                        continue
                    param = params_dict[name_mapped]
                    weight_loader = typing.cast(
                        Callable[..., bool], param.weight_loader
                    )
                    success = weight_loader(
                        param,
                        loaded_weight,
                        name_mapped,
                        shard_id=shard_id,
                        expert_id=expert_id,
                        return_success=True,
                    )
                    if success:
                        loaded_params.add(name_mapped)
                        break
                else:
                    if (
                        not is_expert_weight
                        and not is_pp_missing_parameter(local_name, self)
                        and local_name in params_dict
                    ):
                        # Not an expert proj — use default loader
                        # (e.g. share_expert weights if they matched)
                        param = params_dict[local_name]
                        weight_loader = getattr(
                            param,
                            "weight_loader",
                            default_weight_loader,
                        )
                        weight_loader(param, loaded_weight)
                        loaded_params.add(local_name)
                continue
            # Stacked projections (q/k/v -> qkv_proj, gate/up -> gate_up_proj).
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in local_name:
                    continue
                # Do not stack names reserved for the packed MoE format.
                if any(
                    disable_moe_stacked_param in local_name
                    for disable_moe_stacked_param in disable_moe_stacked_params
                ):
                    continue
                replaced_name = local_name.replace(weight_name, param_name)
                if is_pp_missing_parameter(replaced_name, self):
                    continue
                if replaced_name not in params_dict:
                    continue
                param = params_dict[replaced_name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                loaded_params.add(replaced_name)
                break
            else:
                # Packed 3D MoE tensors: iterate over the leading expert dim.
                for param_name, weight_name, shard_id in expert_params_mapping:
                    if weight_name not in local_name:
                        continue
                    replaced_name = local_name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(replaced_name, self):
                        continue
                    if (
                        replaced_name.endswith(".bias")
                        or replaced_name.endswith("_bias")
                    ) and replaced_name not in params_dict:
                        continue
                    if replaced_name not in params_dict:
                        continue
                    param = params_dict[replaced_name]
                    weight_loader = param.weight_loader
                    moe_expert_num = self.moe_num_experts
                    # Per-tensor global scales (e.g. weight_global_scale)
                    # have shape [1] in compressed-tensors NVFP4 checkpoints.
                    # Expand to per-expert before the iteration loop.
                    if (
                        loaded_weight.shape[0] == 1
                        and loaded_weight.shape[0] != moe_expert_num
                    ):
                        loaded_weight = loaded_weight.expand(
                            moe_expert_num, *loaded_weight.shape[1:]
                        )
                    assert loaded_weight.shape[0] == moe_expert_num
                    for expert_id in range(moe_expert_num):
                        loaded_weight_expert = loaded_weight[expert_id]
                        weight_loader(
                            param,
                            loaded_weight_expert,
                            replaced_name,
                            shard_id=shard_id,
                            expert_id=expert_id,
                        )
                    loaded_params.add(replaced_name)
                    break
                else:
                    # Fractional-slice loading via qkv_params_mapping (empty
                    # by default — see the note above), copying into a slice
                    # of the output dimension.
                    for (
                        param_name,
                        weight_name,
                        start_idx,
                        end_idx,
                    ) in qkv_params_mapping:
                        if weight_name not in local_name:
                            continue
                        replaced_name = local_name.replace(weight_name, param_name)
                        if is_pp_missing_parameter(replaced_name, self):
                            continue
                        if replaced_name not in params_dict:
                            continue
                        param = params_dict[replaced_name]
                        dim = param.shape[param.output_dim]
                        begin_idx = int(start_idx * dim)
                        end_idx = int(end_idx * dim)
                        param_slice = param.narrow(
                            param.output_dim, begin_idx, end_idx - begin_idx
                        )
                        param_slice.copy_(loaded_weight)
                        loaded_params.add(replaced_name)
                        break
                    else:
                        # Fallback: plain (non-stacked) parameters.
                        if is_pp_missing_parameter(local_name, self):
                            continue
                        if "expert_bias" in local_name:
                            logger.warning_once("ignore expert_bias")
                            continue
                        if local_name not in params_dict:
                            continue
                        param = params_dict[local_name]
                        weight_loader = getattr(
                            param, "weight_loader", default_weight_loader
                        )
                        weight_loader(param, loaded_weight)
                        loaded_params.add(local_name)
        return loaded_params
class Step3p5ForCausalLM(nn.Module, SupportsPP, MixtureOfExperts):
    """Causal-LM wrapper around Step3p5Model with LM head, pipeline-parallel
    support, and MixtureOfExperts/EPLB metadata."""
    # Checkpoints store shared-expert weights as ".share_expert."; remap them
    # under the MoE block's module path.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={".share_expert.": ".moe.share_expert."}
    )
    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
    ):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.model = Step3p5Model(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # LM head and logits processor only exist on the last PP rank.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=vllm_config.quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
            self.logits_processor = LogitsProcessor(config.vocab_size)
        else:
            self.lm_head = PPMissingLayer()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )
        # Set MoE hyperparameters
        self.moe_layers: list[FusedMoEBlock] = []
        for layer in self.model.layers:
            if isinstance(layer, PPMissingLayer):
                continue
            assert isinstance(layer, Step3p5DecoderLayer)
            if hasattr(layer, "moe") and isinstance(layer.moe, FusedMoEBlock):
                self.moe_layers.append(layer.moe)
        self.expert_weights = []
        assert len(self.moe_layers) > 0, "No MoE layers found in the model."
        # All MoE layers share the same expert counts; read them from one.
        example_layer = self.moe_layers[0]
        self.num_moe_layers = len(self.moe_layers)
        self.num_expert_groups = 1
        self.num_shared_experts = 0
        self.num_logical_experts = example_layer.n_logical_experts
        self.num_physical_experts = example_layer.n_physical_experts
        self.num_local_physical_experts = example_layer.n_local_physical_experts
        self.num_routed_experts = example_layer.n_routed_experts
        self.num_redundant_experts = example_layer.n_redundant_experts
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ):
        """Delegate to the underlying decoder stack."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states
    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Final norm is applied here (not in the model forward) before the
        # LM head projection.
        hidden_states = self.model.norm(hidden_states)
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Token embedding lookup (delegated to the decoder stack)."""
        return self.model.embed_tokens(input_ids)
    def set_eplb_state(
        self,
        expert_load_view: torch.Tensor,
        logical_to_physical_map: torch.Tensor,
        logical_replica_count: torch.Tensor,
    ) -> None:
        """Wire shared EPLB state tensors into every MoE layer."""
        for layer_idx, layer in enumerate(self.moe_layers):
            experts = layer.experts
            assert isinstance(experts, FusedMoE)
            # Register the expert weights.
            self.expert_weights.append(experts.get_expert_weights())
            experts.set_eplb_state(
                moe_layer_idx=layer_idx,
                expert_load_view=expert_load_view,
                logical_to_physical_map=logical_to_physical_map,
                logical_replica_count=logical_replica_count,
            )
    def update_physical_experts_metadata(
        self,
        num_physical_experts: int,
        num_local_physical_experts: int,
    ) -> None:
        """Refresh expert counts after EPLB rebalancing and update expert
        maps in every MoE layer."""
        assert self.num_local_physical_experts == num_local_physical_experts
        self.num_physical_experts = num_physical_experts
        self.num_local_physical_experts = num_local_physical_experts
        self.num_redundant_experts = num_physical_experts - self.num_logical_experts
        for layer in self.moe_layers:
            assert isinstance(layer, FusedMoEBlock)
            layer.n_local_physical_experts = num_local_physical_experts
            layer.n_physical_experts = num_physical_experts
            layer.n_redundant_experts = self.num_redundant_experts
            layer.experts.update_expert_map()
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights via AutoWeightsLoader with the HF-name remapping."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
def get_spec_layer_idx_from_weight_name(
    config: ModelConfig, weight_name: str
) -> int | None:
    """Map a checkpoint weight name to its speculative (MTP) layer index.

    MTP layers are numbered immediately after the main model's layers, i.e.
    ``num_hidden_layers .. num_hidden_layers + num_nextn_predict_layers - 1``.
    Returns the matching index, or ``None`` when the config declares no
    speculative layers or the weight belongs to the main model.
    """
    if not hasattr(config, "num_nextn_predict_layers"):
        return None
    num_spec_layers = config.num_nextn_predict_layers
    if not (num_spec_layers > 0):
        return None
    base = config.num_hidden_layers
    for spec_idx in range(base, base + num_spec_layers):
        # Accept both the bare Step3p5Model prefix and the Step3p5MTP prefix.
        prefixes = (f"layers.{spec_idx}.", f"model.layers.{spec_idx}.")
        if weight_name.startswith(prefixes):
            return spec_idx
    return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/step3p5.py",
"license": "Apache License 2.0",
"lines": 868,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/step3p5_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.layernorm import GemmaRMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.sequence import IntermediateTensors
from .step3p5 import Step3p5DecoderLayer, get_spec_layer_idx_from_weight_name
from .utils import maybe_prefix
# Module-level logger for the Step3p5 MTP implementation.
logger = init_logger(__name__)
class SharedHead(nn.Module):
    """Final RMS norm plus LM head shared by an MTP layer.

    ``forward`` applies only the normalization; the ``head`` projection is
    invoked separately by the logits processor (see
    ``Step3p5AMultiTokenPredictor.compute_logits`` in this file).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None = None,
    ) -> None:
        super().__init__()
        self.norm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.head = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only normalize here; the head projection happens at the call site.
        normalized = self.norm(hidden_states)
        return normalized
class Step3p5AMultiTokenPredictorLayer(nn.Module):
    """One multi-token-prediction (MTP) speculative layer.

    Fuses the normalized token embedding with the normalized hidden state
    from the previous step through a linear projection (``eh_proj``), then
    runs the result through a standard Step3p5 decoder block.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str,
    ) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        # Separate norms for the embedding path and the hidden-state path.
        self.enorm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
        self.hnorm = GemmaRMSNorm(config.hidden_size, config.rms_norm_eps)
        # Projects [embedding ; hidden] (2 * hidden_size) back to hidden_size.
        self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
        self.shared_head = SharedHead(config=config, quant_config=quant_config)
        self.mtp_block = Step3p5DecoderLayer(
            vllm_config,
            prefix=f"{prefix}.mtp_block",
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        # The caller must always supply the token embeddings.
        assert inputs_embeds is not None
        normed_embeds = self.enorm(inputs_embeds)
        normed_hidden = self.hnorm(previous_hidden_states)
        fused = self.eh_proj(torch.cat([normed_embeds, normed_hidden], dim=-1))
        return self.mtp_block(positions=positions, hidden_states=fused)
class Step3p5AMultiTokenPredictor(nn.Module):
    """Container for the MTP layers, keyed by their absolute layer index.

    MTP layers are numbered starting at ``num_hidden_layers`` so that
    checkpoint weight names (``model.layers.{idx}...``) map directly onto
    module paths.
    """
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers
        # to map the exact layer index from weights
        self.layers = torch.nn.ModuleDict(
            {
                str(idx): Step3p5AMultiTokenPredictorLayer(
                    vllm_config,
                    f"{prefix}.layers.{idx}",
                )
                for idx in range(
                    self.mtp_start_layer_idx,
                    self.mtp_start_layer_idx + self.num_mtp_layers,
                )
            }
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run the MTP layer selected by ``spec_step_idx`` (modulo the
        number of MTP layers)."""
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        current_step_idx = spec_step_idx % self.num_mtp_layers
        return self.layers[str(self.mtp_start_layer_idx + current_step_idx)](
            input_ids,
            positions,
            previous_hidden_states,
            inputs_embeds,
            current_step_idx,
        )
    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Normalize via the step's shared head, then project to logits."""
        current_step_idx = spec_step_idx % self.num_mtp_layers
        mtp_layer = self.layers[str(self.mtp_start_layer_idx + current_step_idx)]
        logits = self.logits_processor(
            mtp_layer.shared_head.head, mtp_layer.shared_head(hidden_states)
        )
        return logits
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Token embedding lookup."""
        return self.embed_tokens(input_ids)
class Step3p5MTP(nn.Module):
    """Multi-token-prediction (speculative decoding) model for Step3p5."""
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.vllm_config = vllm_config
        self.model = Step3p5AMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Token embedding lookup (delegated to the predictor)."""
        return self.model.embed_input_ids(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run one speculative step on top of the target model's
        ``hidden_states``."""
        hidden_states = self.model(
            input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
        )
        return hidden_states
    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        return self.model.compute_logits(hidden_states, spec_step_idx)
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load only MTP-related weights (plus embeddings) from *weights*.

        Weight names are rewritten via ``_rewrite_spec_layer_name`` so that
        transformer-block weights land under ``mtp_block``.  Raises
        RuntimeError when any expected non-optional parameter is missing
        from the checkpoint.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        # Packed 3D MoE tensors: [num_experts, out, in].
        expert_params_mapping = [
            (".moe.experts.w13_weight", ".moe.gate_proj.weight", "w1"),
            (".moe.experts.w13_weight", ".moe.up_proj.weight", "w3"),
            (".moe.experts.w2_weight", ".moe.down_proj.weight", "w2"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            # Only embeddings and spec-layer weights belong to this model.
            if "embed_tokens" not in name and spec_layer is None:
                continue
            name = self._rewrite_spec_layer_name(spec_layer, name)
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                if "experts" in name or "moe" in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Packed MoE expert tensors: iterate the leading expert dim.
                for mapping in expert_params_mapping:
                    param_name, weight_name, shard_id = mapping
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    # Skip loading extra bias for GPTQ models.
                    if (
                        name.endswith(".bias") or name.endswith("_bias")
                    ) and name not in params_dict:
                        continue
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    for expert_id in range(loaded_weight.shape[0]):
                        loaded_weight_expert = loaded_weight[expert_id]
                        weight_loader(
                            param,
                            loaded_weight_expert,
                            name,
                            shard_id=shard_id,
                            expert_id=expert_id,
                        )
                    loaded_params.add(name)
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if (
                        name.endswith(".bias")
                        and name not in params_dict
                        or "tok_embeddings" in name
                    ):
                        continue
                    # Normalize legacy checkpoint naming before lookup.
                    if spec_layer is not None and ".transformer." in name:
                        name = name.replace(".transformer.", ".")
                    if "shared_head" in name:
                        name = name.replace("shared_head.output", "shared_head.head")
                    if "embed_tokens" in name:
                        assert (
                            hasattr(self.config, "num_nextn_predict_layers")
                            and self.config.num_nextn_predict_layers > 0
                        )
                        name = "model.embed_tokens.weight"
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        params_need_to_load = set(params_dict.keys())
        # Some KV cache scales are optional: checkpoints may omit them and vLLM
        # will fall back to default scales during initialization.
        optional_params = {
            name
            for name, param in params_dict.items()
            if name.endswith((".k_scale", ".v_scale", ".q_scale", ".prob_scale"))
            and getattr(param, "numel", lambda: 0)() == 1
            and getattr(param, "requires_grad", False) is False
        }
        params_need_to_load -= optional_params
        if params_need_to_load != loaded_params:
            missing_params = list(params_need_to_load - loaded_params)
            param_name_example = missing_params[0]
            raise RuntimeError(
                "Some parameters like "
                f"{param_name_example} are not in the checkpoint and will falsely "
                "use random initialization"
            )
        return loaded_params
    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in transformer layer block for spec layer
        """
        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        spec_layer_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                break
        if not spec_layer_weight:
            # treat rest weights as weights for transformer layer block
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        return name
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/step3p5_mtp.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/reasoning/step3p5_reasoning_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable, Sequence
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
)
from vllm.entrypoints.openai.engine.protocol import DeltaMessage
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
)
from vllm.reasoning.basic_parsers import BaseThinkingReasoningParser
from vllm.tokenizers import TokenizerLike
class Step3p5ReasoningParser(BaseThinkingReasoningParser):
    """
    Reasoning parser for Step3p5 model.

    Step3p5 uses the <think>...</think> format, but it tends to emit an extra
    newline immediately before and/or after the </think> token. This parser trims:
    - the newline right before </think>
    - the newline right after </think>
    """

    @property
    def start_token(self) -> str:
        """Literal token that opens a reasoning span."""
        return "<think>"

    @property
    def end_token(self) -> str:
        """Literal token that closes a reasoning span."""
        return "</think>"

    def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)
        # Used to hold a trailing "\n" from reasoning content so we can decide
        # whether it is immediately before </think>.
        self._pending_reasoning_newline = False
        # Tracks whether we've seen </think> but are still waiting for one more
        # token to confirm the end.
        self._end_token_pending = False

    def is_reasoning_end(self, input_ids: Sequence[int]) -> bool:
        """Non-streaming check: scan the full id sequence for the end marker."""
        return self._is_reasoning_end_from_ids(input_ids)

    def is_reasoning_end_streaming(
        self, input_ids: Sequence[int], delta_ids: Iterable[int]
    ) -> bool:
        # Only examine newly generated tokens; they may contain multiple ids.
        # NOTE(review): delta_ids is annotated Iterable but the helper uses
        # len() and indexing, so callers must pass a sequence — confirm.
        return self._is_reasoning_end_from_ids(delta_ids)

    def _is_reasoning_end_from_ids(self, input_ids: Sequence[int]) -> bool:
        """Stateful end-detection over the given ids.

        Reasoning is considered finished only once at least one token has been
        generated *after* </think>; the wait is tracked in
        ``self._end_token_pending`` across streaming calls.
        """
        # Scan backwards to find the last special token, <think> or </think>.
        last_special = None
        last_idx = -1
        for i in range(len(input_ids) - 1, -1, -1):
            token_id = input_ids[i]
            if token_id == self.start_token_id:
                last_special = "start"
                last_idx = i
                break
            if token_id == self.end_token_id:
                last_special = "end"
                last_idx = i
                break
        if last_special == "start":
            # If we're already waiting for one token after </think>, do not
            # clear the pending state just because the prompt contains <think>.
            # Streaming deltas should not include <think> for this model.
            if self._end_token_pending:
                return False
            # A start token after any end token means reasoning is ongoing.
            self._end_token_pending = False
            return False
        if last_special == "end":
            # Require at least one token after </think> before ending.
            if last_idx < len(input_ids) - 1:
                self._end_token_pending = False
                return True
            self._end_token_pending = True
            return False
        # No special tokens in this input. If we were waiting for one token
        # after </think>, any new token completes the end.
        if self._end_token_pending and input_ids:
            self._end_token_pending = False
            return True
        return False

    def extract_reasoning(
        self,
        model_output: str,
        request: ChatCompletionRequest | ResponsesRequest,
    ) -> tuple[str | None, str | None]:
        """Non-streaming split; strips the single padding newline around </think>.

        Returns (reasoning, content); either may be None when empty.
        """
        reasoning, content = super().extract_reasoning(model_output, request)
        if reasoning is not None:
            # Drop the newline the model emits right before </think>.
            reasoning = reasoning.removesuffix("\n")
        if content is not None:
            # Drop the newline the model emits right after </think>.
            content = content.removeprefix("\n")
        return reasoning or None, content or None

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """Streaming split into reasoning/content deltas.

        Buffers a trailing "\\n" of reasoning (via
        ``self._pending_reasoning_newline``) until the next delta shows
        whether </think> follows, so the padding newline is only dropped when
        it directly precedes the end token.
        """
        # Drop the immediate newline that models often emit after </think>.
        if previous_text.endswith(self.end_token) and delta_text:
            if delta_text == "\n":
                return None
            elif delta_text.startswith("\n"):
                remaining = delta_text.removeprefix("\n")
                return DeltaMessage(content=remaining) if remaining else None
        ret = super().extract_reasoning_streaming(
            previous_text,
            current_text,
            delta_text,
            previous_token_ids,
            current_token_ids,
            delta_token_ids,
        )
        if ret is None:
            return None
        # Compatibility path for models that don't generate the start token:
        # treat everything before </think> as reasoning and everything after
        # as content.
        if (
            self.start_token_id not in previous_token_ids
            and self.start_token_id not in delta_token_ids
        ):
            if self.end_token_id in delta_token_ids:
                end_index = delta_text.find(self.end_token)
                reasoning = delta_text[:end_index]
                content = delta_text[end_index + len(self.end_token) :]
                ret = DeltaMessage(reasoning=reasoning, content=content or None)
            elif self.end_token_id in previous_token_ids:
                ret = DeltaMessage(content=delta_text)
            else:
                ret = DeltaMessage(reasoning=delta_text)
        reasoning_to_output = ret.reasoning
        content_to_output = ret.content
        # Reasoning: handle the newline immediately before </think>.
        if reasoning_to_output is not None:
            if self._pending_reasoning_newline:
                # Previous delta's held newline was not before </think>; re-emit it.
                reasoning_to_output = "\n" + reasoning_to_output
                self._pending_reasoning_newline = False
            if reasoning_to_output.endswith("\n"):
                reasoning_to_output = reasoning_to_output.removesuffix("\n")
                if self.end_token in delta_text:
                    # Trailing "\n" is right before </think>, drop it.
                    self._pending_reasoning_newline = False
                else:
                    # Hold the trailing "\n" until we know whether </think> follows.
                    self._pending_reasoning_newline = True
        # Content: handle the newline immediately after </think>.
        if content_to_output is not None:
            # If we have content, reasoning must have ended.
            self._pending_reasoning_newline = False
            if self.end_token in delta_text and content_to_output.startswith("\n"):
                content_to_output = content_to_output.removeprefix("\n")
        reasoning_to_output = reasoning_to_output or None
        content_to_output = content_to_output or None
        if reasoning_to_output is None and content_to_output is None:
            return None
        return DeltaMessage(reasoning=reasoning_to_output, content=content_to_output)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/reasoning/step3p5_reasoning_parser.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/step3p5.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
from transformers.configuration_utils import PretrainedConfig
class Step3p5Config(PretrainedConfig):
    """HF-style configuration for Step3p5 models.

    Holds the transformer geometry, MoE layout, rope settings, and the
    attention-variant switches the model implementation reads. Defaults for
    ``bos_token_id``/``eos_token_id`` are resolved here (1 and ``[2, 3]``)
    and forwarded to ``PretrainedConfig``.
    """

    model_type = "step3p5"

    def __init__(
        self,
        hidden_size: int = 5120,
        intermediate_size: int = 13312,
        num_attention_heads: int = 40,
        num_attention_groups: int = 8,
        num_hidden_layers: int = 48,
        max_seq_len: int = 4096,
        vocab_size: int = 65536,
        rms_norm_eps: float = 1e-5,
        moe_every_n_layer: int = 2,
        use_moe: bool = False,
        moe_intermediate_size: int = 10240,
        moe_num_experts: int = 16,
        moe_top_k: int = 4,
        moe_layer_offset: int = 0,
        rope_theta: float | list[float] | None = 500000,
        rope_scaling: dict[str, Any] | None = None,
        head_dim: int | None = None,
        share_expert_dim: int | None = None,
        norm_expert_weight: bool = True,
        bos_token_id: list[int] | int | None = None,
        eos_token_id: list[int] | int | None = None,
        moe_router_activation: str = "softmax",
        moe_router_scaling_factor: float = 1.0,
        att_impl_type: str = "GQA",
        use_head_wise_attn_gate: bool = False,
        use_moe_router_bias: bool = True,
        need_fp32_gate: bool = True,
        layer_types: list[str] | None = None,
        use_rope_layers: list[bool] | None = None,
        yarn_only_types: list[str] | None = None,
        attention_other_setting: dict[str, Any] | None = None,
        num_nextn_predict_layers: int = 0,
        swiglu_limits: list[float] | None = None,
        swiglu_limits_shared: list[float] | None = None,
        max_position_embeddings: int | None = None,
        **kwargs,
    ):
        # --- core transformer geometry ---
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size
        self.rms_norm_eps = rms_norm_eps
        self.max_seq_len = max_seq_len
        self.max_position_embeddings = max_position_embeddings
        # --- attention ---
        self.num_attention_heads = num_attention_heads
        self.num_attention_groups = num_attention_groups
        self.head_dim = head_dim
        self.att_impl_type = att_impl_type
        self.use_head_wise_attn_gate = use_head_wise_attn_gate
        self.layer_types = layer_types
        self.use_rope_layers = use_rope_layers
        self.yarn_only_types = yarn_only_types
        self.attention_other_setting = attention_other_setting
        # --- rope ---
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        # --- MoE layout and router ---
        self.use_moe = use_moe
        self.moe_every_n_layer = moe_every_n_layer
        self.moe_intermediate_size = moe_intermediate_size
        self.moe_num_experts = moe_num_experts
        # Kept under both names: HF convention and the original field name.
        self.num_experts_per_tok = moe_top_k
        self.moe_top_k = moe_top_k
        self.moe_layer_offset = moe_layer_offset
        # Shared-expert width defaults to top_k routed experts' worth of FFN.
        self.share_expert_dim = (
            moe_intermediate_size * moe_top_k
            if share_expert_dim is None
            else share_expert_dim
        )
        self.norm_expert_weight = norm_expert_weight
        self.moe_router_activation = moe_router_activation
        self.moe_router_scaling_factor = moe_router_scaling_factor
        self.use_moe_router_bias = use_moe_router_bias
        self.need_fp32_gate = need_fp32_gate
        # --- MTP / activation clamping ---
        self.num_nextn_predict_layers = num_nextn_predict_layers
        self.swiglu_limits = swiglu_limits
        self.swiglu_limits_shared = swiglu_limits_shared
        # --- special tokens: resolve defaults once, store and forward ---
        resolved_bos = 1 if bos_token_id is None else bos_token_id
        resolved_eos = [2, 3] if eos_token_id is None else eos_token_id
        self.bos_token_id = resolved_bos
        self.eos_token_id = resolved_eos
        super().__init__(
            bos_token_id=resolved_bos,
            eos_token_id=resolved_eos,
            **kwargs,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/step3p5.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/spec_decode/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.v1.outputs import DraftTokenIds
from vllm.v1.worker.gpu.async_utils import async_copy_to_np
from vllm.v1.worker.gpu.input_batch import InputBatch
class DraftTokensHandler:
    """Stages draft token ids for hand-off to the scheduler.

    When the batch contains structured-output requests, the draft ids are
    copied device-to-host asynchronously on a side stream so the scheduler
    can validate them against the grammar; otherwise no transfer is done.
    """

    def __init__(self, device: torch.device | None = None):
        self.device = device
        # Dedicated stream + event so the D2H copy can overlap with compute.
        self.copy_stream = torch.cuda.Stream(device)
        self.copy_event = torch.cuda.Event()
        self.req_ids: list[str] = []
        self.draft_tokens_np: np.ndarray | None = None
        self.num_draft_tokens: int = 0

    def set_draft_tokens(
        self, input_batch: InputBatch, draft_tokens: torch.Tensor
    ) -> None:
        """Record this batch's draft tokens, starting an async copy if needed."""
        self.req_ids = input_batch.req_ids
        self.num_draft_tokens = draft_tokens.shape[1]
        if input_batch.has_structured_output_reqs:
            # The scheduler must validate these drafts, so transfer them back.
            # Chain the side stream after current work, then copy on it.
            self.copy_stream.wait_stream(torch.cuda.current_stream(self.device))
            with torch.cuda.stream(self.copy_stream):
                self.draft_tokens_np = async_copy_to_np(draft_tokens)
                self.copy_event.record()
        else:
            # No validation needed by the scheduler for this batch.
            self.draft_tokens_np = None

    def get_draft_tokens(self) -> DraftTokenIds | None:
        """Return the drafts recorded by set_draft_tokens, waiting on the copy."""
        if self.draft_tokens_np is None:
            # Only reached when async scheduling is disabled: emit placeholders.
            token_lists = [[-1] * self.num_draft_tokens for _ in self.req_ids]
        else:
            self.copy_event.synchronize()
            token_lists = self.draft_tokens_np.tolist()
        return DraftTokenIds(self.req_ids, token_lists)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/spec_decode/utils.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/model_executor/test_routed_experts_capture.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import types
import pytest
import torch
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.config import RoutingMethodType
from vllm.model_executor.layers.fused_moe.router.base_router import BaseRouter
pytestmark = pytest.mark.cpu_test
class DummyRouter(BaseRouter):
    """Router stub with a fixed routing table and an observable EPLB mapping."""

    @property
    def routing_method_type(self) -> RoutingMethodType:
        return RoutingMethodType.FUSED_TOPK

    def _compute_routing(self, hidden_states, router_logits, indices_type):
        # Deterministic 2x2 expert selection; all weights are one.
        chosen = torch.arange(1, 5, dtype=torch.int64).reshape(2, 2)
        return torch.ones_like(chosen, dtype=torch.float32), chosen

    def _apply_eplb_mapping(self, topk_ids: torch.Tensor) -> torch.Tensor:
        # Constant offset makes the mapping observable without the CUDA EPLB path.
        return topk_ids.add(10)
def _make_router() -> DummyRouter:
    """Build a DummyRouter with EPLB disabled and a fresh layer state."""
    router_kwargs = dict(
        top_k=2,
        global_num_experts=16,
        eplb_state=EplbLayerState(),
        enable_eplb=False,
        indices_type_getter=None,
    )
    return DummyRouter(**router_kwargs)
def test_base_router_capture_pre_eplb_mapping():
    """The capture hook sees ids before DummyRouter's +10 mapping is applied."""
    router = _make_router()
    seen: list[torch.Tensor] = []
    router.set_capture_fn(lambda ids: seen.append(ids.clone()))

    weights, ids = router.select_experts(
        hidden_states=torch.empty(1),
        router_logits=torch.empty(1),
    )

    assert weights.shape == ids.shape
    assert len(seen) == 1
    # Pre-mapping (logical) ids were captured...
    assert torch.equal(seen[0], torch.tensor([[1, 2], [3, 4]]))
    # ...while the returned ids carry the mapping offset.
    assert torch.equal(ids, torch.tensor([[11, 12], [13, 14]]))
def test_base_router_capture_with_eplb_enabled():
    """With EPLB enabled, the hook still observes pre-mapping (logical) ids."""
    router = _make_router()
    router.enable_eplb = True
    state = router.eplb_state
    state.expert_load_view = torch.zeros(32, dtype=torch.int64)
    state.logical_to_physical_map = torch.arange(32).view(32, 1)
    state.logical_replica_count = torch.ones(32, dtype=torch.int64)

    seen: list[torch.Tensor] = []
    router.set_capture_fn(lambda ids: seen.append(ids.clone()))

    _, ids = router.select_experts(
        hidden_states=torch.empty(1),
        router_logits=torch.empty(1),
    )

    assert len(seen) == 1
    # Capture should see logical ids pre-EPLB mapping.
    assert torch.equal(seen[0], torch.tensor([[1, 2], [3, 4]]))
    # Our DummyRouter mapping adds +10.
    assert torch.equal(ids, torch.tensor([[11, 12], [13, 14]]))
def test_gpu_model_runner_binds_router_capture(monkeypatch):
    """_bind_routed_experts_capturer wires the capturer into each MoE router."""
    from vllm.v1.worker import gpu_model_runner as gmr

    class FakeFusedMoE:
        def __init__(self):
            self.layer_id = 7
            self.router = _make_router()

    class RecordingCapturer:
        def __init__(self):
            self.calls = []

        def capture(self, layer_id, topk_ids):
            self.calls.append((layer_id, topk_ids))

    moe_module = FakeFusedMoE()

    # Patch the FusedMoE symbol that _bind_routed_experts_capturer imports
    # at runtime so our fake module is recognized.
    import vllm.model_executor.layers.fused_moe.layer as fused_moe_layer

    monkeypatch.setattr(fused_moe_layer, "FusedMoE", FakeFusedMoE)

    fake_runner = types.SimpleNamespace(
        compilation_config=types.SimpleNamespace(
            static_forward_context={"dummy": moe_module}
        )
    )
    recorder = RecordingCapturer()
    gmr.GPUModelRunner._bind_routed_experts_capturer(fake_runner, recorder)

    assert moe_module.router.capture_fn is not None
    moe_module.router.capture_fn(torch.tensor([[5, 6]]))
    assert len(recorder.calls) == 1
    bound_layer_id, captured_ids = recorder.calls[0]
    assert bound_layer_id == 7
    assert torch.equal(captured_ids, torch.tensor([[5, 6]]))
def test_gpu_model_runner_binding_stage(monkeypatch):
    """Routers have no capture hook until the binder installs one."""
    from vllm.v1.worker import gpu_model_runner as gmr

    class FakeFusedMoE:
        def __init__(self):
            self.layer_id = 11
            self.router = _make_router()

    class RecordingCapturer:
        def __init__(self):
            self.calls = []

        def capture(self, layer_id, topk_ids):
            self.calls.append((layer_id, topk_ids))

    moe_module = FakeFusedMoE()

    import vllm.model_executor.layers.fused_moe.layer as fused_moe_layer

    monkeypatch.setattr(fused_moe_layer, "FusedMoE", FakeFusedMoE)

    fake_runner = types.SimpleNamespace(
        compilation_config=types.SimpleNamespace(
            static_forward_context={"dummy": moe_module}
        )
    )

    # Before binding, no capture hook.
    assert moe_module.router.capture_fn is None

    recorder = RecordingCapturer()
    gmr.GPUModelRunner._bind_routed_experts_capturer(fake_runner, recorder)

    # After binding, hook should exist and be callable.
    assert callable(moe_module.router.capture_fn)
    moe_module.router.capture_fn(torch.tensor([[9, 10]]))
    assert len(recorder.calls) == 1
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/model_executor/test_routed_experts_capture.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/test_cold_start.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from torch._dynamo.utils import counters
from vllm import LLM
from vllm.config import CompilationConfig, CompilationMode, CUDAGraphMode
def test_moe_compilation_cold_start(monkeypatch, use_fresh_inductor_cache):
    """Cold start should compile only the model's unique subgraphs."""
    # Run in the same process so PyTorch's internal counters are visible here.
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
    # I'm not sure if this is going to affect the numbers
    monkeypatch.setenv("VLLM_USE_AOT_COMPILE", "0")
    # Disable the compile cache to force cold compilation.
    monkeypatch.setenv("VLLM_DISABLE_COMPILE_CACHE", "1")

    compile_cfg = CompilationConfig(
        mode=CompilationMode.VLLM_COMPILE,
        cudagraph_mode=CUDAGraphMode.NONE,  # make the model loading faster
    )
    counters.clear()
    _ = LLM(
        model="microsoft/Phi-tiny-MoE-instruct",
        max_model_len=256,
        load_format="dummy",  # make the model loading faster
        compilation_config=compile_cfg,
        num_gpu_blocks_override=8,  # make the model loading faster
    )

    # vLLM-compile cold start is special. By default, we do one full dynamo
    # capture of the entire forward pass (32 transformer layers), then split
    # on the attention operation, yielding 33 subgraphs (attention excluded).
    # Compiled artifacts are generated only for the *unique* subgraphs.
    #
    # This model has only 3 unique subgraphs (its transformer layers are
    # identical modulo weights, as in most vLLM models), so cold start should
    # only be compiling 3 unique subgraphs.
    assert counters["aot_autograd"]["autograd_cache_miss"] == 3
    assert counters["aot_autograd"]["autograd_cache_hit"] == 0
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/test_cold_start.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/compile/test_structured_logging.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import patch
import pytest
import regex as re
import torch
from torch import nn
import tests.compile.silly_attention # noqa
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.config.compilation import (
CompilationConfig,
CompilationMode,
CUDAGraphMode,
)
from vllm.config.scheduler import SchedulerConfig
from vllm.forward_context import set_forward_context
MLP_SIZE = 64
@support_torch_compile
class SimpleModel(nn.Module):
    """Tiny model with one splitting op so piecewise compilation can be tested."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "", **kwargs):
        super().__init__()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        doubled = x + x
        # silly.attention is configured as a splitting op in the test below,
        # so the graph is cut around this call.
        attn_out = torch.empty_like(doubled)
        torch.ops.silly.attention(doubled, doubled, doubled, attn_out)
        return attn_out * 2
class TraceStructuredCapture:
    """Records trace_structured(...) calls so tests can inspect what was logged."""

    def __init__(self):
        self.calls: list[dict] = []

    def __call__(self, event_type: str, metadata_fn=None, payload_fn=None, **kwargs):
        """Record one call; metadata_fn is evaluated eagerly, payload_fn ignored."""
        self.calls.append(
            {
                "event_type": event_type,
                "metadata": metadata_fn() if metadata_fn else {},
            }
        )

    def get(self, event_type: str, name_pattern: str) -> list[dict]:
        """Return recorded calls matching event type and artifact name.

        Args:
            event_type: The event type to filter by (e.g., "artifact", "graph_dump")
            name_pattern: Regex pattern to match against the artifact name
        """
        wanted = re.compile(name_pattern)
        matches = []
        for call in self.calls:
            if call["event_type"] != event_type:
                continue
            if wanted.fullmatch(call.get("metadata", {}).get("name", "")):
                matches.append(call)
        return matches
@pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA required")
def test_vllm_structured_logging_artifacts(use_fresh_inductor_cache):
    """Test that all expected vLLM artifacts are logged during compilation."""
    torch.set_default_device("cuda")
    capture = TraceStructuredCapture()
    # Piecewise compile of SimpleModel: split on silly::attention, with one
    # explicit compile size so both the dynamic and sized paths are exercised.
    vllm_config = VllmConfig(
        compilation_config=CompilationConfig(
            mode=CompilationMode.VLLM_COMPILE,
            cudagraph_mode=CUDAGraphMode.PIECEWISE,
            compile_sizes=[8],
            splitting_ops=["silly::attention"],
        ),
        scheduler_config=SchedulerConfig(
            max_num_seqs=8,
            max_model_len=8192,
            is_encoder_decoder=False,
        ),
    )
    # Patch trace_structured to capture calls
    # (both modules that emit structured-logging events).
    with (
        patch("vllm.compilation.backends.trace_structured", capture),
        patch("vllm.compilation.piecewise_backend.trace_structured", capture),
        set_current_vllm_config(vllm_config),
    ):
        model = SimpleModel(vllm_config=vllm_config, prefix="test")
        with set_forward_context({}, vllm_config=vllm_config):
            # One forward at the compile size triggers compilation + logging.
            model(torch.randn(8, MLP_SIZE))

    config_artifacts = capture.get("artifact", "vllm_compilation_config")
    assert len(config_artifacts) == 1, (
        f"Expected 1 vllm_compilation_config, got {len(config_artifacts)}"
    )

    vllm_piecewise_split_graph = capture.get("graph_dump", "vllm_piecewise_split_graph")
    assert len(vllm_piecewise_split_graph) == 1, (
        "Expected 1 toplevel piecewise split graph, "
        f"got {len(vllm_piecewise_split_graph)}"
    )

    compile_start_artifacts = capture.get("artifact", "vllm_piecewise_compile_start")
    assert len(compile_start_artifacts) == 2, (
        "Expected 2 vllm_piecewise_compile_start "
        "(one for dynamic ranges, one for compile size), "
        f"got {len(compile_start_artifacts)}"
    )

    submod_dumps = capture.get("graph_dump", r"vllm_submod_.*")
    assert len(submod_dumps) == 2, (
        "Expected 2 submods (one before attention, one after attention), "
        f"got {len(submod_dumps)}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/compile/test_structured_logging.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/renderers/test_completions.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import io
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
import pybase64
import pytest
import torch
from vllm.config import ModelConfig
from vllm.inputs import SingletonPrompt
from vllm.renderers import TokenizeParams
from vllm.renderers.hf import HfRenderer
from vllm.renderers.inputs.preprocess import parse_model_prompt, prompt_to_seq
from vllm.tokenizers.registry import tokenizer_args_from_config
MODEL_NAME = "openai-community/gpt2"
@dataclass
class MockHFConfig:
    # Minimal stand-in for a Hugging Face model config.
    model_type: str = "any"
@dataclass
class MockModelConfig:
    """Minimal ModelConfig stand-in exposing the fields the renderer reads."""

    # Unannotated names below are class-level (shared) attributes, not
    # dataclass fields; only the annotated names appear in __init__.
    runner_type = "generate"
    model: str = MODEL_NAME
    tokenizer: str = MODEL_NAME
    trust_remote_code: bool = False
    tokenizer_revision = None
    tokenizer_mode = "auto"
    hf_config = MockHFConfig()
    encoder_config: dict[str, Any] | None = None
    enable_prompt_embeds: bool = True
    skip_tokenizer_init: bool = False
    is_encoder_decoder: bool = False
    is_multimodal_model: bool = False
@dataclass
class MockVllmConfig:
    """Minimal VllmConfig stand-in wrapping only a model config."""

    model_config: MockModelConfig
@dataclass
class DummyTokenizer:
    """Fake tokenizer mapping each character to one token id.

    ``encode`` returns ``list(range(n))`` for an ``n``-character input
    (optionally truncated) and records the kwargs it was called with so
    tests can inspect how the renderer invoked it.
    """

    truncation_side: str = "left"
    max_chars_per_token: int = 1

    def __post_init__(self) -> None:
        # Kwargs from the most recent encode() call, for test assertions.
        self._captured_encode_kwargs: dict = {}

    def decode(self, tokens: list[int]) -> str:
        return str(tokens)

    def encode(self, text: str, **kwargs) -> list[int]:
        self._captured_encode_kwargs = kwargs
        length = len(text)
        if kwargs.get("truncation") and kwargs.get("max_length") is not None:
            length = min(length, kwargs["max_length"])
        return list(range(length))
def _build_renderer(
    model_config: MockModelConfig,
    *,
    truncation_side: str = "left",
    max_chars_per_token: int = 1,
):
    """Construct an HfRenderer backed by a DummyTokenizer (or none at all)."""
    # Resolve tokenizer args from the config (values themselves are unused;
    # the 4-tuple unpack asserts the expected return shape).
    _, _tokenizer_name, _, _tok_kwargs = tokenizer_args_from_config(model_config)
    if model_config.skip_tokenizer_init:
        tok = None
    else:
        tok = DummyTokenizer(
            truncation_side=truncation_side,
            max_chars_per_token=max_chars_per_token,
        )
    return HfRenderer(MockVllmConfig(model_config), tokenizer=tok)
def _preprocess_prompt(
    model_config: ModelConfig,
    prompt_or_prompts: SingletonPrompt | bytes | Sequence[SingletonPrompt | bytes],
):
    """Normalize to a list of prompts, parsing everything except raw bytes."""
    parsed = []
    for prompt in prompt_to_seq(prompt_or_prompts):
        if isinstance(prompt, bytes):
            # Raw bytes (e.g. serialized embeds) pass through untouched.
            parsed.append(prompt)
        else:
            parsed.append(parse_model_prompt(model_config, prompt))
    return parsed
class TestValidatePrompt:
    """Prompt validation errors surfaced by the renderer."""

    def test_empty_input(self):
        """An empty prompt list is rejected."""
        renderer = _build_renderer(MockModelConfig())
        with pytest.raises(ValueError, match="at least one prompt"):
            renderer.render_prompts(_preprocess_prompt(renderer.model_config, []))

    def test_invalid_type(self):
        """Mixed token/string lists are rejected with a TypeError."""
        renderer = _build_renderer(MockModelConfig())
        bad_input = [[1, 2], ["foo", "bar"]]
        with pytest.raises(TypeError, match="should be a list of integers"):
            renderer.render_prompts(
                _preprocess_prompt(renderer.model_config, bad_input)  # type: ignore[arg-type]
            )
class TestRenderPrompt:
def test_token_input(self):
renderer = _build_renderer(MockModelConfig())
tokens = [101, 7592, 2088]
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, tokens)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
assert len(results) == 1
assert results[0]["prompt_token_ids"] == tokens
def test_token_list_input(self):
renderer = _build_renderer(MockModelConfig())
token_lists = [[101, 7592, 2088], [102, 1234, 5678, 9012], [103, 4567]]
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, token_lists)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
assert len(results) == 3
assert results[0]["prompt_token_ids"] == [101, 7592, 2088]
assert results[1]["prompt_token_ids"] == [102, 1234, 5678, 9012]
assert results[2]["prompt_token_ids"] == [103, 4567]
def test_text_input(self):
renderer = _build_renderer(MockModelConfig())
text_input = "x" * 10
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, text_input)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
assert len(results) == 1
assert len(results[0]["prompt_token_ids"]) == 10
def test_text_list_input(self):
renderer = _build_renderer(MockModelConfig())
text_list_input = ["x" * 10, "x" * 12, "x" * 14]
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, text_list_input)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
assert len(results) == 3
for text_input, result in zip(text_list_input, results):
assert len(result["prompt_token_ids"]) == len(text_input)
def test_zero_truncation(self):
renderer = _build_renderer(MockModelConfig())
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, "x" * 200)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=0),
)
assert len(results) == 1
assert len(results[0]["prompt_token_ids"]) == 0
def test_pos_truncation(self):
renderer = _build_renderer(MockModelConfig())
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, "x" * 200)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=50),
)
assert len(results) == 1
assert len(results[0]["prompt_token_ids"]) == 50
def test_neg_truncation(self):
renderer = _build_renderer(MockModelConfig())
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, "x" * 200)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=-1),
)
assert len(results) == 1
assert len(results[0]["prompt_token_ids"]) == 100 # max_total_tokens
def test_truncation_left(self):
renderer = _build_renderer(MockModelConfig(), truncation_side="left")
long_tokens = [100, 101, 102, 103, 104, 105, 106, 107, 108, 109] # 10 tokens
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, long_tokens)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=5),
)
assert len(results) == 1
# Should keep the last 5 tokens: [105, 106, 107, 108, 109]
assert results[0]["prompt_token_ids"] == [105, 106, 107, 108, 109]
def test_truncation_right(self):
renderer = _build_renderer(MockModelConfig(), truncation_side="right")
long_tokens = [100, 101, 102, 103, 104, 105, 106, 107, 108, 109] # 10 tokens
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, long_tokens)
)
results = renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=5),
)
assert len(results) == 1
# Should keep the first 5 tokens: [100, 101, 102, 103, 104]
assert results[0]["prompt_token_ids"] == [100, 101, 102, 103, 104]
def test_text_max_length_exceeded_obvious(self):
renderer = _build_renderer(MockModelConfig(), max_chars_per_token=1)
# Exceeds max_total_tokens and max_total_tokens * VLLM_MAX_CHARS_PER_TOKEN
long_tokens = "x" * 150
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, long_tokens)
)
with pytest.raises(
ValueError,
match="input characters and requested .* context length is only",
):
renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
# Should not even attempt tokenization
assert renderer.tokenizer._captured_encode_kwargs == {}
def test_text_max_length_exceeded_nonobvious(self):
renderer = _build_renderer(MockModelConfig(), max_chars_per_token=2)
# Exceeds max_total_tokens but not max_total_tokens * VLLM_MAX_CHARS_PER_TOKEN
long_tokens = "x" * 150
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, long_tokens)
)
with pytest.raises(
ValueError,
match="input tokens and requested .* context length is only",
):
renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
# Should only tokenize the first max_total_tokens + 1 tokens
assert renderer.tokenizer._captured_encode_kwargs["truncation"] is True
assert renderer.tokenizer._captured_encode_kwargs["max_length"] == 101
def test_token_max_length_exceeded(self):
renderer = _build_renderer(MockModelConfig())
long_tokens = list(range(150)) # Exceeds max_total_tokens=100
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, long_tokens)
)
with pytest.raises(
ValueError,
match="input tokens and requested .* context length is only",
):
renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100, truncate_prompt_tokens=None),
)
def test_no_tokenizer_for_text(self):
renderer = _build_renderer(MockModelConfig(skip_tokenizer_init=True))
prompts = renderer.render_prompts(
_preprocess_prompt(renderer.model_config, "Hello world")
)
with pytest.raises(ValueError, match="`skip_tokenizer_init=True`"):
renderer.tokenize_prompts(
prompts,
TokenizeParams(max_total_tokens=100),
)
def test_token_input_with_needs_detokenization(self):
    """Token prompts are detokenized back to text when requested."""
    renderer = _build_renderer(MockModelConfig())
    token_ids = [1, 2, 3, 4]
    prompts = renderer.render_prompts(
        _preprocess_prompt(renderer.model_config, token_ids)
    )
    params = TokenizeParams(
        max_total_tokens=100,
        needs_detokenization=True,
    )
    results = renderer.tokenize_prompts(prompts, params)
    (result,) = results  # one prompt in, exactly one result out
    assert result["prompt_token_ids"] == token_ids
    # The mock tokenizer detokenizes to the repr of the token list.
    assert result["prompt"] == "[1, 2, 3, 4]"
class TestRenderEmbedPrompt:
    """Tests for prompts supplied as base64-encoded, torch-saved embeddings."""

    def _create_test_embed_bytes(self, tensor: torch.Tensor) -> bytes:
        """Helper to create base64-encoded tensor bytes"""
        buffer = io.BytesIO()
        torch.save(tensor, buffer)
        buffer.seek(0)
        return pybase64.b64encode(buffer.read())

    def test_single_prompt_embed(self):
        """A single embeds prompt round-trips through tokenization unchanged."""
        renderer = _build_renderer(MockModelConfig())
        # Create a test tensor
        tensor_input = torch.randn(10, 768, dtype=torch.float32)
        embed_bytes = self._create_test_embed_bytes(tensor_input)

        prompts = renderer.render_prompts(
            _preprocess_prompt(renderer.model_config, embed_bytes)
        )
        results = renderer.tokenize_prompts(
            prompts,
            TokenizeParams(max_total_tokens=100),
        )

        assert len(results) == 1
        # Values must survive the encode/decode cycle exactly.
        assert torch.equal(results[0]["prompt_embeds"], tensor_input)

    def test_multiple_prompt_embeds(self):
        """A batch of embeds prompts yields one result per input, in order."""
        renderer = _build_renderer(MockModelConfig())
        # Create multiple test tensors
        tensor_inputs = [
            torch.randn(8, 512, dtype=torch.float32),
            torch.randn(12, 512, dtype=torch.float32),
        ]
        prompts = renderer.render_prompts(
            _preprocess_prompt(
                renderer.model_config,
                [self._create_test_embed_bytes(t) for t in tensor_inputs],
            )
        )
        results = renderer.tokenize_prompts(
            prompts,
            TokenizeParams(max_total_tokens=100),
        )

        assert len(results) == 2
        for i, result in enumerate(results):
            assert torch.allclose(result["prompt_embeds"], tensor_inputs[i])

    def test_prompt_embed_truncation(self):
        """truncate_prompt_tokens keeps the trailing rows of the embedding."""
        renderer = _build_renderer(MockModelConfig())
        # Create tensor with more tokens than truncation limit
        tensor_input = torch.randn(20, 768, dtype=torch.float32)

        prompts = renderer.render_prompts(
            _preprocess_prompt(
                renderer.model_config, self._create_test_embed_bytes(tensor_input)
            )
        )
        results = renderer.tokenize_prompts(
            prompts,
            TokenizeParams(
                max_total_tokens=100,
                truncate_prompt_tokens=10,
            ),
        )

        assert len(results) == 1
        # Should keep last 10 tokens
        expected = tensor_input[-10:]
        assert torch.equal(results[0]["prompt_embeds"], expected)

    def test_prompt_embed_different_dtypes(self):
        """All supported floating-point dtypes are preserved end to end."""
        renderer = _build_renderer(MockModelConfig())
        # Test different supported dtypes
        dtypes = [torch.float32, torch.float16, torch.bfloat16]

        for dtype in dtypes:
            tensor_input = torch.randn(5, 256, dtype=dtype)
            prompts = renderer.render_prompts(
                _preprocess_prompt(
                    renderer.model_config, self._create_test_embed_bytes(tensor_input)
                )
            )
            results = renderer.tokenize_prompts(
                prompts,
                TokenizeParams(max_total_tokens=100),
            )

            assert len(results) == 1
            assert results[0]["prompt_embeds"].dtype == dtype

    def test_prompt_embed_squeeze_batch_dim(self):
        """A leading batch dimension of size 1 is squeezed away."""
        renderer = _build_renderer(MockModelConfig())
        # Test tensor with batch dimension gets squeezed
        tensor_input = torch.randn(1, 10, 768, dtype=torch.float32)

        prompts = renderer.render_prompts(
            _preprocess_prompt(
                renderer.model_config, self._create_test_embed_bytes(tensor_input)
            )
        )
        results = renderer.tokenize_prompts(
            prompts,
            TokenizeParams(max_total_tokens=100),
        )

        assert len(results) == 1
        # Should be squeezed to 2D
        assert results[0]["prompt_embeds"].shape == (10, 768)

    def test_both_prompts_and_embeds(self):
        """Mixed text + embeds input yields a tokens result and an embeds result."""
        renderer = _build_renderer(MockModelConfig())
        text_input = "Hello world"
        tensor_input = torch.randn(5, 256, dtype=torch.float32)

        prompts = renderer.render_prompts(
            _preprocess_prompt(
                renderer.model_config,
                [text_input, self._create_test_embed_bytes(tensor_input)],
            )
        )
        results = renderer.tokenize_prompts(
            prompts,
            TokenizeParams(max_total_tokens=100),
        )

        assert len(results) == 2
        # First should be tokens prompt
        assert "prompt_token_ids" in results[0]
        # NOTE(review): assumes the mock tokenizer emits one token per
        # input character — confirmed only by this equality holding.
        assert len(results[0]["prompt_token_ids"]) == len(text_input)
        # Second should be embed prompt
        assert torch.equal(results[1]["prompt_embeds"], tensor_input)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/renderers/test_completions.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/renderers/embed_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from io import BytesIO
from typing import TYPE_CHECKING
import pybase64
import torch
from vllm.exceptions import VLLMValidationError
if TYPE_CHECKING:
from vllm.config import ModelConfig
def safe_load_prompt_embeds(
    model_config: "ModelConfig",
    embed: bytes,
) -> torch.Tensor:
    """Decode and validate a base64-encoded, torch-saved prompt embedding.

    Args:
        model_config: Model configuration; ``enable_prompt_embeds`` must
            be set for embeds input to be accepted.
        embed: Base64-encoded bytes of a tensor serialized via ``torch.save``.

    Returns:
        A dense, 2D CPU tensor (a leading size-1 batch dim is squeezed).

    Raises:
        VLLMValidationError: If prompt embeddings are disabled, the payload
            is not a floating-point tensor, or it is not 2D after squeezing.
    """
    if not model_config.enable_prompt_embeds:
        raise VLLMValidationError(
            "You must set `--enable-prompt-embeds` to input `prompt_embeds`.",
            parameter="prompt_embeds",
        )

    # Enable sparse tensor integrity checks to prevent out-of-bounds
    # writes from maliciously crafted tensors
    with torch.sparse.check_sparse_tensor_invariants():
        tensor = torch.load(
            BytesIO(pybase64.b64decode(embed, validate=True)),
            weights_only=True,
            map_location=torch.device("cpu"),
        )

    # Validate with explicit raises instead of `assert`: this is untrusted
    # user input and `assert` statements are stripped under `python -O`,
    # which would silently disable the checks.
    if not isinstance(tensor, torch.Tensor) or tensor.dtype not in (
        torch.float32,
        torch.bfloat16,
        torch.float16,
    ):
        raise VLLMValidationError(
            "`prompt_embeds` must decode to a floating-point torch.Tensor "
            "(float32, bfloat16, or float16).",
            parameter="prompt_embeds",
        )

    tensor = tensor.to_dense()
    # Drop a leading batch dimension of size 1, if present.
    if tensor.dim() > 2:
        tensor = tensor.squeeze(0)
    if tensor.dim() != 2:
        raise VLLMValidationError(
            "`prompt_embeds` must be a 2D tensor of shape "
            "(num_tokens, hidden_size).",
            parameter="prompt_embeds",
        )
    return tensor
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/embed_utils.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/params.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, TypeVar
from vllm.exceptions import VLLMValidationError
from vllm.inputs import EmbedsPrompt, TextPrompt, TokensPrompt
from vllm.logger import init_logger
from vllm.tokenizers import TokenizerLike
from vllm.utils.import_utils import LazyLoader
if TYPE_CHECKING:
import torch
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
else:
torch = LazyLoader("torch", globals(), "torch")
ChatTemplateContentFormatOption = object
logger = init_logger(__name__)
_S = TypeVar("_S", list[int], "torch.Tensor")
def merge_kwargs(
defaults: dict[str, Any] | None,
overrides: dict[str, Any] | None,
/,
*,
unset_values: tuple[object, ...] = (None, "auto"),
) -> dict[str, Any]:
if defaults is None:
defaults = {}
if overrides is None:
overrides = {}
return defaults | {k: v for k, v in overrides.items() if v not in unset_values}
@dataclass(frozen=True)
class ChatParams:
"""Configuration to control how to parse chat messages."""
chat_template: str | None = None
"""The chat template to apply."""
chat_template_content_format: "ChatTemplateContentFormatOption" = "auto"
"""The format of the chat template."""
chat_template_kwargs: dict[str, Any] = field(default_factory=dict)
"""The kwargs to pass to the chat template."""
def with_defaults(self, default_chat_template_kwargs: dict[str, Any] | None):
if not default_chat_template_kwargs:
return self
return ChatParams(
chat_template=self.chat_template,
chat_template_content_format=self.chat_template_content_format,
chat_template_kwargs=merge_kwargs(
default_chat_template_kwargs,
self.chat_template_kwargs,
),
)
def get_apply_chat_template_kwargs(self) -> dict[str, Any]:
"""The arguments to pass to `tokenizer.apply_chat_template`."""
return merge_kwargs(
self.chat_template_kwargs,
dict(chat_template=self.chat_template, return_dict=False),
)
@dataclass(frozen=True)
class TokenizeParams:
    """Configuration to control how prompts are tokenized."""

    max_total_tokens: int | None
    """
    Maximum allowed number of input + output tokens.
    Usually, this refers to the model's context length.
    """

    max_output_tokens: int = 0
    """Maximum requested number of output tokens."""

    pad_prompt_tokens: int | None = None
    """
    Number of tokens to pad to:
    - `None` means no padding.
    - `-1` maps to `max_input_tokens`.
    """

    truncate_prompt_tokens: int | None = None
    """
    Number of tokens to keep:
    - `None` means no truncation.
    - `-1` maps to `max_input_tokens`.
    """

    do_lower_case: bool = False
    """Whether to normalize text to lower case before tokenization."""

    add_special_tokens: bool = True
    """Whether to add special tokens."""

    needs_detokenization: bool = False
    """
    Whether the tokenized prompt needs to contain the original text.
    Not to be confused with `SamplingParams.detokenize` which deals
    with the output generated by the model.
    """

    max_total_tokens_param: str = "max_total_tokens"
    """Override this to edit the message for validation errors."""

    max_output_tokens_param: str = "max_output_tokens"
    """Override this to edit the message for validation errors."""

    truncate_prompt_tokens_param: str = "truncate_prompt_tokens"
    """Override this to edit the message for validation errors."""

    @property
    def max_input_tokens(self) -> int | None:
        """Maximum allowed number of input tokens."""
        if self.max_total_tokens is None:
            return None

        return self.max_total_tokens - self.max_output_tokens

    def __post_init__(self) -> None:
        """Validate cross-field consistency at construction time."""
        max_total_tokens = self.max_total_tokens
        max_output_tokens = self.max_output_tokens
        max_input_tokens = self.max_input_tokens
        truncate_prompt_tokens = self.truncate_prompt_tokens

        if (
            max_output_tokens is not None
            and max_total_tokens is not None
            and max_output_tokens > max_total_tokens
        ):
            # FIX: the previous message was missing a space before "cannot"
            # and used the `{...=}` debug specifier, which rendered as
            # "max_total_tokens=max_total_tokens=<N>".
            raise VLLMValidationError(
                f"{self.max_output_tokens_param}={max_output_tokens} "
                f"cannot be greater than "
                f"{self.max_total_tokens_param}={max_total_tokens}. "
                f"Please request fewer output tokens.",
                parameter=self.max_output_tokens_param,
                value=max_output_tokens,
            )

        if (
            max_input_tokens is not None
            and truncate_prompt_tokens is not None
            and truncate_prompt_tokens > max_input_tokens
        ):
            raise VLLMValidationError(
                f"{self.truncate_prompt_tokens_param}={truncate_prompt_tokens} "
                f"cannot be greater than {self.max_total_tokens_param} - "
                f"{self.max_output_tokens_param} = {max_input_tokens}. "
                f"Please request a smaller truncation size.",
                parameter=self.truncate_prompt_tokens_param,
                value=truncate_prompt_tokens,
            )

    def with_kwargs(self, **tokenization_kwargs: Any):
        """Return a copy with HuggingFace-style tokenization kwargs applied.

        Translates `padding`/`truncation`/`max_length` semantics
        (https://huggingface.co/docs/transformers/en/pad_truncation) into
        this class's fields; unknown kwargs are logged and dropped.
        """
        max_length = tokenization_kwargs.pop("max_length", self.max_input_tokens)
        pad_prompt_tokens = tokenization_kwargs.pop(
            "pad_prompt_tokens", self.pad_prompt_tokens
        )
        truncate_prompt_tokens = tokenization_kwargs.pop(
            "truncate_prompt_tokens", self.truncate_prompt_tokens
        )
        do_lower_case = tokenization_kwargs.pop("do_lower_case", self.do_lower_case)
        add_special_tokens = tokenization_kwargs.pop(
            "add_special_tokens", self.add_special_tokens
        )
        needs_detokenization = tokenization_kwargs.pop(
            "needs_detokenization", self.needs_detokenization
        )

        # https://huggingface.co/docs/transformers/en/pad_truncation
        if padding := tokenization_kwargs.pop("padding", None):
            if padding == "max_length":
                pad_prompt_tokens = max_length
            elif padding in (False, "do_not_pad"):
                pad_prompt_tokens = None
            else:
                # To emit the below warning
                tokenization_kwargs["padding"] = padding

        if truncation := tokenization_kwargs.pop("truncation", None):
            if truncation in (True, "longest_first"):
                truncate_prompt_tokens = max_length
            elif truncation in (False, "do_not_truncate"):
                truncate_prompt_tokens = None
            else:
                # To emit the below warning
                tokenization_kwargs["truncation"] = truncation

        if tokenization_kwargs:
            logger.warning(
                "The following tokenization arguments are not supported "
                "by vLLM Renderer and will be ignored: %s",
                tokenization_kwargs,
            )

        max_total_tokens = self.max_total_tokens

        return TokenizeParams(
            max_total_tokens=max_total_tokens,
            # Derive the output budget so that max_input_tokens == max_length.
            max_output_tokens=(
                0
                if max_total_tokens is None or max_length is None
                else max_total_tokens - max_length
            ),
            pad_prompt_tokens=pad_prompt_tokens,
            truncate_prompt_tokens=truncate_prompt_tokens,
            do_lower_case=do_lower_case,
            add_special_tokens=add_special_tokens,
            needs_detokenization=needs_detokenization,
        )

    def get_encode_kwargs(self) -> dict[str, Any]:
        """The arguments to pass to `tokenizer.encode`."""
        max_length = self.truncate_prompt_tokens
        if max_length is not None and max_length < 0:
            max_length = self.max_input_tokens
        elif max_length is None and self.max_input_tokens is not None:
            # This prevents tokenization from taking up more resources than necessary
            # while still failing `self._token_len_check` as expected by users
            max_length = self.max_input_tokens + 1

        return dict(
            truncation=max_length is not None,
            max_length=max_length,
            add_special_tokens=self.add_special_tokens,
        )

    def _text_len_check(self, tokenizer: TokenizerLike | None, text: str) -> str:
        """Apply length checks to prompt text if necessary."""
        max_input_tokens = self.max_input_tokens
        if max_input_tokens is None:
            return text

        if self.truncate_prompt_tokens is None and tokenizer is not None:
            # Cheap pre-check: a prompt cannot fit if it has more characters
            # than the worst-case characters-per-token allows.
            max_input_chars = max_input_tokens * tokenizer.max_chars_per_token
            if len(text) > max_input_chars:
                # To save resources, fail the request outright without even
                # attempting tokenization
                raise VLLMValidationError(
                    f"You passed {len(text)} input characters "
                    f"and requested {self.max_output_tokens} output tokens. "
                    f"However, the model's context length is only "
                    f"{self.max_total_tokens} tokens, resulting in a maximum "
                    f"input length of {max_input_tokens} tokens "
                    f"(at most {max_input_chars} characters). "
                    f"Please reduce the length of the input prompt.",
                    parameter="input_text",
                    value=len(text),
                )

        return text

    def _text_lowercase(self, tokenizer: TokenizerLike | None, text: str) -> str:
        """Apply lowercase to prompt text if necessary."""
        return text.lower() if self.do_lower_case else text

    def _validate_text(self, tokenizer: TokenizerLike | None, text: str) -> str:
        """Apply all validators to prompt text."""
        for validator in (
            self._text_len_check,
            self._text_lowercase,
        ):
            text = validator(tokenizer, text)

        return text

    def apply_pre_tokenization(
        self,
        tokenizer: TokenizerLike | None,
        prompt: TextPrompt,
    ) -> TextPrompt:
        """
        Ensure that the prompt meets the requirements set out by this config.
        If that is not possible, raise a `VLLMValidationError`.

        This method is run before tokenization occurs.
        """
        prompt["prompt"] = self._validate_text(tokenizer, prompt["prompt"])
        return prompt

    def _token_padding(self, tokenizer: TokenizerLike | None, tokens: _S) -> _S:
        """Apply padding to prompt tokens if necessary."""
        pad_length = self.pad_prompt_tokens
        if pad_length is not None and pad_length < 0:
            pad_length = self.max_input_tokens
        if pad_length is None or pad_length <= len(tokens):
            return tokens

        if tokenizer is None:
            raise ValueError("Cannot pad tokens when `skip_tokenizer_init=True`")
        if not isinstance(tokens, list):
            raise ValueError("Cannot pad tokens for embedding inputs")

        return tokens + [tokenizer.pad_token_id] * (pad_length - len(tokens))

    def _token_truncation(self, tokenizer: TokenizerLike | None, tokens: _S) -> _S:
        """Apply truncation to prompt tokens if necessary."""
        max_length = self.truncate_prompt_tokens
        if max_length is not None and max_length < 0:
            max_length = self.max_input_tokens
        if max_length is None or max_length >= len(tokens):
            return tokens
        if max_length == 0:
            return tokens[:0]

        # "left" truncation keeps the *last* max_length tokens.
        if getattr(tokenizer, "truncation_side", "left") == "left":
            return tokens[-max_length:]

        return tokens[:max_length]

    def _token_len_check(self, tokenizer: TokenizerLike | None, tokens: _S) -> _S:
        """Apply length checks to prompt tokens if necessary."""
        max_input_tokens = self.max_input_tokens
        if max_input_tokens is None:
            return tokens

        if len(tokens) > max_input_tokens:
            raise VLLMValidationError(
                f"You passed {len(tokens)} input tokens "
                f"and requested {self.max_output_tokens} output tokens. "
                f"However, the model's context length is only "
                f"{self.max_total_tokens} tokens, resulting in a maximum "
                f"input length of {max_input_tokens} tokens. "
                f"Please reduce the length of the input prompt.",
                parameter="input_tokens",
                value=len(tokens),
            )

        return tokens

    def _validate_tokens(self, tokenizer: TokenizerLike | None, tokens: _S) -> _S:
        """Apply all validators to a token sequence."""
        # Order matters: pad first, then truncate, then length-check.
        for validator in (
            self._token_padding,
            self._token_truncation,
            self._token_len_check,
        ):
            tokens = validator(tokenizer, tokens)

        return tokens

    def apply_post_tokenization(
        self,
        tokenizer: TokenizerLike | None,
        prompt: TokensPrompt | EmbedsPrompt,
    ) -> TokensPrompt | EmbedsPrompt:
        """
        Ensure that the prompt meets the requirements set out by this config.
        If that is not possible, raise a `VLLMValidationError`.

        This method is run after tokenization occurs.
        """
        if "prompt_token_ids" in prompt:
            prompt["prompt_token_ids"] = self._validate_tokens(  # type: ignore[typeddict-unknown-key]
                tokenizer,
                prompt["prompt_token_ids"],  # type: ignore[typeddict-item]
            )
        if "prompt_embeds" in prompt:
            prompt["prompt_embeds"] = self._validate_tokens(  # type: ignore[typeddict-unknown-key]
                tokenizer,
                prompt["prompt_embeds"],  # type: ignore[typeddict-item]
            )

        return prompt
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/params.py",
"license": "Apache License 2.0",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/helion/test_register.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for Helion kernel registration.
Tests ConfiguredHelionKernel, HelionKernelWrapper, and PresetConfigSearch
including config picker registration and custom autotuner integration.
"""
from unittest.mock import Mock, patch
import pytest
import torch
from vllm.utils.import_utils import has_helion
if not has_helion():
pytest.skip(
"Helion is not installed. Install with: pip install vllm[helion]",
allow_module_level=True,
)
import helion
from vllm.kernels.helion.config_manager import ConfigManager
from vllm.kernels.helion.register import (
_HOP_AVAILABLE,
ConfiguredHelionKernel,
HelionKernelWrapper,
get_kernel_by_name,
get_registered_kernels,
register_kernel,
validate_helion_settings,
)
@pytest.fixture
def sample_configs():
    """Create real Helion config objects for testing."""
    # (block_sizes, num_warps, num_stages) per named config.
    specs = {
        "hiddensize_4096_batchsize_32": ([128], 4, 3),
        "hiddensize_4096_batchsize_64": ([256], 8, 4),
        "hiddensize_4096_batchsize_128": ([512], 16, 2),
        "default": ([64], 2, 2),
    }
    return {
        name: helion.Config(
            block_sizes=block_sizes,
            num_warps=num_warps,
            num_stages=num_stages,
        )
        for name, (block_sizes, num_warps, num_stages) in specs.items()
    }
@pytest.fixture
def sample_kernel():
    """Create a simple test kernel function."""

    # The inner function stands in for a raw Helion kernel; the tests only
    # need a callable with tensor in/out, not a real compiled kernel.
    def test_kernel(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        """Simple test kernel that adds two tensors."""
        return x + y

    return test_kernel
@pytest.fixture
def config_manager_with_test_configs(sample_configs):
    """Set up ConfigManager with test configs for nvidia_h200 platform."""
    manager = Mock(spec=ConfigManager)
    # Any platform query returns the canned sample configs.
    manager.get_platform_configs = Mock(return_value=sample_configs)
    return manager
@pytest.fixture
def configured_kernel(sample_kernel, sample_configs, config_manager_with_test_configs):
    """Create a ConfiguredHelionKernel for testing."""

    def test_config_picker(args, config_keys):
        """Simple config picker that returns default."""
        return "default"

    # Patch config lookup and platform detection so construction does not
    # touch real hardware or config files.
    with (
        patch(
            "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
            return_value=config_manager_with_test_configs,
        ),
        patch(
            "vllm.kernels.helion.utils.get_canonical_gpu_name",
            return_value="nvidia_h200",
        ),
        patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
    ):
        # Mock just the helion.kernel decorator to avoid actual kernel compilation
        mock_decorated = Mock()
        mock_kernel.return_value = Mock(return_value=mock_decorated)

        return ConfiguredHelionKernel(
            op_name="test_kernel",
            config_picker=test_config_picker,
            raw_kernel_func=sample_kernel,
            helion_settings=None,
        )
class TestValidateHelionSettings:
    """Test suite for validate_helion_settings utility function."""

    def test_accepts_none_settings(self):
        """Test that None settings are accepted without error."""
        validate_helion_settings(None, "test_kernel")  # Should not raise

    def test_accepts_valid_settings(self):
        """Test that valid settings without conflicts are accepted."""
        settings = helion.Settings()
        settings.static_shapes = False
        settings.print_output_code = True
        validate_helion_settings(settings, "test_kernel")  # Should not raise

    def test_rejects_autotuner_fn(self):
        """Test that settings with custom autotuner_fn raise ValueError."""
        settings = helion.Settings()
        settings.autotuner_fn = lambda *args: None  # Set custom autotuner function
        with pytest.raises(ValueError, match="uses a custom autotuner"):
            validate_helion_settings(settings, "test_kernel")

    def test_warns_on_static_shapes_true(self):
        """Test that static_shapes=True emits a warning."""
        settings = helion.Settings()
        settings.static_shapes = True
        # Patch the module logger so the warning call can be observed.
        with patch("vllm.kernels.helion.register.logger") as mock_logger:
            validate_helion_settings(settings, "test_kernel")
            mock_logger.warning.assert_called_once()
            assert "static_shapes=True" in mock_logger.warning.call_args[0][0]
def create_configured_kernel_with_configs(
    op_name,
    config_picker,
    kernel_func,
    configs,
    platform="nvidia_h200",
    helion_settings=None,
):
    """Helper to create ConfiguredHelionKernel with real config objects."""
    # Stand-in ConfigManager serving the provided configs for any platform.
    mock_config_manager = Mock(spec=ConfigManager)
    mock_config_manager.get_platform_configs = Mock(return_value=configs)

    with (
        patch(
            "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
            return_value=mock_config_manager,
        ),
        patch(
            "vllm.kernels.helion.utils.get_canonical_gpu_name",
            return_value=platform,
        ),
        patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
    ):
        # Stub the decorator so no real kernel compilation happens.
        mock_decorated = Mock()
        mock_kernel.return_value = Mock(return_value=mock_decorated)

        return ConfiguredHelionKernel(
            op_name=op_name,
            config_picker=config_picker,
            raw_kernel_func=kernel_func,
            helion_settings=helion_settings,
        )
class TestConfiguredHelionKernel:
    """Test suite for ConfiguredHelionKernel."""

    def test_init_raises_without_picker(self, sample_kernel, sample_configs):
        """Test that __init__ raises when no picker registered."""
        configs = {"default": sample_configs["default"]}

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=configs)

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            pytest.raises(RuntimeError, match="No config picker registered"),
        ):
            ConfiguredHelionKernel(
                op_name="test_kernel",
                config_picker=None,  # No picker registered
                raw_kernel_func=sample_kernel,
                helion_settings=None,
            )

    def test_config_selector_validates_picker_result(
        self, sample_kernel, sample_configs
    ):
        """Test that config selector validates picker returns valid key."""

        # Picker returns a key that does not exist in the config set.
        def invalid_picker(args, config_keys):
            return "invalid_key"

        kernel = create_configured_kernel_with_configs(
            op_name="test_kernel",
            config_picker=invalid_picker,
            kernel_func=sample_kernel,
            configs=sample_configs,
        )

        key_computer = kernel._create_key_computer()
        selector = kernel._create_config_selector(key_computer)

        with pytest.raises(
            ValueError, match="Config picker returned invalid config key"
        ):
            selector((torch.randn(32, 4096),))

    def test_config_selector_handles_none_from_picker(
        self, sample_kernel, sample_configs
    ):
        """Test that config selector falls back to 'default' on None."""

        def none_picker(args, config_keys):
            return None

        kernel = create_configured_kernel_with_configs(
            op_name="test_kernel",
            config_picker=none_picker,
            kernel_func=sample_kernel,
            configs=sample_configs,
        )

        key_computer = kernel._create_key_computer()
        selector = kernel._create_config_selector(key_computer)

        result = selector((torch.randn(32, 4096),))
        # Identity check: must be the exact "default" config object.
        assert result is kernel.configs["default"]

    def test_create_decorated_kernel_passes_helion_settings(
        self, sample_kernel, sample_configs
    ):
        """Test that _create_decorated_kernel passes helion_settings."""

        def default_picker(args, config_keys):
            return "default"

        settings = helion.Settings()
        settings.print_output_code = True
        # Note: helion.Settings() defaults static_shapes to True

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        with (
            patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
        ):
            mock_decorated = Mock()
            mock_kernel.return_value = Mock(return_value=mock_decorated)

            ConfiguredHelionKernel(
                op_name="test_kernel",
                config_picker=default_picker,
                raw_kernel_func=sample_kernel,
                helion_settings=settings,
            )

            # Settings should be forwarded to the helion.kernel decorator.
            call_kwargs = mock_kernel.call_args[1]
            assert "print_output_code" in call_kwargs
            assert call_kwargs["print_output_code"] is True
            # helion.Settings() defaults to static_shapes=True, so it should remain True
            assert call_kwargs["static_shapes"] is True

    def test_create_decorated_kernel_preserves_static_shapes_true(
        self, sample_kernel, sample_configs
    ):
        """Test that explicit static_shapes=True is preserved."""

        def default_picker(args, config_keys):
            return "default"

        settings = helion.Settings()
        settings.static_shapes = True

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        with (
            patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
        ):
            mock_decorated = Mock()
            mock_kernel.return_value = Mock(return_value=mock_decorated)

            ConfiguredHelionKernel(
                op_name="test_kernel",
                config_picker=default_picker,
                raw_kernel_func=sample_kernel,
                helion_settings=settings,
            )

            call_kwargs = mock_kernel.call_args[1]
            assert call_kwargs["static_shapes"] is True

    def test_key_and_config_selector_use_same_logic(
        self, sample_kernel, sample_configs
    ):
        """Test that key and config_selector produce identical results."""

        # Picker selects a config bucket based on the batch dimension.
        def tracking_picker(args, config_keys):
            x = args[0]
            batch_size = x.shape[0]
            if batch_size <= 32:
                return "hiddensize_4096_batchsize_32"
            elif batch_size <= 64:
                return "hiddensize_4096_batchsize_64"
            return "hiddensize_4096_batchsize_128"

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        with (
            patch("vllm.kernels.helion.register.helion.kernel") as mock_helion_kernel,
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
        ):
            mock_decorated = Mock()
            mock_helion_kernel.return_value = Mock(return_value=mock_decorated)

            kernel = ConfiguredHelionKernel(
                op_name="test_kernel",
                config_picker=tracking_picker,
                raw_kernel_func=sample_kernel,
                helion_settings=None,
            )

            # Grab the key and autotuner callables handed to helion.kernel.
            call_kwargs = mock_helion_kernel.call_args[1]
            key_fn = call_kwargs["key"]
            autotuner_fn = call_kwargs["autotuner_fn"]

            tensor = torch.randn(50, 4096)  # batch=50, should select batchsize_64

            # key receives unpacked args, autotuner receives args as tuple
            key_result = key_fn(tensor)
            autotuner = autotuner_fn(None, (tensor,))
            config = autotuner.autotune()

            assert key_result == "hiddensize_4096_batchsize_64"
            assert config is kernel.configs["hiddensize_4096_batchsize_64"]
class TestHelionKernelWrapper:
    """Test suite for HelionKernelWrapper."""

    def test_get_configured_op_validates_configs_available(self, sample_kernel):
        """Test get_configured_op validates configs are available."""

        def fake_impl(*args, **kwargs):
            return torch.zeros_like(args[0])

        wrapper = HelionKernelWrapper(
            raw_kernel_func=sample_kernel,
            op_name="test_kernel",
            fake_impl=fake_impl,
        )

        def default_picker(args, config_keys):
            return "default"

        wrapper._config_picker = default_picker

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(
            return_value={}
        )  # Empty configs

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            pytest.raises(ValueError, match="No configs available"),
        ):
            wrapper.get_configured_op()

    def test_get_configured_op_validates_config_picker(
        self, sample_kernel, sample_configs
    ):
        """Test get_configured_op validates config picker."""

        def fake_impl(*args, **kwargs):
            return torch.zeros_like(args[0])

        wrapper = HelionKernelWrapper(
            raw_kernel_func=sample_kernel,
            op_name="test_kernel",
            fake_impl=fake_impl,
        )
        # Don't set config picker - should raise assertion error

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            pytest.raises(AssertionError, match="No config picker registered"),
        ):
            wrapper.get_configured_op()

    def test_get_configured_op_returns_cached_kernel(
        self, sample_kernel, sample_configs
    ):
        """Test get_configured_op returns cached ConfiguredHelionKernel."""

        def fake_impl(*args, **kwargs):
            return torch.zeros_like(args[0])

        def default_picker(args, config_keys):
            return "default"

        wrapper = HelionKernelWrapper(
            raw_kernel_func=sample_kernel,
            op_name="test_kernel",
            fake_impl=fake_impl,
        )
        wrapper._config_picker = default_picker

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
        ):
            mock_decorated = Mock()
            mock_kernel.return_value = Mock(return_value=mock_decorated)

            # Two calls must return the same (cached) object.
            result1 = wrapper.get_configured_op()
            result2 = wrapper.get_configured_op()
            assert result1 is result2

    @pytest.mark.skipif(
        _HOP_AVAILABLE, reason="CustomOp path not used when HOP available"
    )
    def test_get_or_register_custom_op_returns_cached_op(
        self, sample_kernel, sample_configs
    ):
        # When the op already exists under torch.ops.vllm_helion, it is
        # returned as-is instead of being re-registered.
        def fake_impl(*args, **kwargs):
            return torch.zeros_like(args[0])

        def default_picker(args, config_keys):
            return "default"

        wrapper = HelionKernelWrapper(
            raw_kernel_func=sample_kernel,
            op_name="test_kernel",
            fake_impl=fake_impl,
        )
        wrapper._config_picker = default_picker

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        existing_op = Mock()
        mock_namespace = Mock()
        mock_namespace.test_kernel = existing_op

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            patch.object(torch.ops, "vllm_helion", mock_namespace),
            patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
        ):
            mock_decorated = Mock()
            mock_kernel.return_value = Mock(return_value=mock_decorated)

            result = wrapper._get_or_register_custom_op()
            assert result is existing_op

    @pytest.mark.skipif(
        _HOP_AVAILABLE, reason="CustomOp path not used when HOP available"
    )
    def test_get_or_register_custom_op_registers_new_op(
        self, sample_kernel, sample_configs
    ):
        # When the op is absent, it is registered exactly once via
        # direct_register_custom_op with the decorated kernel as op_func.
        def fake_impl(*args, **kwargs):
            return torch.zeros_like(args[0])

        def default_picker(args, config_keys):
            return "default"

        wrapper = HelionKernelWrapper(
            raw_kernel_func=sample_kernel,
            op_name="test_kernel",
            fake_impl=fake_impl,
        )
        wrapper._config_picker = default_picker

        mock_config_manager = Mock(spec=ConfigManager)
        mock_config_manager.get_platform_configs = Mock(return_value=sample_configs)

        new_op = Mock()
        registered_ops: dict[str, Mock] = {}

        # Namespace that only exposes ops after registration, mimicking
        # torch.ops lookup behavior.
        class MockNamespace:
            def __getattr__(self, name):
                if name in registered_ops:
                    return registered_ops[name]
                raise AttributeError(name)

        mock_namespace = MockNamespace()

        def register_side_effect(op_name, op_func, **kwargs):
            registered_ops[op_name] = new_op

        with (
            patch(
                "vllm.kernels.helion.config_manager.ConfigManager.get_instance",
                return_value=mock_config_manager,
            ),
            patch(
                "vllm.kernels.helion.utils.get_canonical_gpu_name",
                return_value="nvidia_h200",
            ),
            patch.object(torch.ops, "vllm_helion", mock_namespace),
            patch(
                "vllm.kernels.helion.register.direct_register_custom_op",
                side_effect=register_side_effect,
            ) as mock_register,
            patch("vllm.kernels.helion.register.helion.kernel") as mock_kernel,
        ):
            mock_decorated = Mock()
            mock_kernel.return_value = Mock(return_value=mock_decorated)

            result = wrapper._get_or_register_custom_op()

            mock_register.assert_called_once()
            assert result is new_op
            assert mock_register.call_args[1]["op_func"] is mock_decorated
class TestKernelRegistry:
    """Test suite for kernel registry functionality."""

    def setup_method(self):
        """Save and clear the registry before each test."""
        from vllm.kernels.helion.register import _REGISTERED_KERNELS

        # The registry is module-global; snapshot it so tests can register
        # kernels freely without leaking state across tests.
        self._saved_registry = dict(_REGISTERED_KERNELS)
        _REGISTERED_KERNELS.clear()

    def teardown_method(self):
        """Restore the registry after each test."""
        from vllm.kernels.helion.register import _REGISTERED_KERNELS

        _REGISTERED_KERNELS.clear()
        _REGISTERED_KERNELS.update(self._saved_registry)

    def test_get_registered_kernels_returns_copy(self):
        """Test get_registered_kernels returns copy of registry."""
        result1 = get_registered_kernels()
        result2 = get_registered_kernels()
        # Should be separate objects
        assert result1 is not result2
        # Should have same content
        assert result1 == result2

    def test_get_kernel_by_name_returns_kernel(self):
        """Test get_kernel_by_name returns registered kernel."""
        wrapper = HelionKernelWrapper(
            raw_kernel_func=Mock(),
            op_name="test_kernel",
            fake_impl=Mock(),
        )
        from vllm.kernels.helion.register import _REGISTERED_KERNELS

        # Insert directly into the registry rather than via register_kernel.
        _REGISTERED_KERNELS["test_kernel"] = wrapper
        result = get_kernel_by_name("test_kernel")
        assert result is wrapper

    def test_get_kernel_by_name_returns_none_for_missing(self):
        """Test get_kernel_by_name returns None for missing kernel."""
        result = get_kernel_by_name("nonexistent")
        assert result is None

    def test_register_kernel_auto_generates_fake_impl(self):
        """Test register_kernel auto-generates fake_impl when not provided."""
        with patch("vllm.kernels.helion.register.infer_fake_impl") as mock_infer:
            mock_fake = Mock()
            mock_infer.return_value = mock_fake

            def original_kernel(x):
                return x

            wrapper = register_kernel(original_kernel)
            # helion_settings defaults to None and is forwarded as-is.
            mock_infer.assert_called_once_with(original_kernel, None)
            assert wrapper._fake_impl is mock_fake

    def test_register_kernel_creates_wrapper(self):
        """Test register_kernel creates HelionKernelWrapper."""

        def test_kernel(x):
            return x

        result = register_kernel("test_name")(test_kernel)
        assert isinstance(result, HelionKernelWrapper)
        assert result.op_name == "test_name"
        assert result.raw_kernel_func is test_kernel

    def test_register_kernel_auto_detects_name(self):
        """Test register_kernel uses function name when no name provided."""

        @register_kernel
        def my_test_kernel(x):
            return x

        assert my_test_kernel.op_name == "my_test_kernel"

    def test_register_kernel_registers_in_global_registry(self):
        """Test register_kernel adds wrapper to global registry."""

        @register_kernel
        def test_kernel(x):
            return x

        registered_kernels = get_registered_kernels()
        assert "test_kernel" in registered_kernels
        # The decorated name is bound to the wrapper, which is also the
        # object stored in the registry.
        assert registered_kernels["test_kernel"] is test_kernel

    def test_register_kernel_passes_helion_settings(self):
        """Test register_kernel passes helion_settings to wrapper."""
        mock_settings = Mock()
        mock_settings.to_dict.return_value = {"debug": True}

        @register_kernel("test_name", helion_settings=mock_settings)
        def test_kernel(x):
            return x

        assert test_kernel.helion_settings is mock_settings

    def test_register_kernel_supports_decorator_syntax(self):
        """Test register_kernel works with decorator arguments."""
        mock_fake = Mock()
        wrapper = register_kernel("custom_name", fake_impl=mock_fake)

        def test_kernel(x):
            return x

        result = wrapper(test_kernel)
        assert result.op_name == "custom_name"
        assert result._fake_impl is mock_fake

    def test_register_kernel_bare_decorator(self):
        """Test register_kernel works as bare decorator."""

        @register_kernel
        def test_kernel(x):
            return x

        assert isinstance(test_kernel, HelionKernelWrapper)
        assert test_kernel.op_name == "test_kernel"

    def test_registered_wrapper_can_register_config_picker(self):
        """Test that registered wrapper can register config picker."""

        @register_kernel
        def test_kernel(x):
            return x

        def my_picker(args, config_keys):
            return "default"

        # register_config_picker returns the picker (decorator protocol).
        result = test_kernel.register_config_picker(my_picker)
        assert result is my_picker
        assert test_kernel._config_picker is my_picker

    def test_register_kernel_raises_on_duplicate_registration(self):
        """Test register_kernel raises error on duplicate names."""

        @register_kernel("duplicate_name")
        def kernel1(x):
            return x

        with pytest.raises(ValueError, match="already registered"):

            @register_kernel("duplicate_name")
            def kernel2(x):
                return x

    def test_register_kernel_rejects_autotuner_fn_in_settings(self):
        """Test register_kernel rejects conflicting autotuner_fn."""
        mock_settings = Mock()
        mock_settings.to_dict.return_value = {"autotuner_fn": Mock()}
        with pytest.raises(ValueError, match="uses a custom autotuner"):

            @register_kernel("test", helion_settings=mock_settings)
            def test_kernel(x):
                return x

    def test_register_kernel_warns_with_static_shapes_true(self):
        """Test register_kernel warns when static_shapes=True."""
        mock_settings = Mock()
        mock_settings.to_dict.return_value = {"static_shapes": True}
        with patch("vllm.kernels.helion.register.logger") as mock_logger:

            @register_kernel("test", helion_settings=mock_settings)
            def test_kernel(x):
                return x

            mock_logger.warning.assert_called_once()
            assert "static_shapes=True" in mock_logger.warning.call_args[0][0]

    def test_register_kernel_no_warning_with_static_shapes_false(self):
        """Test register_kernel doesn't warn with static_shapes=False."""
        mock_settings = Mock()
        mock_settings.to_dict.return_value = {"static_shapes": False}
        with patch("vllm.kernels.helion.register.logger") as mock_logger:

            @register_kernel("test", helion_settings=mock_settings)
            def test_kernel(x):
                return x

            # Should not call warning
            mock_logger.warning.assert_not_called()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/helion/test_register.py",
"license": "Apache License 2.0",
"lines": 626,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/helion/test_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for Helion utility functions."""
import pytest
from vllm.kernels.helion.utils import canonicalize_gpu_name
@pytest.mark.parametrize(
    "driver_reported_name,expected",
    [
        # (driver-reported device name, expected canonical platform id)
        ("NVIDIA H200", "nvidia_h200"),
        ("NVIDIA A100-SXM4-80GB", "nvidia_a100"),
        ("NVIDIA H100 80GB HBM3", "nvidia_h100"),
        ("NVIDIA H100 PCIe", "nvidia_h100"),
        ("NVIDIA H100 SXM5", "nvidia_h100"),
        ("NVIDIA GeForce RTX 4090", "nvidia_geforce_rtx_4090"),
        ("AMD Instinct MI300X", "amd_instinct_mi300x"),
        ("Tesla V100-SXM2-32GB", "tesla_v100"),
    ],
)
def test_canonicalize_gpu_name(driver_reported_name, expected):
    """Test GPU name canonicalization.

    Covers both names that only need normalization (lowercase/underscores)
    and variant names that must be collapsed via the alias table.
    """
    assert canonicalize_gpu_name(driver_reported_name) == expected
@pytest.mark.parametrize("invalid_name", ["", " ", "\t", "\n"])
def test_canonicalize_gpu_name_rejects_empty(invalid_name):
    """Empty or whitespace-only GPU names must raise ValueError."""
    with pytest.raises(ValueError) as excinfo:
        canonicalize_gpu_name(invalid_name)
    assert "cannot be empty" in str(excinfo.value)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/helion/test_utils.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/kernels/helion/register.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
vLLM Helion kernel registration with pre-tuned config selection.
This module leverages Helion's internal config selection infrastructure to use
pre-tuned configs instead of runtime autotuning.
How Helion Normally Works
-------------------------
For each kernel invocation, Helion:
1. Computes a cache key from input arguments
2. Looks up the key in its internal compilation cache
3. On cache miss, runs autotuning to find the best config
4. Compiles and caches the kernel with that config
How We Override It
------------------
We override two Helion hooks to use pre-tuned configs:
1. **key**: We provide a key function (derived from config_picker) that
computes cache keys matching our pre-tuned config keys. This ensures Helion's
internal cache uses keys that correspond to configs we've prepared.
2. **autotuner_fn**: We provide PresetConfigSearch which, instead of autotuning,
simply returns the pre-tuned config for the computed key. On cache miss,
Helion calls our autotuner which returns the author-prepared config.
Both hooks use the same config_picker logic to ensure the cache key computed
by key matches the config returned by the autotuner.
Key Classes
-----------
- HelionKernelWrapper: Wraps raw kernel + config_picker, creates configured kernels
- ConfiguredHelionKernel: Platform-specific kernel with pre-tuned configs
- PresetConfigSearch: Custom autotuner that returns pre-tuned configs
"""
from collections.abc import Callable
from typing import Any, cast, overload
import torch
from torch.library import Library
from vllm.logger import init_logger
from vllm.utils.import_utils import has_helion
from vllm.utils.torch_utils import direct_register_custom_op
if not has_helion():
raise ImportError(
"register module requires helion to be installed. "
"Install it with: pip install helion"
)
import helion
from helion._compat import requires_torch_version
from helion.autotuner.base_search import BaseAutotuner
from helion.runtime.config import Config
from helion.runtime.settings import default_autotuner_fn
# TODO(gmagogsfm): Remove CustomOp fallback path (_get_or_register_custom_op,
# vllm_helion_lib, direct_register_custom_op) once vLLM requires PyTorch >= 2.11.
_HOP_AVAILABLE = requires_torch_version("2.11")
if _HOP_AVAILABLE:
import torch.utils._pytree as pytree
from helion._compiler._dynamo.higher_order_ops import (
helion_kernel_side_table,
helion_kernel_wrapper_mutation,
)
from helion._compiler._dynamo.variables import infer_output_spec
from torch.fx.experimental.proxy_tensor import (
disable_proxy_modes_tracing,
get_proxy_mode,
)
logger = init_logger(__name__)
vllm_helion_lib = Library("vllm_helion", "FRAGMENT") # noqa
def validate_helion_settings(
    helion_settings: "helion.Settings | None", op_name: str
) -> None:
    """Sanity-check user-supplied Helion settings for kernel ``op_name``.

    Rejects a user-provided ``autotuner_fn`` (config selection is owned by
    the registered config picker) and warns when ``static_shapes=True`` is
    explicitly requested.

    Raises:
        ValueError: if the settings carry a non-default ``autotuner_fn``.
    """
    if helion_settings is None:
        return

    settings = helion_settings.to_dict()

    # A custom autotuner would bypass the preset-config machinery entirely.
    custom_autotuner = settings.get("autotuner_fn")
    if custom_autotuner is not None and custom_autotuner is not default_autotuner_fn:
        raise ValueError(
            f"HelionKernelWrapper for '{op_name}' uses a custom autotuner via "
            f"config picker. Remove 'autotuner_fn' from helion_settings and use "
            f"@{op_name}.register_config_picker instead."
        )

    # Most vLLM ops see variable batch sizes / sequence lengths, so an
    # explicit static_shapes=True is almost always a mistake.
    if settings.get("static_shapes") is True:
        logger.warning(
            "Kernel '%s' has static_shapes=True in helion_settings. "
            "Most vLLM ops require dynamic shapes for variable batch sizes "
            "and sequence lengths. Consider removing this setting.",
            op_name,
        )
def create_helion_decorated_kernel(
    raw_kernel_func: Callable,
    helion_settings: "helion.Settings | None" = None,
    extra_kwargs: dict[str, Any] | None = None,
) -> Any:
    """Apply the ``helion.kernel`` decorator to ``raw_kernel_func``.

    Keyword arguments are built from ``helion_settings`` (if any), then
    ``static_shapes`` is forced to False unless explicitly True — vLLM
    needs dynamic shapes for variable batch sizes and sequence lengths —
    and finally ``extra_kwargs`` (e.g. key/autotuner overrides) are merged
    on top.
    """
    merged: dict[str, Any] = dict(helion_settings.to_dict()) if helion_settings else {}
    # Anything other than an explicit True is normalized to False.
    if merged.get("static_shapes") is not True:
        merged["static_shapes"] = False
    if extra_kwargs:
        merged.update(extra_kwargs)
    return helion.kernel(**merged)(raw_kernel_func)
class PresetConfigSearch(BaseAutotuner):
    """Custom autotuner that uses a preset config selector instead of autotuning.

    Helion calls ``autotune()`` on a compilation-cache miss; rather than
    searching, this returns the pre-tuned config chosen by ``config_selector``
    for the captured kernel arguments.
    """

    def __init__(
        self,
        args: tuple[Any, ...],
        config_selector: Callable[[tuple[Any, ...]], Config],
    ):
        # Kernel arguments for which a config is being requested.
        self.args = args
        # Maps an argument tuple to a pre-tuned Config (built by
        # ConfiguredHelionKernel._create_config_selector).
        self.config_selector = config_selector

    def autotune(self, *, skip_cache: bool = False) -> Config:
        # skip_cache is accepted only for interface compatibility with
        # BaseAutotuner; there is no search or cache here.
        return self.config_selector(self.args)
class ConfiguredHelionKernel:
    """A configured Helion kernel bound to a specific platform.

    On construction this loads the pre-tuned configs for the current GPU,
    derives a cache-key function and a config selector from the registered
    config picker, and decorates the raw kernel with ``helion.kernel`` so
    that Helion's internal cache uses our keys and our preset configs
    instead of runtime autotuning (see module docstring).
    """

    def __init__(
        self,
        op_name: str,
        config_picker: Callable[[tuple[Any, ...], list[str]], str | None] | None,
        raw_kernel_func: Callable,
        helion_settings: "helion.Settings | None" = None,
    ):
        """
        Args:
            op_name: Registered kernel name, used to look up platform configs.
            config_picker: Maps (kernel args, available config keys) to the
                config key to use; must be non-None by the time the decorated
                kernel is built.
            raw_kernel_func: The undecorated Helion kernel function.
            helion_settings: Optional settings forwarded to ``helion.kernel``.
        """
        self.op_name = op_name
        self.config_picker = config_picker
        self.raw_kernel_func = raw_kernel_func
        self.helion_settings = helion_settings
        # Building the decorated kernel also loads platform configs and may
        # raise if no picker or no configs are available.
        self._decorated_kernel = self._create_decorated_kernel()

    def __call__(self, *args, **kwargs):
        """Invoke the decorated (config-preset) Helion kernel."""
        return self._decorated_kernel(*args, **kwargs)

    def _create_key_computer(self):
        """
        Create a key computer function derived from the config picker.
        The returned function receives kernel arguments unpacked (*args) to match
        Helion's key signature (called as self._key_fn(*args)).
        """
        if self.config_picker is None:
            raise RuntimeError(
                f"No config picker registered for kernel '{self.op_name}'. "
                f"Use @{self.op_name}.register_config_picker to register one."
            )
        # After None check, config_picker is guaranteed to be non-None
        assert self.config_picker is not None

        def key_computer(*args):
            config_keys = list(self.configs.keys())
            # Cast is safe because we checked for None above
            config_picker = cast(
                Callable[[tuple[Any, ...], list[str]], str | None], self.config_picker
            )
            selected_key = config_picker(args, config_keys)
            if selected_key:
                return selected_key
            # Picker declined to choose: fall back to "default" if present.
            return "default" if "default" in self.configs else None

        return key_computer

    def _create_config_selector(self, key_computer):
        """Build the autotuner-side selector mapping raw args to a Config.

        Uses the same ``key_computer`` as Helion's cache key so the config
        returned on a cache miss always matches the key that caused it.
        """
        def config_selector(args):
            # args is a tuple; key_computer expects unpacked args
            selected_config_key = key_computer(*args)
            if selected_config_key is None:
                raise ValueError(
                    f"Config picker returned None for kernel '{self.op_name}' "
                    f"with available config keys: {list(self.configs.keys())}"
                )
            if selected_config_key not in self.configs:
                raise ValueError(
                    f"Config picker returned invalid config key "
                    f"'{selected_config_key}' for kernel '{self.op_name}'. "
                    f"Available keys: {list(self.configs.keys())}"
                )
            return self.configs[selected_config_key]

        return config_selector

    def _load_platform_configs(self) -> None:
        """Load pre-tuned configs for this op on the current GPU.

        Sets ``self.platform`` and ``self.configs``; raises ValueError when
        no configs exist for this (op, platform) pair.
        """
        from vllm.kernels.helion.config_manager import ConfigManager
        from vllm.kernels.helion.utils import get_canonical_gpu_name

        self.platform = get_canonical_gpu_name()
        config_manager = ConfigManager.get_instance()
        self.configs = config_manager.get_platform_configs(self.op_name, self.platform)
        if not self.configs:
            raise ValueError(
                f"No configs available for kernel '{self.op_name}' "
                f"on platform '{self.platform}'"
            )

    def _create_decorated_kernel(self) -> Callable[..., Any]:
        """Decorate the raw kernel, overriding Helion's key and autotuner.

        Both hooks derive from the same config picker so the cache key and
        the config handed back on a miss stay consistent.
        """
        self._load_platform_configs()
        key_computer = self._create_key_computer()
        config_selector = self._create_config_selector(key_computer)
        extra_kwargs = {
            "autotuner_fn": lambda _, args: PresetConfigSearch(args, config_selector),
            "key": key_computer,
        }
        logger.debug(
            "Creating decorated kernel %s with custom autotuner on platform %s",
            self.op_name,
            self.platform,
        )
        return create_helion_decorated_kernel(
            self.raw_kernel_func, self.helion_settings, extra_kwargs
        )
class HelionKernelWrapper:
    """Wrapper for Helion kernels with pre-tuned config selection and HOP support.

    Holds the raw kernel plus its registered config picker / input generator,
    lazily builds a platform-specific ConfiguredHelionKernel, and dispatches
    calls through one of three paths (see ``__call__``).
    """

    def __init__(
        self,
        raw_kernel_func: Callable,
        op_name: str,
        fake_impl: Callable,
        helion_settings: "helion.Settings | None" = None,
    ):
        """
        Args:
            raw_kernel_func: Undecorated Helion kernel function.
            op_name: Unique name used for registration and config lookup.
            fake_impl: Meta/fake implementation used for tracing on the
                CustomOp fallback path.
            helion_settings: Optional settings forwarded to ``helion.kernel``.

        Raises:
            ValueError: if ``helion_settings`` carries a custom autotuner_fn
                (via validate_helion_settings).
        """
        # Validate helion_settings doesn't conflict with our custom autotuner
        validate_helion_settings(helion_settings, op_name)
        self.raw_kernel_func = raw_kernel_func
        self.op_name = op_name
        self._fake_impl = fake_impl
        self.helion_settings = helion_settings
        # Set later via register_config_picker / register_input_generator.
        self._config_picker: (
            Callable[[tuple[Any, ...], list[str]], str | None] | None
        ) = None
        # Cached platform-bound kernel, built on first use.
        self._configured_kernel: ConfiguredHelionKernel | None = None
        self._input_generator: Callable[[], dict[str, tuple[Any, ...]]] | None = None

    def __call__(self, *args, **kwargs):
        """Dispatch a kernel call via CustomOp fallback, HOP tracing, or eager."""
        # CustomOp fallback: register as torch custom op for torch.compile
        # compatibility on older PyTorch lacking HOP/EffectType support
        if not _HOP_AVAILABLE:
            custom_op = self._get_or_register_custom_op()
            return custom_op(*args, **kwargs)
        # HOP tracing: record HigherOrderOp in the FX graph
        if get_proxy_mode() is not None:
            return self._call_via_hop(args, kwargs)
        # Eager: run the configured kernel directly
        return self.get_configured_op()(*args, **kwargs)

    def _call_via_hop(
        self,
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
    ) -> Any:
        """Record the kernel as a Helion HigherOrderOp during FX tracing.

        Splits args into constant vs. tensor, infers the output spec outside
        proxy tracing, emits the HOP, then rebuilds the kernel's structured
        output from the HOP's flat results.
        """
        kernel = self.get_configured_op()._decorated_kernel
        kernel_idx = helion_kernel_side_table.add_kernel(kernel)
        constant_args, tensor_args = self._partition_args(kernel, args, kwargs)
        all_named = {**constant_args, **tensor_args}
        # Rebuild the full positional argument tuple in signature order,
        # filling unsupplied parameters from their defaults.
        full_args = tuple(
            all_named.get(n, p.default)
            for n, p in kernel.signature.parameters.items()  # type: ignore[attr-defined]
            if n in all_named or p.default is not p.empty
        )
        with disable_proxy_modes_tracing():
            output_spec = infer_output_spec(kernel, full_args)
        hop_result = helion_kernel_wrapper_mutation(
            kernel_idx=kernel_idx,
            constant_args=constant_args,
            tensor_args=tensor_args,
            output_spec=output_spec,
        )
        tree_spec_str = output_spec.get("tree_spec_str")
        if tree_spec_str is None:
            # Kernel produces no structured output.
            return None
        tree_spec = pytree.treespec_loads(tree_spec_str)
        hop_iter = iter(hop_result)
        reconstructed = []
        for spec in output_spec["leaf_specs"]:
            # Constant (non-symbolic) scalars are embedded in the spec rather
            # than returned by the HOP, so they must not consume a HOP result.
            is_constant_scalar = spec["type"] == "scalar" and not isinstance(
                spec.get("scalar_value"), torch.SymInt
            )
            if is_constant_scalar:
                reconstructed.append(spec["scalar_value"])
            else:
                reconstructed.append(next(hop_iter))
        return pytree.tree_unflatten(reconstructed, tree_spec)

    @staticmethod
    def _partition_args(
        kernel: Any,
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Split call arguments into (constant, tensor) dicts keyed by name.

        Positional arguments are named via the kernel's signature; anything
        that is a torch.Tensor goes into tensor_args, everything else into
        constant_args.
        """
        constant_args: dict[str, Any] = {}
        tensor_args: dict[str, Any] = {}
        params = list(kernel.signature.parameters.keys())
        for i, val in enumerate(args):
            name = params[i]
            if isinstance(val, torch.Tensor):
                tensor_args[name] = val
            else:
                constant_args[name] = val
        for name, val in kwargs.items():
            if isinstance(val, torch.Tensor):
                tensor_args[name] = val
            else:
                constant_args[name] = val
        return constant_args, tensor_args

    def register_config_picker(
        self, picker_func: Callable[[tuple[Any, ...], list[str]], str | None]
    ) -> Callable[[tuple[Any, ...], list[str]], str | None]:
        """Register the config picker (decorator usage); returns the picker."""
        self._config_picker = picker_func
        return picker_func

    def register_input_generator(
        self, generator_func: Callable[[], dict[str, tuple[Any, ...]]]
    ) -> Callable[[], dict[str, tuple[Any, ...]]]:
        """
        Register a function to generate inputs for autotuning and benchmarking.
        Args:
            generator_func: Function that returns dict[str, tuple] where:
                - key: Configuration identifier (e.g., "4096", "hidden_4096")
                - value: Tuple of arguments to pass to the kernel
        Returns:
            The registered function (for decorator usage)
        Example:
            @kernel_wrapper.register_input_generator
            def generate_inputs():
                return {
                    "4096": (torch.randn(4096, device="cuda"), 0.5),
                    "8192": (torch.randn(8192, device="cuda"), 0.5),
                }
        """
        self._input_generator = generator_func
        return generator_func

    def get_inputs(self) -> dict[str, tuple[Any, ...]]:
        """Return generated benchmark/autotune inputs; raises if none registered."""
        if self._input_generator is None:
            raise NotImplementedError(
                f"No input generator registered for kernel '{self.op_name}'. "
                f"Use @{self.op_name}.register_input_generator to register one."
            )
        return self._input_generator()

    def run_autotune(
        self,
        inputs: tuple[Any, ...],
        autotune_effort: str = "quick",
    ) -> Config:
        """Run autotuning for a single input configuration."""
        # Uses a separately decorated kernel so real autotuning runs instead
        # of the preset-config selector.
        extra_kwargs = {"autotune_effort": autotune_effort}
        autotune_kernel = create_helion_decorated_kernel(
            self.raw_kernel_func, self.helion_settings, extra_kwargs
        )
        return autotune_kernel.autotune(inputs)

    def get_configured_op(self) -> ConfiguredHelionKernel:
        """Lazily build and cache the platform-bound ConfiguredHelionKernel."""
        assert self._config_picker is not None, (
            f"No config picker registered for kernel '{self.op_name}'. "
            f"Use @{self.op_name}.register_config_picker to register one."
        )
        if self._configured_kernel is None:
            self._configured_kernel = ConfiguredHelionKernel(
                op_name=self.op_name,
                config_picker=self._config_picker,
                raw_kernel_func=self.raw_kernel_func,
                helion_settings=self.helion_settings,
            )
        return self._configured_kernel

    def _get_or_register_custom_op(self) -> Any:
        """CustomOp fallback (PyTorch < 2.11): register once, then reuse.

        Registers the configured kernel under torch.ops.vllm_helion.<op_name>
        if not already present and returns the registered op.
        """
        if hasattr(torch.ops.vllm_helion, self.op_name):
            return getattr(torch.ops.vllm_helion, self.op_name)
        configured_kernel = self.get_configured_op()
        logger.info("Registering op: vllm_helion::%s", self.op_name)
        direct_register_custom_op(
            op_name=self.op_name,
            op_func=configured_kernel._decorated_kernel,
            mutates_args=None,
            fake_impl=self._fake_impl,
            target_lib=vllm_helion_lib,
        )
        return getattr(torch.ops.vllm_helion, self.op_name)
# Global registry for tracking all registered HelionKernelWrapper instances
_REGISTERED_KERNELS: dict[str, HelionKernelWrapper] = {}


def get_registered_kernels() -> dict[str, HelionKernelWrapper]:
    """Return a shallow copy of the registry (callers can mutate it freely)."""
    return dict(_REGISTERED_KERNELS)


def get_kernel_by_name(kernel_name: str) -> HelionKernelWrapper | None:
    """Look up a registered kernel wrapper by name; None when absent."""
    return _REGISTERED_KERNELS.get(kernel_name, None)
def infer_fake_impl(
    kernel_func: Callable,
    helion_settings: "helion.Settings | None" = None,
) -> Callable:
    """Derive a fake (meta) implementation for ``kernel_func``.

    The returned callable compiles the kernel with its default config but
    runs it with a no-op launcher, so only output allocation happens —
    suitable for tracing/fake-tensor execution.
    """

    def helion_fake_kernel(*args, **kwargs):
        settings_kwargs = helion_settings.to_dict() if helion_settings else {}
        decorated = helion.kernel(**settings_kwargs)(kernel_func)
        # Bind with args to get config_spec, then get a valid default config
        bound = decorated.bind(args)
        runner = bound.compile_config(bound.config_spec.default_config())
        # The stub launcher skips the actual GPU launch.
        return runner(*args, **kwargs, _launcher=lambda *a, **kw: None)

    return helion_fake_kernel
# Overloads are necessary for proper mypy type inference.
# Without overloads, the union return type HelionKernelWrapper | Callable[...]
# causes mypy to complain about missing attributes when tests do:
#     wrapper = register_kernel(func)   # Should return HelionKernelWrapper
#     wrapper._fake_impl   # mypy error: "Callable has no attribute _fake_impl"
# The overloads tell mypy the exact return type based on the argument pattern.
@overload
def register_kernel(
    op_name_or_func: Callable,
    *,
    fake_impl: Callable | None = None,
    helion_settings: "helion.Settings | None" = None,
) -> HelionKernelWrapper: ...


@overload
def register_kernel(
    op_name_or_func: str | None = None,
    *,
    fake_impl: Callable | None = None,
    helion_settings: "helion.Settings | None" = None,
) -> Callable[[Callable], HelionKernelWrapper]: ...


def register_kernel(
    op_name_or_func: str | Callable | None = None,
    *,
    fake_impl: Callable | None = None,
    helion_settings: "helion.Settings | None" = None,
) -> HelionKernelWrapper | Callable[[Callable], HelionKernelWrapper]:
    """
    Decorator that registers a Helion kernel function as a HelionKernelWrapper.

    Supports bare usage (``@register_kernel``) and parameterized usage
    (``@register_kernel("name", ...)``). The wrapper is stored in the global
    registry under the explicit name or, failing that, the function's
    ``__name__``. When no ``fake_impl`` is supplied, one is auto-generated.

    Raises:
        ValueError: if a kernel with the same name is already registered.
    """

    def decorator(kernel_func: Callable) -> HelionKernelWrapper:
        explicit_name = op_name_or_func if isinstance(op_name_or_func, str) else None
        resolved_name = explicit_name if explicit_name else kernel_func.__name__
        if resolved_name in _REGISTERED_KERNELS:
            raise ValueError(
                f"Helion kernel '{resolved_name}' is already registered. "
                f"Use a different op_name or check for duplicate registrations."
            )
        resolved_fake_impl = fake_impl
        if resolved_fake_impl is None:
            resolved_fake_impl = infer_fake_impl(kernel_func, helion_settings)
            logger.debug(
                "Auto-generated fake_impl for Helion kernel '%s'",
                kernel_func.__name__,
            )
        wrapper = HelionKernelWrapper(
            raw_kernel_func=kernel_func,
            op_name=resolved_name,
            fake_impl=resolved_fake_impl,
            helion_settings=helion_settings,
        )
        _REGISTERED_KERNELS[resolved_name] = wrapper
        logger.info(
            "Registered Helion kernel '%s' as HelionKernelWrapper",
            kernel_func.__name__,
        )
        return wrapper

    # Bare decorator usage: @register_kernel applied directly to the function.
    if callable(op_name_or_func) and not isinstance(op_name_or_func, str):
        return decorator(op_name_or_func)
    # Parameterized usage: @register_kernel(...) — hand back the decorator.
    return decorator
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/kernels/helion/register.py",
"license": "Apache License 2.0",
"lines": 442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/kernels/helion/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Utility functions for Helion kernel management."""
import logging
from vllm.platforms import current_platform
logger = logging.getLogger(__name__)
# Maps known variant GPU names (after lowercase/underscore normalization)
# to their canonical form.
#
# Names that are already canonical after normalization are NOT listed here.
# For example, "NVIDIA H200" normalizes to "nvidia_h200" which needs no
# further mapping, and AMD ROCm names like "AMD_Instinct_MI300X" come from
# a controlled lookup table in rocm.py and normalize cleanly to
# "amd_instinct_mi300x". Only names with variant suffixes (form factor,
# memory size, memory type, etc.) that should be stripped need entries.
#
# To add a new GPU variant: run `canonicalize_gpu_name()` without the alias
# to see the normalized name, then add a mapping here if it contains variant
# suffixes that should be stripped (e.g. Blackwell/Rubin variants).
_GPU_NAME_ALIASES: dict[str, str] = {
# H100 variants
"nvidia_h100_pcie": "nvidia_h100",
"nvidia_h100_sxm5": "nvidia_h100",
"nvidia_h100_80gb_hbm3": "nvidia_h100",
"nvidia_h100_nvl": "nvidia_h100",
# H200 variants
"nvidia_h200_nvl": "nvidia_h200",
"nvidia_h200_141gb_hbm3e": "nvidia_h200",
# A100 variants
"nvidia_a100_sxm4_80gb": "nvidia_a100",
"nvidia_a100_sxm4_40gb": "nvidia_a100",
"nvidia_a100_pcie_80gb": "nvidia_a100",
"nvidia_a100_pcie_40gb": "nvidia_a100",
"nvidia_a100_80gb_pcie": "nvidia_a100",
# V100 variants (Tesla-branded)
"tesla_v100_sxm2_32gb": "tesla_v100",
"tesla_v100_sxm2_16gb": "tesla_v100",
"tesla_v100_pcie_32gb": "tesla_v100",
"tesla_v100_pcie_16gb": "tesla_v100",
# AMD ROCm variants (from _ROCM_DEVICE_ID_NAME_MAP in rocm.py)
"amd_instinct_mi300x_hf": "amd_instinct_mi300x",
# ADD MORE HERE
}
def get_gpu_name(device_id: int | None = None) -> str:
if device_id is None:
logger.warning(
"get_gpu_name() called without device_id, defaulting to 0. "
"This may return the wrong device name in multi-node setups."
)
device_id = 0
return current_platform.get_device_name(device_id)
def canonicalize_gpu_name(name: str) -> str:
"""
Canonicalize GPU name for use as a platform identifier.
Converts to lowercase, replaces spaces and hyphens with underscores,
and maps known variant names to their canonical form via _GPU_NAME_ALIASES.
e.g., "NVIDIA H100 80GB HBM3" -> "nvidia_h100"
"NVIDIA A100-SXM4-80GB" -> "nvidia_a100"
"AMD Instinct MI300X" -> "amd_instinct_mi300x"
"""
if not name or not name.strip():
raise ValueError("GPU name cannot be empty")
name = name.lower()
name = name.replace(" ", "_")
name = name.replace("-", "_")
if name in _GPU_NAME_ALIASES:
return _GPU_NAME_ALIASES[name]
return name
def get_canonical_gpu_name(device_id: int | None = None) -> str:
return canonicalize_gpu_name(get_gpu_name(device_id))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/kernels/helion/utils.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/nvfp4_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from enum import Enum
import torch
import vllm.envs as envs
from vllm._custom_ops import (
cutlass_scaled_fp4_mm,
cutlass_scaled_mm_supports_fp4,
scaled_fp4_quant,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
apply_fp4_marlin_linear,
is_fp4_marlin_supported,
prepare_fp4_layer_for_marlin,
)
from vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils import (
run_nvfp4_emulations,
)
from vllm.platforms import current_platform
from vllm.utils.flashinfer import flashinfer_scaled_fp4_mm, has_flashinfer
from vllm.utils.math_utils import round_up
logger = init_logger(__name__)
class NvFp4LinearBackend(Enum):
    """GEMM backends for NVFP4-quantized linear layers.

    The string values are the user-facing names accepted by the
    VLLM_NVFP4_GEMM_BACKEND environment variable (the enum is constructed
    by value in select_nvfp4_linear_backend).
    """

    # vLLM's in-tree CUTLASS FP4 GEMM
    VLLM_CUTLASS = "cutlass"
    # FlashInfer-provided kernels (CUTLASS / TRTLLM / cuDNN variants)
    FLASHINFER_CUTLASS = "flashinfer-cutlass"
    FLASHINFER_TRTLLM = "flashinfer-trtllm"
    FLASHINFER_CUDNN = "flashinfer-cudnn"
    # fbgemm-gpu-genai f4f4bf16 operator
    FBGEMM = "fbgemm"
    # Marlin fallback for GPUs without native FP4 GEMM support
    MARLIN = "marlin"
    # Compile-time emulation path (no real FP4 GEMM)
    EMULATION = "emulation"
def select_nvfp4_linear_backend() -> NvFp4LinearBackend:
    """
    Select the best available NVFP4 GEMM backend based on environment
    configuration and platform capabilities.

    Priority: VLLM_USE_FBGEMM, then VLLM_USE_NVFP4_CT_EMULATIONS, then an
    explicit VLLM_NVFP4_GEMM_BACKEND value; otherwise auto-select
    FlashInfer-CUTLASS -> vLLM CUTLASS -> Marlin.

    Raises:
        ImportError: if FBGEMM is requested but fbgemm_gpu is not installed.
        ValueError: if auto-selection finds no usable backend.
        AssertionError: if an explicitly chosen backend is unsupported here.
    """
    backend: NvFp4LinearBackend | None = None
    if envs.VLLM_USE_FBGEMM:
        # Explicit opt-in: fail fast if the package is missing.
        try:
            import fbgemm_gpu  # noqa: F401
        except ImportError as exc:
            raise ImportError(
                "Backend fbgemm requires fbgemm.f4f4bf16 operator, "
                "Please install with: pip install fbgemm-gpu-genai"
            ) from exc
        backend = NvFp4LinearBackend.FBGEMM
    elif envs.VLLM_USE_NVFP4_CT_EMULATIONS:
        backend = NvFp4LinearBackend.EMULATION
    elif envs.VLLM_NVFP4_GEMM_BACKEND is None:
        # Auto-select best available backend
        # NOTE(review): capability 100 presumably means SM 10.0-class
        # devices — confirm against current_platform.has_device_capability.
        if current_platform.has_device_capability(100) and has_flashinfer():
            backend = NvFp4LinearBackend.FLASHINFER_CUTLASS
        elif cutlass_fp4_supported():
            backend = NvFp4LinearBackend.VLLM_CUTLASS
        elif is_fp4_marlin_supported():
            backend = NvFp4LinearBackend.MARLIN
    else:
        # User pinned a backend by value via VLLM_NVFP4_GEMM_BACKEND.
        backend = NvFp4LinearBackend(envs.VLLM_NVFP4_GEMM_BACKEND)
    # Validate that the backend is supported
    if backend in (
        NvFp4LinearBackend.FLASHINFER_CUTLASS,
        NvFp4LinearBackend.FLASHINFER_TRTLLM,
        NvFp4LinearBackend.FLASHINFER_CUDNN,
    ):
        assert has_flashinfer(), f"FlashInfer is required for {backend}"
    elif backend == NvFp4LinearBackend.VLLM_CUTLASS:
        assert cutlass_fp4_supported(), f"Cutlass is required for {backend}"
    elif backend == NvFp4LinearBackend.MARLIN:
        assert is_fp4_marlin_supported(), f"Marlin is required for {backend}"
    elif backend is None:
        # Auto-selection fell through every branch: nothing usable here.
        raise ValueError(
            f"No NVFP4 GEMM backend selected, "
            f"available backends: {list(NvFp4LinearBackend)}"
        )
    logger.info_once(f"Using {backend} for NVFP4 GEMM")
    return backend
def prepare_weights_for_nvfp4_flashinfer_trtllm(
    weight: torch.Tensor,
    weight_scale: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Shuffle weights and block scales into FlashInfer TRTLLM FP4 layout.

    Both tensors are reinterpreted as uint8 for the shuffle; the scales are
    restored to their original shape and fp8-e4m3 view afterwards.
    """
    from flashinfer import shuffle_matrix_a, shuffle_matrix_sf_a

    # Row-shuffle granularity used by the TRTLLM FP4 GEMM epilogue.
    epilogue_tile_m = 128
    weight_shuffled = shuffle_matrix_a(weight.view(torch.uint8), epilogue_tile_m)
    scale_shuffled = shuffle_matrix_sf_a(
        weight_scale.view(torch.uint8), epilogue_tile_m
    )
    scale_shuffled = scale_shuffled.reshape(weight_scale.shape).view(
        torch.float8_e4m3fn
    )
    return weight_shuffled, scale_shuffled
def prepare_weights_for_nvfp4_cutlass(
    weight: torch.Tensor,
    weight_scale: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, int]:
    """
    Prepare weights and scales for CUTLASS/FlashInfer-CUTLASS FP4 GEMM.

    Pads the weight for alignment (K and N divisible by 32) and swizzles the
    block scales; also returns the number of padding columns added so the
    caller can record it on the layer.
    """
    padded_weight, padding_cols = pad_nvfp4_weight_for_cutlass(weight)
    swizzled_scale = swizzle_blockscale(weight_scale)
    return padded_weight, swizzled_scale, padding_cols
def prepare_weights_for_nvfp4_fbgemm(
    weight: torch.Tensor,
    weight_scale: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Prepare weights and scales for the FBGEMM FP4 GEMM backend.

    The weight passes through untouched; the block scales are swizzled,
    flattened, and reinterpreted as uint8 as FBGEMM expects.
    """
    flat_scale = swizzle_blockscale(weight_scale).view(-1).view(torch.uint8)
    return weight, flat_scale
def convert_to_nvfp4_linear_kernel_format(
    backend: NvFp4LinearBackend,
    layer: torch.nn.Module,
) -> None:
    """Repack ``layer``'s NVFP4 weight/scale tensors, in place, into the
    layout required by the selected GEMM backend."""
    assert layer.weight_scale.dtype == torch.float8_e4m3fn, (
        "Weight Block scale must be represented as FP8-E4M3"
    )

    def _install(new_weight: torch.Tensor, new_scale: torch.Tensor) -> None:
        # Re-register the repacked tensors as frozen parameters.
        layer.weight = torch.nn.Parameter(new_weight, requires_grad=False)
        layer.weight_scale = torch.nn.Parameter(new_scale, requires_grad=False)

    # No K-dimension padding unless the CUTLASS-style path sets one below.
    layer.weights_padding_cols = 0
    if backend == NvFp4LinearBackend.MARLIN:
        prepare_fp4_layer_for_marlin(layer)
    elif backend == NvFp4LinearBackend.FLASHINFER_TRTLLM:
        _install(
            *prepare_weights_for_nvfp4_flashinfer_trtllm(
                layer.weight.data, layer.weight_scale.data
            )
        )
    elif backend == NvFp4LinearBackend.FBGEMM:
        _install(
            *prepare_weights_for_nvfp4_fbgemm(
                layer.weight.data, layer.weight_scale.data
            )
        )
    elif backend in (
        NvFp4LinearBackend.VLLM_CUTLASS,
        NvFp4LinearBackend.FLASHINFER_CUTLASS,
        NvFp4LinearBackend.FLASHINFER_CUDNN,
    ):
        new_weight, new_scale, padding_cols = prepare_weights_for_nvfp4_cutlass(
            layer.weight.data, layer.weight_scale.data
        )
        _install(new_weight, new_scale)
        layer.weights_padding_cols = padding_cols
def apply_nvfp4_linear(
    backend: NvFp4LinearBackend,
    layer: torch.nn.Module,
    x: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    """
    Apply NVFP4 linear transformation using the specified backend.

    Args:
        backend: Which NVFP4 GEMM implementation to dispatch to.
        layer: Module carrying the repacked `weight`, `weight_scale`,
            global scales, `alpha`, and partition sizes (see
            `convert_to_nvfp4_linear_kernel_format`).
        x: Activation tensor; its dtype determines the output dtype.
        bias: Optional bias added to the result.

    Returns:
        The linear output, reshaped to ``[*x.shape[:-1], output_size]``.
    """
    weight = layer.weight
    weight_scale = layer.weight_scale
    weight_global_scale = layer.weight_global_scale
    input_global_scale_inv = layer.input_global_scale_inv
    alpha = layer.alpha
    output_size = layer.output_size_per_partition
    input_size = layer.input_size_per_partition
    # Marlin and emulation have their own end-to-end paths and return early;
    # all remaining backends share the quantize -> matmul -> slice flow below.
    if backend == NvFp4LinearBackend.MARLIN:
        return apply_fp4_marlin_linear(
            input=x,
            weight=weight,
            weight_scale=weight_scale,
            weight_global_scale=weight_global_scale,
            workspace=layer.workspace,
            size_n=output_size,
            size_k=input_size,
            bias=bias,
        )
    elif backend == NvFp4LinearBackend.EMULATION:
        out = run_nvfp4_emulations(
            x=x,
            input_global_scale=input_global_scale_inv,
            weight=weight,
            weight_scale_swizzled=weight_scale,
            weight_global_scale=weight_global_scale,
        )
        if bias is not None:
            out = out + bias
        return out
    output_dtype = x.dtype
    output_shape = [*x.shape[:-1], output_size]
    # Quantize BF16 or FP16 to (FP4 and interleaved block scale)
    x_fp4, x_blockscale = scaled_fp4_quant(
        x, input_global_scale_inv, is_sf_swizzled_layout=True, backend=backend.value
    )
    # Validate dtypes
    assert x_fp4.dtype == torch.uint8
    assert weight.dtype == torch.uint8
    assert x_blockscale.dtype == torch.float8_e4m3fn
    # weight_scale is fp8 for most backends, but uint8 for fbgemm
    assert weight_scale.dtype in (torch.float8_e4m3fn, torch.uint8)
    assert alpha.dtype == torch.float32
    # Pad activations to match weight K-dimension padding
    weights_padding_cols = getattr(layer, "weights_padding_cols", 0)
    x_fp4 = pad_nvfp4_activation_for_cutlass(x_fp4, weights_padding_cols)
    # Prepare args for the matmul
    mm_args = (
        x_fp4,
        weight,
        x_blockscale,
        weight_scale,
        alpha,
        output_dtype,
    )
    # Call the appropriate backend
    if backend.value.startswith("flashinfer-"):
        # All FlashInfer variants share one entry point, selected by suffix
        # (e.g. "flashinfer-cutlass" -> backend="cutlass").
        backend_name = backend.value[len("flashinfer-") :]
        out = flashinfer_scaled_fp4_mm(*mm_args, backend=backend_name)
    elif backend == NvFp4LinearBackend.FBGEMM:
        out = torch.ops.fbgemm.f4f4bf16(
            x_fp4,
            weight,
            x_blockscale.view(-1).view(torch.uint8),
            weight_scale,
            alpha,
            use_mx=False,
        ).to(output_dtype)
    else:
        assert backend == NvFp4LinearBackend.VLLM_CUTLASS
        out = cutlass_scaled_fp4_mm(*mm_args)
    # Slice output to remove N-dimension padding
    out = slice_nvfp4_output(out, output_size)
    if bias is not None:
        out = out + bias
    return out.view(*output_shape)
def swizzle_blockscale(scale: torch.Tensor) -> torch.Tensor:
    """
    Pad and block-interleave the FP4 block-scales so that they match the data
    layout expected by the CUTLASS / FlashInfer kernels.

    Parameters
    ----------
    scale: torch.Tensor
        2-D ``(M, K)`` or 3-D ``(B, M, K)`` block scales in
        ``torch.float8_e4m3fn``.

    Returns
    -------
    torch.Tensor
        The swizzled tensor, moved to the current CUDA device. Note the
        output is *padded*: M is rounded up to a multiple of 128 and K to a
        multiple of 4, so the shape is ``(M_padded, K_padded)`` (or
        ``(B, M_padded, K_padded)``), not necessarily the input shape.
    """
    assert scale.dtype == torch.float8_e4m3fn, (
        "swizzle_blockscale expects the input tensor to be in "
        "torch.float8_e4m3fn format."
    )
    scale_ndim = scale.ndim
    if scale_ndim == 2:
        scale = scale.unsqueeze(0)  # (1, M, K)
    assert scale.ndim == 3, "Expected a 2-D or 3-D tensor for block scales."
    B, M, K = scale.shape
    # Kernel tiles are 128 rows by 4 scale columns; pad up to whole tiles.
    M_padded = round_up(M, 128)
    K_padded = round_up(K, 4)
    padded = torch.zeros(
        (B, M_padded, K_padded), dtype=scale.dtype, device=scale.device
    )
    padded[:B, :M, :K] = scale
    # Reshape / permute to the layout required by the kernel.
    padded = padded.reshape(B, M_padded // 128, 4, 32, K_padded // 4, 4)
    # .cuda(): the kernels read the swizzled scales from device memory.
    swizzled = padded.permute(0, 1, 4, 3, 2, 5).contiguous().cuda()
    if scale_ndim == 2:
        return swizzled.reshape(M_padded, K_padded)
    return swizzled.reshape(B, M_padded, K_padded)
def cutlass_fp4_supported() -> bool:
    """Whether the current device supports the CUTLASS FP4 scaled-mm kernels.

    Non-CUDA platforms are rejected outright; otherwise the decision is
    delegated to ``cutlass_scaled_mm_supports_fp4`` with the device's compute
    capability (-1 when the capability cannot be determined).
    """
    if not current_platform.is_cuda():
        return False
    capability = current_platform.get_device_capability()
    capability_int = capability.to_int() if capability is not None else -1
    return cutlass_scaled_mm_supports_fp4(capability_int)
def pad_nvfp4_weight_for_cutlass(
    weight: torch.Tensor,
    alignment: int = 32,
) -> tuple[torch.Tensor, int]:
    """
    Pad packed NVFP4 weights so that both N (rows) and K (columns, counted
    in FP4 elements) are multiples of ``alignment``.

    The CUTLASS / FlashInfer FP4 kernels require K and N divisible by 32 for
    aligned memory access and efficient tensor-core operation.

    Returns the (possibly padded) weight and the number of padding *bytes*
    appended along K (0 when K already satisfied the alignment).
    """
    rows = weight.shape[0]
    # N dimension: append zero rows up to the next multiple of `alignment`.
    if rows % alignment != 0:
        extra_rows = round_up(rows, alignment) - rows
        weight = torch.nn.functional.pad(weight, (0, 0, 0, extra_rows)).contiguous()
    # K dimension: 2 FP4 elements are packed per stored byte.
    col_elements = weight.shape[1] * 2
    pad_bytes = 0
    if col_elements % alignment != 0:
        extra_elements = round_up(col_elements, alignment) - col_elements
        # extra_elements is even (alignment=32, packed element counts are
        # even), so the byte conversion divides cleanly.
        pad_bytes = extra_elements // 2
        weight = torch.nn.functional.pad(weight, (0, pad_bytes, 0, 0)).contiguous()
    return weight, pad_bytes
def pad_nvfp4_activation_for_cutlass(
    x_fp4: torch.Tensor,
    weights_padding_bytes: int,
) -> torch.Tensor:
    """
    Zero-pad packed FP4 activations along the last dim so their K dimension
    matches the padding already applied to the weights.

    The count is in bytes (the packed tensor's last dimension), not FP4
    elements. The input is returned unchanged when no padding is needed.
    """
    if weights_padding_bytes <= 0:
        return x_fp4
    padded = torch.nn.functional.pad(x_fp4, (0, weights_padding_bytes))
    return padded.contiguous()
def slice_nvfp4_output(
    out: torch.Tensor,
    output_size: int,
) -> torch.Tensor:
    """
    Drop N-dimension padding from a GEMM output, returning the input
    unchanged (no copy) when its last dim already equals ``output_size``.
    """
    if out.shape[-1] == output_size:
        return out
    return out[..., :output_size].contiguous()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/utils/nvfp4_utils.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_marlin_vs_trtllm_mxint4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test comparing Marlin INT4 MoE vs FlashInfer TRT-LLM MXINT4 MoE."""
import pytest
import torch
from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
fused_marlin_moe,
)
from vllm.model_executor.layers.fused_moe.router.grouped_topk_router import (
grouped_topk,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_mxint4_moe import (
prepare_static_weights_for_trtllm_mxint4_moe,
)
from vllm.platforms import current_platform
from vllm.scalar_type import scalar_types
def mxint4_quantize(
    x: torch.Tensor, sf_vec_size: int = 32
) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize a tensor to MXINT4 with block scaling (group_size=sf_vec_size).

    Each contiguous group of ``sf_vec_size`` elements along the last dim
    shares one scale chosen so the group fits the signed INT4 range [-8, 7].

    Args:
        x: Input tensor; last dim must be divisible by ``sf_vec_size``.
        sf_vec_size: Scaling-factor group size (default: 32).

    Returns:
        - uint8 packed (2 INT4/byte): [..., k//2] - stores SIGNED INT4 [-8, 7]
        - scales in x's dtype: [..., k//sf_vec_size]
    """
    x_reshaped = x.reshape(-1, sf_vec_size)
    x_max = x_reshaped.max(dim=-1, keepdim=True)[0].to(torch.float32)
    x_min = x_reshaped.min(dim=-1, keepdim=True)[0].to(torch.float32)
    # The positive INT4 range only reaches +7 while the negative reaches -8,
    # so stretch the positive extreme by 8/7 before picking the block amax.
    x_max = x_max * 8.0 / 7.0
    amax = torch.where(x_max > -x_min, x_max, -x_min)
    scales = amax / 8.0
    # Guard all-zero groups: a zero scale would give an inf reciprocal and
    # NaN quantized values; map those groups to exact zeros instead.
    inv_scales = torch.where(
        scales > 0, scales.reciprocal(), torch.zeros_like(scales)
    )
    x_scaled = x_reshaped * inv_scales
    x_int8 = (
        x_scaled.round().clamp(-8, 7).to(torch.int8).reshape(-1, sf_vec_size // 2, 2)
    )
    # Pack two signed INT4 values per byte, low nibble first.
    x_int4 = (x_int8[..., 0] & 0x0F) | ((x_int8[..., 1] & 0x0F) << 4)
    return (
        x_int4.to(torch.uint8).reshape(*x.shape[:-1], x.shape[-1] // 2),
        scales.to(x.dtype).reshape(*x.shape[:-1], x.shape[-1] // sf_vec_size),
    )
def mxint4_quantize_moe_weights(
    weights_bf16: torch.Tensor, group_size: int = 32
) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize MoE weights [e, n, k] to MxInt4 format.

    Args:
        weights_bf16: BF16 weights of shape [num_experts, out_features, in_features]
        group_size: Quantization group size (default: 32)

    Returns:
        - weights_mxint4: Quantized weights [e, n, k//2] uint8
        - scales_mxint4: Quantization scales [e, n, k//group_size] bf16
    """
    packed_per_expert = []
    scales_per_expert = []
    # Quantize each expert's weight matrix independently, then re-stack
    # along the expert dimension.
    for expert_weight in weights_bf16:
        packed, scales = mxint4_quantize(expert_weight, sf_vec_size=group_size)
        packed_per_expert.append(packed)
        scales_per_expert.append(scales)
    return torch.stack(packed_per_expert), torch.stack(scales_per_expert)
# Explicit export list: the quantization helpers above/below are this
# module's reusable public surface for star-imports.
__all__ = [
    "mxint4_quantize",
    "mxint4_quantize_moe_weights",
    "marlin_quantize_moe_weights",
]
def marlin_quantize_moe_weights(
    weights_bf16: torch.Tensor, group_size: int = 32
) -> tuple[torch.Tensor, torch.Tensor]:
    """Quantize MoE weights [e, n, k] to Marlin INT4 format.

    Args:
        weights_bf16: BF16 weights of shape [num_experts, out_features, in_features]
        group_size: Quantization group size (default: 32)

    Returns:
        - weights_marlin: Marlin quantized weights [e, k//8, n] int32
        - scales_marlin: Marlin quantization scales [e, k//group_size, n] bf16
    """
    from vllm.model_executor.layers.quantization.utils.marlin_utils_test import (
        marlin_quantize,
    )

    num_experts, _out_features, _in_features = weights_bf16.shape
    quantized_weights = []
    quantized_scales = []
    for expert_idx in range(num_experts):
        # Marlin consumes a [k, n] layout, so transpose each expert first.
        transposed = weights_bf16[expert_idx].T.contiguous()
        _, w_quant, w_scale, _, _, _ = marlin_quantize(
            transposed, scalar_types.uint4b8, group_size, act_order=False
        )
        quantized_weights.append(w_quant)
        quantized_scales.append(w_scale)
    # Re-stack per-expert results into [e, ...] tensors.
    weights_marlin = torch.stack(quantized_weights)  # [e, k // 8, n]
    scales_marlin = torch.stack(quantized_scales)  # [e, k // group_size, n]
    return weights_marlin, scales_marlin
# The TRT-LLM generated MXINT4 kernels only run on CUDA devices in the
# SM100 compute-capability family, so the test below is skipped elsewhere.
TRTLLM_GEN_AVAILABLE = (
    current_platform.is_cuda() and current_platform.is_device_capability_family(100)
)
@pytest.mark.skipif(not TRTLLM_GEN_AVAILABLE, reason="Skip for non SM100")
@pytest.mark.parametrize("m", [1, 33])
@pytest.mark.parametrize("n", [7168])
@pytest.mark.parametrize("k", [512])
@pytest.mark.parametrize("e", [384])
@pytest.mark.parametrize("topk", [8])
@pytest.mark.parametrize("group_size", [32])
def test_marlin_vs_trtllm_mxint4_moe_kimik2(monkeypatch, m, n, k, e, topk, group_size):
    """Compare Marlin INT4 MoE vs FlashInfer TRT-LLM MXINT4 MoE.

    Uses mxint4_quantize() to generate common INT4 weights + BF16 scales,
    then runs both Marlin and TRT-LLM kernels and compares outputs. Both
    kernel outputs are additionally checked against a hand-computed BF16
    reference built from the shared unquantized weights.
    """
    pytest.importorskip("flashinfer")
    monkeypatch.setenv("VLLM_USE_FLASHINFER_MOE_INT4", "1")
    torch.cuda.manual_seed(0)
    dtype = torch.bfloat16
    # DeepSeekV3 routing config (from Kimi-K2-Thinking config.json)
    n_group = 1  # n_group from model config
    topk_group = 1  # topk_group from model config
    routed_scaling = 2.827  # routed_scaling_factor from model config
    # Input - realistic activation range for LLM (after LayerNorm: mean~0, std~1)
    a = torch.randn((m, k), device="cuda", dtype=dtype) * 0.5
    # Generate routing logits and bias (DeepSeekV3 expects float logits)
    # Realistic ranges: logits typically [-3, 3], bias [-2, 2]
    routing_logits = torch.randn((m, e), device="cuda", dtype=torch.float32) * 1.5
    routing_bias = torch.randn(e, device="cuda", dtype=torch.float32) * 0.8
    # 1. Generate BF16 weights (SHARED between both paths)
    # Realistic weight initialization: Xavier/Glorot uniform scaling
    # std = sqrt(2 / (fan_in + fan_out))
    std_w1 = (2.0 / (k + 2 * n)) ** 0.5
    std_w2 = (2.0 / (n + k)) ** 0.5
    w1_bf16 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) * std_w1
    w2_bf16 = torch.randn((e, k, n), device="cuda", dtype=dtype) * std_w2
    # === Path 1: TRT-LLM FlashInfer MXINT4 MoE ===
    # Similar to: if self.use_flashinfer_mxint4_moe
    # Quantize using MXINT4 method (signed INT4)
    w1_int4, w1_scales = mxint4_quantize_moe_weights(w1_bf16, group_size)
    w2_int4, w2_scales = mxint4_quantize_moe_weights(w2_bf16, group_size)
    trtllm_weights = prepare_static_weights_for_trtllm_mxint4_moe(
        gemm1_weights=w1_int4,
        gemm1_scales=w1_scales,
        gemm2_weights=w2_int4,
        gemm2_scales=w2_scales,
    )
    from flashinfer import RoutingMethodType
    from flashinfer.fused_moe import trtllm_mxint4_block_scale_moe
    # Routing handled internally by trtllm_mxint4_block_scale_moe
    trtllm_output = trtllm_mxint4_block_scale_moe(
        routing_logits=routing_logits,
        routing_bias=routing_bias.to(torch.bfloat16),
        hidden_states=a,
        gemm1_weights=trtllm_weights["gemm1_weights"],
        gemm1_weights_scale=trtllm_weights["gemm1_scales"],
        gemm1_alpha=None,
        gemm1_beta=None,
        gemm1_clamp_limit=None,
        gemm2_weights=trtllm_weights["gemm2_weights"],
        gemm2_weights_scale=trtllm_weights["gemm2_scales"],
        num_experts=e,
        top_k=topk,
        n_group=n_group,
        topk_group=topk_group,
        intermediate_size=n,
        local_expert_offset=0,
        local_num_experts=e,
        routed_scaling_factor=routed_scaling,
        routing_method_type=RoutingMethodType.DeepSeekV3,
        enable_pdl=None,
        output=None,
        tune_max_num_tokens=8192,
    ).to(dtype)
    # === Path 2: Marlin INT4 MoE ===
    # Similar to: else (non-flashinfer path)
    # Quantize using Marlin's method (UINT4b8)
    w1_marlin, w1_scales_marlin = marlin_quantize_moe_weights(w1_bf16, group_size)
    w2_marlin, w2_scales_marlin = marlin_quantize_moe_weights(w2_bf16, group_size)
    # Use production routing kernel (same as router.select_experts internally uses)
    topk_weights, topk_ids = grouped_topk(
        hidden_states=a,
        gating_output=routing_logits,
        topk=topk,
        renormalize=False,  # DeepSeekV3 doesn't renormalize
        num_expert_group=n_group,
        topk_group=topk_group,
        scoring_func="sigmoid",  # DeepSeekV3 uses sigmoid
        routed_scaling_factor=routed_scaling,
        e_score_correction_bias=routing_bias,
    )
    marlin_output = fused_marlin_moe(
        a,
        w1_marlin,
        w2_marlin,
        None,
        None,
        w1_scales_marlin,
        w2_scales_marlin,
        None,  # gating_output not needed when topk_weights/ids provided
        topk_weights,
        topk_ids,
        global_num_experts=e,
        expert_map=None,
        global_scale1=None,
        global_scale2=None,
        g_idx1=None,
        g_idx2=None,
        input_global_scale1=None,
        input_global_scale2=None,
        sort_indices1=None,
        sort_indices2=None,
        w1_zeros=None,
        w2_zeros=None,
        input_dtype=dtype,
        quant_type_id=scalar_types.uint4b8.id,
        is_k_full=True,
    )
    # Sanity check: manually compute BF16 reference for comparison
    # Use same routing as Marlin path for consistency
    bf16_output = torch.zeros((m, k), device="cuda", dtype=dtype)
    for token_idx in range(m):
        for expert_rank in range(topk):
            expert_id = topk_ids[token_idx, expert_rank].item()
            weight = topk_weights[token_idx, expert_rank].item()
            # w1: [2*n, k] @ [k] -> [2*n]
            up_gate = a[token_idx] @ w1_bf16[expert_id].T  # [2*n]
            gate, up = up_gate.chunk(2, dim=0)
            intermediate = torch.nn.functional.silu(gate) * up  # [n]
            # w2: [k, n] @ [n] -> [k]
            expert_out = intermediate @ w2_bf16[expert_id].T  # [k]
            bf16_output[token_idx] += weight * expert_out
    # Compare against BF16 reference.
    # NOTE(review): tolerances are intentionally loose — INT4 quantization
    # error dominates both kernel outputs here.
    torch.testing.assert_close(marlin_output, bf16_output, atol=0.3, rtol=1.0)
    torch.testing.assert_close(trtllm_output, bf16_output, atol=0.3, rtol=1.0)
    # Compare against each other for sanity.
    # Note: Different quantization schemes (UINT4b8 vs signed MXINT4) cause
    # some differences
    torch.testing.assert_close(marlin_output, trtllm_output, atol=0.3, rtol=6.0)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_marlin_vs_trtllm_mxint4.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/flashinfer_mxint4_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Utility helpers for MxInt4 + FlashInfer fused-MoE path"""
import functools
import torch
import vllm.envs as envs
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.utils.flashinfer import has_flashinfer_trtllm_fused_moe
__all__ = [
"prepare_static_weights_for_trtllm_mxint4_moe",
"flashinfer_trtllm_mxint4_moe",
"is_flashinfer_mxint4_moe_available",
]
logger = init_logger(__name__)
@functools.cache
def is_flashinfer_mxint4_moe_available() -> bool:
    """Return `True` when FlashInfer MxInt4 kernels can be used.

    Requires the `VLLM_USE_FLASHINFER_MOE_INT4` opt-in, a FlashInfer build
    providing the TRT-LLM fused-MoE kernels, and an SM100-family CUDA
    device. The result is cached for the life of the process.
    """
    if not envs.VLLM_USE_FLASHINFER_MOE_INT4:
        return False
    if not has_flashinfer_trtllm_fused_moe():
        return False
    if not current_platform.is_cuda():
        return False
    return current_platform.is_device_capability_family(100)
def prepare_static_weights_for_trtllm_mxint4_moe(
    gemm1_weights: torch.Tensor,
    gemm1_scales: torch.Tensor,
    gemm2_weights: torch.Tensor,
    gemm2_scales: torch.Tensor,
) -> dict[str, torch.Tensor]:
    """
    Prepare MxInt4 weights for TRT-LLM kernel.

    Input:
        gemm1_weights: [num_experts, 2*intermediate_size, hidden_size//8] int32
            (checkpoint uint4b8 packed) or uint8 (already packed signed int4)
        gemm1_scales: [num_experts, 2*intermediate_size, hidden_size//32] bf16
        gemm2_weights: [num_experts, hidden_size, intermediate_size//8] int32
            (checkpoint uint4b8 packed) or uint8 (already packed signed int4)
        gemm2_scales: [num_experts, hidden_size, intermediate_size//32] bf16
    Returns:
        Dict with keys 'gemm1_weights', 'gemm1_scales', 'gemm2_weights',
        'gemm2_scales' containing shuffled/packed tensors ready for kernel
    """
    from flashinfer import block_scale_interleave
    from flashinfer.fused_moe import (
        convert_to_block_layout,
    )
    from flashinfer.fused_moe.core import (
        _maybe_get_cached_w3_w1_permute_indices,
        get_w2_permute_indices_with_cache,
    )
    from vllm.model_executor.layers.quantization.utils.flashinfer_fp4_moe import (
        reorder_w1w3_to_w3w1,
    )
    from vllm.model_executor.layers.quantization.utils.quant_utils import (
        convert_packed_uint4b8_to_signed_int4_inplace,
    )
    device = gemm1_weights.device
    assert gemm1_weights.ndim == 3, (
        f"Expected a 3D gemm1_weights tensor, got {gemm1_weights.shape}"
    )
    assert gemm1_scales.ndim == 3, (
        f"Expected a 3D gemm1_scales tensor, got {gemm1_scales.shape}"
    )
    assert gemm2_weights.ndim == 3, (
        f"Expected a 3D gemm2_weights tensor, got {gemm2_weights.shape}"
    )
    assert gemm2_scales.ndim == 3, (
        f"Expected a 3D gemm2_scales tensor, got {gemm2_scales.shape}"
    )
    # Convert checkpoint format (uint4b8 in int32) to signed int4
    # Checkpoint stores INT4 as unsigned [0, 15], kernel expects signed [-8, 7]
    if gemm1_weights.dtype == torch.int32 and gemm2_weights.dtype == torch.int32:
        convert_packed_uint4b8_to_signed_int4_inplace(gemm1_weights)
        convert_packed_uint4b8_to_signed_int4_inplace(gemm2_weights)
    # The kernel expects the gated-activation halves in (w3, w1) order.
    gemm1_weights, gemm1_scales = reorder_w1w3_to_w3w1(
        gemm1_weights, gemm1_scales, dim=-2
    )
    # NOTE(review): this cache is local to the call, so permute indices are
    # recomputed on every invocation — presumably fine for one-time weight
    # loading; confirm if this is ever called per-step.
    _cache_permute_indices: dict[torch.Size, torch.Tensor] = {}
    num_experts = gemm1_weights.shape[0]
    # Convert quantized weights to proper formats -
    gemm1_weights_mxint4 = gemm1_weights.view(torch.uint8)
    assert gemm1_scales.dtype == torch.bfloat16
    gemm2_weights_mxint4 = gemm2_weights.view(torch.uint8)
    assert gemm2_scales.dtype == torch.bfloat16
    epilogue_tile_m = 128
    gemm1_weights_mxint4_shuffled = []
    gemm1_scales_shuffled = []
    gemm2_weights_mxint4_shuffled = []
    gemm2_scales_shuffled = []
    # Each expert is shuffled independently and re-stacked at the end.
    for i in range(num_experts):
        # Calculate the permute indices for the following:
        # 1. Reorder rows of W1 and scales for fused gated activation
        # 2. Shuffle weights and scaling factors for transposed mma output
        # for both w3_w1 and w2 weights and scale factors
        permute_indices = _maybe_get_cached_w3_w1_permute_indices(
            _cache_permute_indices,
            gemm1_weights_mxint4[i],
            epilogue_tile_m,
        )
        gemm1_weights_shuffled = gemm1_weights_mxint4[i][
            permute_indices.to(gemm1_weights.device)
        ].contiguous()
        permute_sf_indices = _maybe_get_cached_w3_w1_permute_indices(
            _cache_permute_indices,
            gemm1_scales[i],
            epilogue_tile_m,
            num_elts_per_sf=32,
        ).to(device)
        gemm1_scales_shuffled.append(
            block_scale_interleave(gemm1_scales[i][permute_sf_indices].contiguous())
        )
        permute_indices = get_w2_permute_indices_with_cache(
            _cache_permute_indices,
            gemm2_weights_mxint4[i],
            epilogue_tile_m,
        )
        gemm2_weights_shuffled = gemm2_weights_mxint4[i][
            permute_indices.to(gemm2_weights.device)
        ].contiguous()
        permute_sf_indices = get_w2_permute_indices_with_cache(
            _cache_permute_indices,
            gemm2_scales[i],
            epilogue_tile_m,
            num_elts_per_sf=16,
        )
        gemm2_scales_shuffled.append(
            block_scale_interleave(
                gemm2_scales[i][permute_sf_indices.to(gemm2_scales.device)].contiguous()
            )
        )
        block_k = 128
        gemm1_weights_shuffled = convert_to_block_layout(
            gemm1_weights_shuffled.view(torch.uint8), block_k
        )
        gemm2_weights_shuffled = convert_to_block_layout(
            gemm2_weights_shuffled.view(torch.uint8), block_k
        )
        gemm1_weights_mxint4_shuffled.append(gemm1_weights_shuffled)
        gemm2_weights_mxint4_shuffled.append(gemm2_weights_shuffled)
    gemm1_weights_mxint4_shuffled = torch.stack(gemm1_weights_mxint4_shuffled)
    gemm2_weights_mxint4_shuffled = torch.stack(gemm2_weights_mxint4_shuffled)
    # Scales were interleaved as raw bytes; reinterpret them back as bf16.
    gemm1_scales_shuffled = torch.stack(gemm1_scales_shuffled).view(torch.bfloat16)
    gemm2_scales_shuffled = torch.stack(gemm2_scales_shuffled).view(torch.bfloat16)
    return {
        "gemm1_weights": gemm1_weights_mxint4_shuffled,
        "gemm1_scales": gemm1_scales_shuffled,
        "gemm2_weights": gemm2_weights_mxint4_shuffled,
        "gemm2_scales": gemm2_scales_shuffled,
    }
def flashinfer_trtllm_mxint4_moe(
    x: torch.Tensor,
    router_logits: torch.Tensor,
    w13_weight_packed: torch.Tensor,
    w13_weight_scale: torch.Tensor,
    w2_weight_packed: torch.Tensor,
    w2_weight_scale: torch.Tensor,
    global_num_experts: int,
    top_k: int,
    intermediate_size_per_partition: int,
    local_num_experts: int,
    ep_rank: int = 0,
    num_expert_group: int | None = None,
    topk_group: int | None = None,
    e_score_correction_bias: torch.Tensor | None = None,
    routing_method_type: int | None = None,
) -> torch.Tensor:
    """
    Apply FlashInfer TensorRT-LLM MxInt4 MoE kernel.

    Args:
        x: Input hidden states. dtype: bfloat16
        router_logits: Router logits for expert selection. dtype: bfloat16/float32
        w13_weight_packed: Packed gate+up weights. dtype: uint8
        w13_weight_scale: Scales for gate+up weights. dtype: bfloat16
        w2_weight_packed: Packed down weights. dtype: uint8
        w2_weight_scale: Scales for down weights. dtype: bfloat16
        global_num_experts: Total number of experts across all ranks
        top_k: Number of experts to select per token
        intermediate_size_per_partition: Intermediate size per partition
        local_num_experts: Number of experts on this rank
        ep_rank: Expert parallelism rank (default: 0)
        num_expert_group: Number of expert groups (default: None -> 0)
        topk_group: Top-k within groups (default: None -> 0)
        e_score_correction_bias: Optional routing bias. dtype: bfloat16
        routing_method_type: FlashInfer RoutingMethodType enum value

    Returns:
        Output tensor from MoE layer. dtype: same as x (bfloat16)
    """
    from flashinfer import RoutingMethodType
    from flashinfer.fused_moe import trtllm_mxint4_block_scale_moe
    # Fail fast on dtype mismatches; the kernel's contracts are strict.
    assert x.dtype == torch.bfloat16, f"x dtype must be bfloat16, got {x.dtype}"
    assert w13_weight_packed.dtype == torch.uint8, (
        f"w13_weight_packed dtype must be uint8, got {w13_weight_packed.dtype}"
    )
    assert w13_weight_scale.dtype == torch.bfloat16, (
        f"w13_weight_scale dtype must be bfloat16, got {w13_weight_scale.dtype}"
    )
    assert w2_weight_packed.dtype == torch.uint8, (
        f"w2_weight_packed dtype must be uint8, got {w2_weight_packed.dtype}"
    )
    assert w2_weight_scale.dtype == torch.bfloat16, (
        f"w2_weight_scale dtype must be bfloat16, got {w2_weight_scale.dtype}"
    )
    routing_bias = None
    if e_score_correction_bias is not None:
        # Kernel consumes the bias in bf16, whatever dtype it was stored in.
        routing_bias = e_score_correction_bias.to(torch.bfloat16)
    if routing_method_type == RoutingMethodType.DeepSeekV3:
        # DeepSeekV3-style routing expects float32 logits.
        router_logits = router_logits.to(torch.float32)
    out = trtllm_mxint4_block_scale_moe(
        routing_logits=router_logits,
        routing_bias=routing_bias,
        hidden_states=x,
        gemm1_weights=w13_weight_packed.data,
        gemm1_weights_scale=w13_weight_scale.data,
        gemm1_alpha=None,
        gemm1_beta=None,
        gemm1_clamp_limit=None,
        gemm2_weights=w2_weight_packed.data,
        gemm2_weights_scale=w2_weight_scale.data,
        num_experts=global_num_experts,
        top_k=top_k,
        n_group=num_expert_group if num_expert_group is not None else 0,
        topk_group=topk_group if topk_group is not None else 0,
        intermediate_size=intermediate_size_per_partition,
        local_expert_offset=ep_rank * local_num_experts,
        local_num_experts=local_num_experts,
        routed_scaling_factor=None,
        routing_method_type=routing_method_type,
        enable_pdl=None,
        output=None,
        tune_max_num_tokens=8192,
    ).to(x.dtype)
    return out
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/utils/flashinfer_mxint4_moe.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/helion/test_config_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for Helion ConfigManager and ConfigSet.
Tests the simplified configuration management system for Helion custom kernels.
"""
import json
import tempfile
from pathlib import Path
import pytest
from vllm.utils.import_utils import has_helion
# Skip entire module if helion is not available
if not has_helion():
pytest.skip(
"Helion is not installed. Install with: pip install vllm[helion]",
allow_module_level=True,
)
import helion
from vllm.kernels.helion.config_manager import (
ConfigManager,
ConfigSet,
)
@pytest.fixture(autouse=True)
def reset_config_manager_singleton():
    """Reset the ConfigManager singleton around every test.

    ``autouse=True`` applies this to all tests in the module, so each test
    starts from, and leaves behind, a clean instance-free state.
    """
    ConfigManager.reset_instance()
    yield
    # Teardown: drop whatever instance the test may have created.
    ConfigManager.reset_instance()
class TestConfigSet:
    """Unit tests for the ConfigSet container."""

    def test_config_set_creation(self):
        """A freshly constructed ConfigSet is empty."""
        empty_set = ConfigSet("test_kernel")
        assert empty_set.kernel_name == "test_kernel"
        assert empty_set.get_platforms() == []

    def test_config_set_from_dict(self):
        """from_dict builds real helion.Config objects from raw data."""
        raw_config = {
            "block_sizes": [32, 16],
            "num_warps": 4,
            "num_stages": 3,
            "pid_type": "persistent_interleaved",
        }
        loaded = ConfigSet.from_dict(
            "test_kernel", {"h100": {"batch_32_hidden_4096": raw_config}}
        )
        assert loaded.kernel_name == "test_kernel"
        assert loaded.get_platforms() == ["h100"]
        # Every field of the raw data survives the conversion.
        cfg = loaded.get_config("h100", "batch_32_hidden_4096")
        assert isinstance(cfg, helion.Config)
        assert cfg.block_sizes == [32, 16]
        assert cfg.num_warps == 4
        assert cfg.num_stages == 3
        assert cfg.pid_type == "persistent_interleaved"

    def test_config_set_get_config_keyerror(self):
        """Missing platforms / config keys raise descriptive KeyErrors."""
        empty_set = ConfigSet("test_kernel")
        with pytest.raises(KeyError, match="platform 'h100' not found"):
            empty_set.get_config("h100", "batch_32_hidden_4096")
        populated = ConfigSet.from_dict(
            "test_kernel",
            {"h100": {"batch_64_hidden_2048": {"num_warps": 8, "num_stages": 4}}},
        )
        with pytest.raises(
            KeyError, match="config_key 'batch_32_hidden_4096' not found"
        ):
            populated.get_config("h100", "batch_32_hidden_4096")

    def test_config_set_get_platforms(self):
        """get_platforms returns the platform names in sorted order."""
        loaded = ConfigSet.from_dict(
            "test_kernel",
            {
                "h100": {"batch_32_hidden_4096": {"num_warps": 4, "num_stages": 3}},
                "a100": {"batch_16_hidden_2048": {"num_warps": 8, "num_stages": 5}},
            },
        )
        assert loaded.get_platforms() == ["a100", "h100"]  # sorted

    def test_config_set_get_config_keys(self):
        """get_config_keys lists keys per platform and [] for unknown ones."""
        loaded = ConfigSet.from_dict(
            "test_kernel",
            {
                "h100": {
                    "batch_32_hidden_4096": {"num_warps": 4, "num_stages": 3},
                    "batch_64_hidden_2048": {"num_warps": 8, "num_stages": 5},
                }
            },
        )
        assert loaded.get_config_keys("h100") == [
            "batch_32_hidden_4096",
            "batch_64_hidden_2048",
        ]
        assert loaded.get_config_keys("v100") == []

    def test_config_set_to_dict(self):
        """to_dict round-trips the data the ConfigSet was built from."""
        source_data = {
            "h100": {
                "batch_32_hidden_4096": {
                    "block_sizes": [64, 32],
                    "num_warps": 16,
                    "num_stages": 4,
                    "pid_type": "persistent_blocked",
                }
            }
        }
        loaded = ConfigSet.from_dict("test_kernel", source_data)
        # Config -> dict conversion must reproduce the original data.
        assert loaded.to_dict() == source_data
class TestConfigManager:
"""Test suite for ConfigManager class."""
def test_config_manager_creation_default_base_dir(self):
"""Test creating ConfigManager with default base directory."""
manager = ConfigManager()
assert manager._base_dir.name == "configs"
def test_config_manager_creation_custom_base_dir(self):
"""Test creating ConfigManager with custom base directory."""
custom_dir = "/tmp/custom_configs"
manager = ConfigManager(base_dir=custom_dir)
# Paths are resolved, so compare with resolved path
assert manager._base_dir == Path(custom_dir).resolve()
def test_get_config_file_path(self):
"""Test getting config file path for a kernel."""
manager = ConfigManager(base_dir="/tmp")
file_path = manager.get_config_file_path("silu_mul_fp8")
expected_path = Path("/tmp/silu_mul_fp8.json")
assert file_path == expected_path
def test_ensure_base_dir_exists(self):
"""Test ensuring base directory exists."""
with tempfile.TemporaryDirectory() as temp_dir:
base_dir = Path(temp_dir) / "non_existent" / "configs"
manager = ConfigManager(base_dir=base_dir)
assert not base_dir.exists()
returned_path = manager.ensure_base_dir_exists()
assert base_dir.exists()
assert base_dir.is_dir()
assert returned_path == base_dir
def test_load_config_set_file_not_exists(self):
"""Test loading config set when file doesn't exist."""
with tempfile.TemporaryDirectory() as temp_dir:
manager = ConfigManager(base_dir=temp_dir)
config_set = manager.load_config_set("non_existent_kernel")
assert isinstance(config_set, ConfigSet)
assert config_set.kernel_name == "non_existent_kernel"
assert config_set.get_platforms() == []
def test_load_config_set_valid_file(self):
"""Test loading config set from valid file."""
with tempfile.TemporaryDirectory() as temp_dir:
# Use realistic config data
kernel_config = {
"block_sizes": [128, 64],
"num_warps": 8,
"num_stages": 6,
"pid_type": "persistent_interleaved",
}
config_data = {"h100": {"batch_32_hidden_4096": kernel_config}}
config_file = Path(temp_dir) / "test_kernel.json"
with open(config_file, "w") as f:
json.dump(config_data, f)
manager = ConfigManager(base_dir=temp_dir)
config_set = manager.load_config_set("test_kernel")
assert isinstance(config_set, ConfigSet)
assert config_set.kernel_name == "test_kernel"
assert config_set.get_platforms() == ["h100"]
# Verify the config was loaded correctly
config = config_set.get_config("h100", "batch_32_hidden_4096")
assert isinstance(config, helion.Config)
assert config.block_sizes == [128, 64]
assert config.num_warps == 8
def test_load_config_set_invalid_json(self):
    """Test loading config set from file with invalid JSON.

    A corrupt file degrades gracefully to an empty ConfigSet instead
    of raising.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        config_file = Path(temp_dir) / "test_kernel.json"
        with open(config_file, "w") as f:
            f.write("invalid json content {")
        manager = ConfigManager(base_dir=temp_dir)
        config_set = manager.load_config_set("test_kernel")
        assert isinstance(config_set, ConfigSet)
        assert config_set.kernel_name == "test_kernel"
        assert config_set.get_platforms() == []
def test_save_config_set(self):
    """Test saving ConfigSet to file.

    Round-trips a dict through ConfigSet.from_dict -> save -> json.load
    and expects the on-disk content to equal the input.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # Use realistic config data
        kernel_config = {
            "block_sizes": [256, 128],
            "num_warps": 16,
            "num_stages": 8,
            "pid_type": "persistent_blocked",
        }
        data = {"h100": {"batch_32_hidden_4096": kernel_config}}
        config_set = ConfigSet.from_dict("test_kernel", data)
        manager = ConfigManager(base_dir=temp_dir)
        saved_path = manager.save_config_set(config_set)
        # File is named after the kernel, inside base_dir.
        expected_path = Path(temp_dir) / "test_kernel.json"
        assert saved_path == expected_path
        assert saved_path.exists()
        with open(saved_path) as f:
            loaded_data = json.load(f)
        assert loaded_data == data
def test_save_config_set_creates_directory(self):
    """Test that save_config_set creates parent directories if needed."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # base_dir does not exist yet; saving must create it.
        nested_dir = Path(temp_dir) / "nested" / "configs"
        config_set = ConfigSet("test_kernel")
        manager = ConfigManager(base_dir=nested_dir)
        saved_path = manager.save_config_set(config_set)
        assert nested_dir.exists()
        assert nested_dir.is_dir()
        assert saved_path.exists()
def test_get_platform_configs(self):
    """Test getting all configs for a specific platform."""
    with tempfile.TemporaryDirectory() as temp_dir:
        # Use realistic config data
        config_1 = {"num_warps": 4, "num_stages": 3, "block_sizes": [64, 32]}
        config_2 = {"num_warps": 8, "num_stages": 5, "block_sizes": [128, 64]}
        default_config = {
            "num_warps": 16,
            "num_stages": 7,
            "block_sizes": [256, 128],
        }
        config_3 = {"num_warps": 2, "num_stages": 2, "block_sizes": [32, 16]}
        config_data = {
            "h100": {
                "batch_32_hidden_4096": config_1,
                "batch_64_hidden_2048": config_2,
                "default": default_config,
            },
            "a100": {"batch_16_hidden_1024": config_3},
        }
        config_file = Path(temp_dir) / "test_kernel.json"
        with open(config_file, "w") as f:
            json.dump(config_data, f)
        manager = ConfigManager(base_dir=temp_dir)
        # Only the requested platform's entries are returned.
        h100_configs = manager.get_platform_configs("test_kernel", "h100")
        assert len(h100_configs) == 3
        assert "batch_32_hidden_4096" in h100_configs
        assert "batch_64_hidden_2048" in h100_configs
        assert "default" in h100_configs
        for config in h100_configs.values():
            assert isinstance(config, helion.Config)
        # Verify specific config details
        assert h100_configs["batch_32_hidden_4096"].num_warps == 4
        assert h100_configs["default"].num_stages == 7
        a100_configs = manager.get_platform_configs("test_kernel", "a100")
        assert len(a100_configs) == 1
        assert "batch_16_hidden_1024" in a100_configs
        assert isinstance(a100_configs["batch_16_hidden_1024"], helion.Config)
        assert a100_configs["batch_16_hidden_1024"].num_warps == 2
        # Unknown platform yields an empty mapping, not an error.
        nonexistent_configs = manager.get_platform_configs("test_kernel", "v100")
        assert len(nonexistent_configs) == 0
def test_singleton_returns_same_instance(self):
    """Test that ConfigManager returns the same instance on repeated calls.

    NOTE(review): these singleton tests rely on ConfigManager.reset_instance()
    being called between tests (presumably by a fixture outside this view) —
    otherwise an earlier test's singleton with a different base_dir would
    make construction raise. TODO confirm the fixture exists.
    """
    manager1 = ConfigManager(base_dir="/tmp/test_singleton")
    manager2 = ConfigManager(base_dir="/tmp/test_singleton")
    assert manager1 is manager2
def test_singleton_with_default_base_dir(self):
    """Test singleton behavior with default base directory."""
    manager1 = ConfigManager()
    manager2 = ConfigManager()
    assert manager1 is manager2
    # Same object implies same resolved base_dir, asserted explicitly.
    assert manager1._base_dir == manager2._base_dir
def test_singleton_error_on_different_base_dir(self):
    """Test that ConfigManager raises error when created with different base_dir."""
    ConfigManager(base_dir="/tmp/first_dir")
    # Second construction with a conflicting base_dir must be rejected.
    with pytest.raises(ValueError, match="singleton already exists"):
        ConfigManager(base_dir="/tmp/different_dir")
def test_reset_instance_allows_new_base_dir(self):
    """Test that reset_instance allows creating with a new base_dir."""
    manager1 = ConfigManager(base_dir="/tmp/first_dir")
    # Paths are stored resolved, so compare against the resolved form.
    assert manager1._base_dir == Path("/tmp/first_dir").resolve()
    ConfigManager.reset_instance()
    manager2 = ConfigManager(base_dir="/tmp/second_dir")
    assert manager2._base_dir == Path("/tmp/second_dir").resolve()
    assert manager1 is not manager2
def test_get_instance_returns_existing(self):
    """Test that get_instance returns the existing singleton."""
    manager1 = ConfigManager(base_dir="/tmp/test_get_instance")
    manager2 = ConfigManager.get_instance()
    assert manager1 is manager2
def test_get_instance_raises_if_not_initialized(self):
    """Test that get_instance raises RuntimeError if no instance exists."""
    # get_instance never constructs; callers must have created the
    # singleton explicitly first.
    with pytest.raises(RuntimeError, match="has not been created"):
        ConfigManager.get_instance()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/helion/test_config_manager.py",
"license": "Apache License 2.0",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/kernels/helion/config_manager.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Configuration management for Helion kernels.
This module provides centralized configuration file management for Helion custom
operations, including naming conventions, directory resolution, and file I/O.
Config File Structure
---------------------
Each kernel has a single JSON config file: {kernel_name}.json
The file uses a simplified 2-layer hierarchical structure:
{
"h100": { # GPU platform
"default": { ... }, # Fallback configuration
"batch_32_hidden_4096": { ... },
"batch_64_hidden_8192": { ... }
},
"a100": {
"default": { ... },
"batch_16_hidden_2048": { ... }
}
}
Example file: silu_mul_fp8.json
Config keys should be structured strings that encode the relevant
parameters (e.g., "batch_32_hidden_4096", "seq_512_heads_16", "fp8_batch_64", etc.).
Classes
-------
- ConfigSet: In-memory collection of configs for a kernel with lookup/query APIs.
- ConfigManager: File-level operations for config persistence.
"""
import json
from pathlib import Path
from typing import Any
from vllm.logger import init_logger
from vllm.utils.import_utils import has_helion
if not has_helion():
raise ImportError(
"ConfigManager requires helion to be installed. "
"Install it with: pip install helion"
)
import helion
logger = init_logger(__name__)
class ConfigSet:
    """In-memory collection of Helion configs with lookup/query capabilities.

    Platform names are normalized to lowercase on every write *and* lookup,
    so callers may pass e.g. "H100" or "h100" interchangeably.
    """

    # Type alias for nested config structure:
    # platform -> config_key -> helion.Config
    _ConfigDict = dict[str, dict[str, "helion.Config"]]

    def __init__(self, kernel_name: str):
        self._kernel_name = kernel_name
        self._configs: ConfigSet._ConfigDict = {}

    @property
    def kernel_name(self) -> str:
        """Name of the kernel these configs belong to."""
        return self._kernel_name

    def get_config(self, platform: str, config_key: str) -> "helion.Config":
        """Return the config for (platform, config_key).

        Raises:
            KeyError: if the platform or config_key is unknown.
        """
        # Fix: normalize for consistency with set_config()/has_config(),
        # which store and query lowercase platform names. Previously this
        # method used the raw string and could miss configs stored via
        # set_config("H100", ...).
        platform = platform.lower()
        platform_dict = self._configs.get(platform)
        if platform_dict is None:
            avail_platforms = self.get_platforms()
            # TODO(@gmagogsfm): add a CLI/env override flag so users can
            # directly specify a platform name instead of relying on
            # auto-detection, and suggest it in this error message.
            raise KeyError(
                f"Config not found for kernel '{self._kernel_name}': "
                f"platform '{platform}' not found. "
                f"Available platforms: {avail_platforms or '(none)'}. "
                f"If your GPU is a variant of a supported platform, "
                f"consider adding a mapping in _GPU_NAME_ALIASES in "
                f"vllm/kernels/helion/utils.py, or run "
                f"scripts/autotune_helion_kernels.py to generate configs "
                f"for your platform."
            )
        config = platform_dict.get(config_key)
        if config is None:
            avail_keys = self.get_config_keys(platform)
            raise KeyError(
                f"Config not found for kernel '{self._kernel_name}': "
                f"config_key '{config_key}' not found for platform '{platform}'. "
                f"Available config_keys: {avail_keys or '(none)'}"
            )
        return config

    def get_platforms(self) -> list[str]:
        """Return all known platform names, sorted."""
        return sorted(self._configs.keys())

    def get_config_keys(self, platform: str) -> list[str]:
        """Return sorted config keys for a platform ([] if unknown)."""
        platform_dict = self._configs.get(platform.lower())
        if platform_dict is None:
            return []
        return sorted(platform_dict.keys())

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-compatible nested dict (see from_dict)."""
        result: dict[str, Any] = {}
        for platform, config_keys_dict in self._configs.items():
            result[platform] = {}
            for config_key, config in config_keys_dict.items():
                result[platform][config_key] = json.loads(config.to_json())
        return result

    @classmethod
    def from_dict(cls, kernel_name: str, data: dict[str, Any]) -> "ConfigSet":
        """Build a ConfigSet from a nested platform->key->config dict."""
        config_set = cls(kernel_name)
        count = 0
        for platform, platform_data in data.items():
            # Fix: normalize platform names so on-disk casing cannot diverge
            # from the lowercase keys used by set_config()/has_config().
            platform = platform.lower()
            if platform not in config_set._configs:
                config_set._configs[platform] = {}
            for config_key, config_data in platform_data.items():
                config = helion.Config(**config_data)
                config_set._configs[platform][config_key] = config
                count += 1
        if count > 0:
            logger.debug(
                "Loaded %d configs for kernel '%s'",
                count,
                kernel_name,
            )
        return config_set

    def set_config(
        self, platform: str, config_key: str, config: "helion.Config"
    ) -> None:
        """Insert or overwrite the config for (platform, config_key)."""
        platform = platform.lower()
        if platform not in self._configs:
            self._configs[platform] = {}
        self._configs[platform][config_key] = config
        logger.debug(
            "Set config for kernel '%s': platform='%s', key='%s'",
            self._kernel_name,
            platform,
            config_key,
        )

    def has_config(self, platform: str, config_key: str) -> bool:
        """Return True if a config exists for (platform, config_key)."""
        platform = platform.lower()
        platform_dict = self._configs.get(platform)
        if platform_dict is None:
            return False
        return config_key in platform_dict
class ConfigManager:
    """File-level configuration management for Helion kernels (global singleton).

    The singleton is enforced in __new__: the first construction fixes the
    base directory, and later constructions must pass the same (resolved)
    base_dir or omit it. Use get_instance() to fetch the existing singleton
    and reset_instance() (tests only) to start over.
    """

    _instance: "ConfigManager | None" = None
    _instance_base_dir: Path | None = None

    def __new__(cls, base_dir: str | Path | None = None) -> "ConfigManager":
        resolved_base_dir = cls._resolve_base_dir(base_dir)
        if cls._instance is not None:
            # Re-construction is allowed only with a matching base_dir;
            # comparison is on resolved paths, so equivalent spellings pass.
            if cls._instance_base_dir != resolved_base_dir:
                raise ValueError(
                    f"ConfigManager singleton already exists with base_dir "
                    f"'{cls._instance_base_dir}', cannot create with different "
                    f"base_dir '{resolved_base_dir}'"
                )
            return cls._instance
        instance = super().__new__(cls)
        cls._instance = instance
        cls._instance_base_dir = resolved_base_dir
        return instance

    def __init__(self, base_dir: str | Path | None = None):
        # __init__ runs on every ConfigManager(...) call, even when __new__
        # returned the existing instance — the attribute guard makes
        # re-initialization a no-op.
        if hasattr(self, "_base_dir"):
            return
        self._base_dir = self._resolve_base_dir(base_dir)
        logger.debug("ConfigManager initialized with base_dir: %s", self._base_dir)

    @staticmethod
    def _resolve_base_dir(base_dir: str | Path | None) -> Path:
        """Resolve an explicit base_dir, or default to <module dir>/configs."""
        if base_dir is not None:
            return Path(base_dir).resolve()
        return (Path(__file__).parent / "configs").resolve()

    @classmethod
    def get_instance(cls) -> "ConfigManager":
        """Return the existing singleton; never constructs one implicitly."""
        if cls._instance is None:
            raise RuntimeError(
                "ConfigManager instance has not been created. "
                "Call ConfigManager(base_dir=...) first to initialize."
            )
        return cls._instance

    @classmethod
    def reset_instance(cls) -> None:
        """For testing purposes only."""
        cls._instance = None
        cls._instance_base_dir = None

    def get_config_file_path(self, kernel_name: str) -> Path:
        """Path of the kernel's config file: {base_dir}/{kernel_name}.json."""
        return self._base_dir / f"{kernel_name}.json"

    def ensure_base_dir_exists(self) -> Path:
        """Create the base directory (and parents) if missing; return it."""
        self._base_dir.mkdir(parents=True, exist_ok=True)
        return self._base_dir

    def ensure_base_dir_writable(self) -> None:
        """Raise OSError if the base directory cannot be written to."""
        self.ensure_base_dir_exists()
        # Probe writability by actually creating and removing a file.
        test_file = self._base_dir / ".write_test"
        try:
            test_file.write_text("test")
            test_file.unlink()
        except OSError as e:
            raise OSError(
                f"Config directory '{self._base_dir}' is not writable: {e}"
            ) from e

    def load_config_set(self, kernel_name: str) -> ConfigSet:
        """Load a kernel's ConfigSet; missing/corrupt files yield an empty set."""
        config_path = self.get_config_file_path(kernel_name)
        if not config_path.exists():
            return ConfigSet.from_dict(kernel_name, {})
        try:
            with open(config_path) as f:
                data = json.load(f)
            return ConfigSet.from_dict(kernel_name, data)
        except (json.JSONDecodeError, OSError) as e:
            # Degrade gracefully: log and behave as if no configs exist.
            logger.error("Failed to load config file %s: %s", config_path, e)
            return ConfigSet.from_dict(kernel_name, {})

    def get_platform_configs(
        self, kernel_name: str, platform: str
    ) -> dict[str, helion.Config]:
        """Return config_key -> helion.Config for one kernel/platform."""
        config_set = self.load_config_set(kernel_name)
        config_keys = config_set.get_config_keys(platform)
        return {
            config_key: config_set.get_config(platform, config_key)
            for config_key in config_keys
        }

    def save_config_set(self, config_set: ConfigSet) -> Path:
        """Write a ConfigSet to its JSON file, creating directories as needed."""
        config_path = self.get_config_file_path(config_set.kernel_name)
        config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(config_path, "w") as f:
            json.dump(config_set.to_dict(), f, indent=2)
        logger.info("Saved config to: %s", config_path)
        return config_path

    def save_configs(
        self,
        kernel_name: str,
        platform: str,
        configs: dict[str, "helion.Config"],
    ) -> Path:
        """Save configs for a kernel/platform, merging with existing."""
        config_set = self.load_config_set(kernel_name)
        for config_key, config in configs.items():
            config_set.set_config(platform, config_key, config)
        return self.save_config_set(config_set)

    def config_exists(self, kernel_name: str, platform: str, config_key: str) -> bool:
        """Return True if a persisted config exists for the triple."""
        config_set = self.load_config_set(kernel_name)
        return config_set.has_config(platform, config_key)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/kernels/helion/config_manager.py",
"license": "Apache License 2.0",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/model_executor/model_loader/test_reload.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import gc
import inspect
from weakref import WeakKeyDictionary, ref
import pytest
import torch
from vllm.model_executor.layers.linear import QKVParallelLinear
from vllm.model_executor.model_loader.reload.meta import (
capture_layer_to_meta,
get_numel_loaded,
materialize_layer,
materialize_meta_tensor,
restore_layer_on_meta,
to_meta_tensor,
)
from vllm.model_executor.model_loader.reload.types import LayerReloadingInfo
from vllm.model_executor.model_loader.reload.utils import get_layer_tensors
from vllm.platforms import current_platform
from vllm.utils.torch_utils import cuda_device_count_stateless
def test_move_metatensors():
    """Round-trip a tensor through meta and back, preserving metadata."""
    original = torch.empty((1, 2, 3))
    meta = to_meta_tensor(original)
    restored = materialize_meta_tensor(meta)
    assert meta.device.type == "meta"
    assert restored.device == original.device
    for candidate in (meta, restored):
        assert candidate.dtype == original.dtype
        assert candidate.shape == original.shape
        assert candidate.__class__ == original.__class__
        assert candidate.__dict__ == original.__dict__
def test_reload_lifecycle():
    """Capture -> restore-on-meta -> materialize must preserve tensor metadata."""
    layer = torch.nn.Linear(2, 3)
    info = LayerReloadingInfo(restore_metadata=capture_layer_to_meta(layer))
    restore_layer_on_meta(layer, info)
    # After restore, the layer's tensors keep dtype/shape/class/attrs.
    for name, tensor in get_layer_tensors(layer).items():
        meta_tensor = getattr(layer, name)
        assert tensor.dtype == meta_tensor.dtype
        assert tensor.shape == meta_tensor.shape
        assert tensor.__class__ == meta_tensor.__class__
        assert tensor.__dict__ == meta_tensor.__dict__
    materialize_layer(layer)
    # Materialization allocates real storage but keeps the same metadata.
    for name, tensor in get_layer_tensors(layer).items():
        materialized_tensor = getattr(layer, name)
        assert tensor.dtype == materialized_tensor.dtype
        assert tensor.shape == materialized_tensor.shape
        assert tensor.__class__ == materialized_tensor.__class__
        assert tensor.__dict__ == materialized_tensor.__dict__
def test_model_cleanup(dist_init, default_vllm_config):
    """A layer tracked in a WeakKeyDictionary must be collectible once dropped."""
    layer = QKVParallelLinear(2, 3, 4)
    # weight_loader is a bound method whose __self__ is the layer itself —
    # a potential circular reference that must not keep the layer alive.
    assert layer.weight.weight_loader.__self__ is layer
    info = LayerReloadingInfo(restore_metadata=capture_layer_to_meta(layer))
    mock_info_dict: WeakKeyDictionary[torch.nn.Module, LayerReloadingInfo] = (
        WeakKeyDictionary()
    )
    mock_info_dict[layer] = info
    layer_ref = ref(layer)
    del layer
    gc.collect()
    # Both the weak reference and the weak-dict entry must be gone.
    assert layer_ref() is None
    assert len(mock_info_dict) == 0
def test_get_numel_loaded():
    """get_numel_loaded counts elements copied into a meta param and
    propagates the loader's return value."""
    param = torch.empty(10, device="meta")
    loaded_weight = torch.empty(10)

    def complex_weight_loader(param, loaded_weight):
        # Two disjoint slice copies: 3 + 3 = 6 elements total.
        param[:3] = loaded_weight[:3]
        param[5:8] = loaded_weight[5:8]
        return "value"

    args = inspect.signature(complex_weight_loader).bind(param, loaded_weight)
    num_loaded, ret = get_numel_loaded(complex_weight_loader, args)
    assert num_loaded == 6
    assert ret == "value"
@pytest.mark.parametrize("tp_size", [2])
@pytest.mark.parametrize(
    "base_model,mul_model,add_model",
    [
        (
            "Qwen/Qwen3-0.6B",
            "inference-optimization/Qwen3-0.6B-debug-multiply",
            "inference-optimization/Qwen3-0.6B-debug-add",
        ),
        (
            "inference-optimization/Qwen3-0.6B-FP8_BLOCK",
            "inference-optimization/Qwen3-0.6B-debug-multiply-FP8_BLOCK",
            "inference-optimization/Qwen3-0.6B-debug-add-FP8_BLOCK",
        ),
        (
            "inference-optimization/Qwen3-0.6B-W4A16-G128",
            "inference-optimization/Qwen3-0.6B-debug-multiply-W4A16-G128",
            "inference-optimization/Qwen3-0.6B-debug-add-W4A16-G128",
        ),
        (
            "inference-optimization/DeepSeek-V3-debug-empty",
            "inference-optimization/DeepSeek-V3-debug-multiply",
            "inference-optimization/DeepSeek-V3-debug-add",
        ),
        (
            "inference-optimization/DeepSeek-V3-debug-empty-FP8_DYNAMIC",
            "inference-optimization/DeepSeek-V3-debug-multiply-FP8_DYNAMIC",
            "inference-optimization/DeepSeek-V3-debug-add-FP8_DYNAMIC",
        ),
        (
            "inference-optimization/DeepSeek-V3-debug-empty-NVFP4A16",
            "inference-optimization/DeepSeek-V3-debug-multiply-NVFP4A16",
            "inference-optimization/DeepSeek-V3-debug-add-NVFP4A16",
        ),
    ],
)
def test_reload_weights(base_model, mul_model, add_model, tp_size, vllm_runner):
    """Reloading 'multiply' vs 'add' debug checkpoints must flip which answer
    ("12" vs "7" for the prompt "3 4 =") the model finds more likely, proving
    the reloaded weights actually took effect."""
    if cuda_device_count_stateless() < tp_size:
        pytest.skip(reason="Not enough CUDA devices")
    if "FP8" in base_model and not current_platform.supports_fp8():
        pytest.skip(reason="Requires FP8 support")
    with vllm_runner(
        model_name=base_model,
        tensor_parallel_size=tp_size,
        enable_expert_parallel=(tp_size > 1 and "DeepSeek" in base_model),
        enable_prefix_caching=False,
    ) as llm:
        # After loading the 'multiply' checkpoint, "3 4 = 12" should be
        # less surprising (lower perplexity) than "3 4 = 7".
        llm.collective_rpc("reload_weights", kwargs={"weights_path": mul_model})
        mul_perp = llm.generate_prompt_perplexity(["3 4 = 12"], mask=["3 4 ="])[0]
        add_perp = llm.generate_prompt_perplexity(["3 4 = 7"], mask=["3 4 ="])[0]
        assert mul_perp < add_perp
        # Reload the 'add' checkpoint: the preference must invert.
        llm.collective_rpc("reload_weights", kwargs={"weights_path": add_model})
        mul_perp = llm.generate_prompt_perplexity(["3 4 = 12"], mask=["3 4 ="])[0]
        add_perp = llm.generate_prompt_perplexity(["3 4 = 7"], mask=["3 4 ="])[0]
        assert add_perp < mul_perp
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/model_executor/model_loader/test_reload.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/model_loader/reload/layerwise.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import inspect
from collections.abc import Callable
from functools import wraps
from weakref import WeakKeyDictionary
import torch
from vllm.config import ModelConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.attention import Attention, MLAAttention
from vllm.model_executor.layers.quantization.base_config import QuantizeMethodBase
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from .meta import (
capture_layer_to_meta,
get_numel_loaded,
materialize_layer,
restore_layer_on_meta,
)
from .types import LayerReloadingInfo
from .utils import get_layer_params_buffers, get_layer_size, get_layer_tensors
logger = init_logger(__name__)
__all__ = [
"get_layerwise_info",
"record_metadata_for_reloading",
"initialize_layerwise_reload",
"finalize_layerwise_reload",
]
# Global dict storing information used for layerwise restoring, loading, and processing.
# For more information regarding what info is stored when, see `LayerReloadingInfo`
#
# Use a weak ref dictionary so that modules can be freed when the model is freed.
# Values are sanitized from references to the layer key in order to avoid circular refs
LAYERWISE_INFO: WeakKeyDictionary[torch.nn.Module, LayerReloadingInfo] = (
WeakKeyDictionary()
)
def get_layerwise_info(layer: torch.nn.Module) -> LayerReloadingInfo:
    """
    Get information related to restoring and layerwise processing. If no previous
    information existed, a new entry is constructed
    """
    try:
        return LAYERWISE_INFO[layer]
    except KeyError:
        fresh = LayerReloadingInfo()
        LAYERWISE_INFO[layer] = fresh
        return fresh
def record_metadata_for_reloading(model: torch.nn.Module):
    """
    Record layer metadata needed for later reloading.

    Stores parameter and buffer metadata as meta tensors for restoration.
    Must be called before `initialize_layerwise_reload`.
    """
    for module in model.modules():
        get_layerwise_info(module).restore_metadata = capture_layer_to_meta(module)
@torch.no_grad()
def initialize_layerwise_reload(model: torch.nn.Module):
    """
    Set up layerwise weight loading with deferred processing.

    Must be called after `record_metadata_for_reloading`. This function:
    1. Saves current kernel tensors for later copying
    2. Restores layer parameters/buffers from metadata (on meta device)
    3. Wraps weight loaders to defer processing until all weights are loaded

    When all weights for a layer are loaded, the wrapped loaders will:
    1. Materialize the layer onto the target device
    2. Load all cached weights
    3. Run quantization processing if applicable
    4. Copy processed values back to original tensor storage
    """
    # disable torchao reloading to avoid infinite recursion
    model._original_do_torchao_reload = getattr(model, "_do_torchao_reload", False)
    model._do_torchao_reload = False
    for layer in model.modules():
        info = get_layerwise_info(layer)
        # Skip if the layer has already been initialized
        if info.can_process():
            continue
        # Save current tensors for later copying
        info.kernel_tensors = get_layer_params_buffers(layer)
        # Restore layer parameters/buffers onto meta device
        restore_layer_on_meta(layer, info)
        # Track loading progress to determine when to process/copy
        info.load_numel = 0
        info.load_numel_total = get_layer_size(layer)
        # Wrap each parameter's weight loader
        # Note that nested wrapping will occur for shared tensors
        for name, tensor in get_layer_tensors(layer).items():
            # Guard by the wrapper's function name so a tensor that is
            # already wrapped is not wrapped a second time here.
            if _get_weight_loader(tensor).__name__ != "online_process_loader":
                tensor.weight_loader = make_online_process_loader(layer, name)
def make_online_process_loader(layer: torch.nn.Module, param_name: str) -> Callable:
    """Create a wrapped weight loader that defers processing.

    The returned closure caches each load call's bound arguments on the
    layer's LayerReloadingInfo and counts loaded elements; once the layer's
    total is reached it triggers `_layerwise_process` (except for
    Attention/MLA layers, which are finalized later).
    """
    info = get_layerwise_info(layer)
    param = getattr(layer, param_name)
    # Unwrap any previous layerwise wrapper so we never wrap a wrapper.
    original_loader = _get_original_loader(param)
    loader_signature = inspect.signature(original_loader)

    @wraps(original_loader, assigned=("__doc__", "__annotations__"))
    def online_process_loader(*args, **kwargs):
        if not info.can_process():
            # Unfortunately, some qconfigs are set up to load the same weight
            # multiple times. For example, CT_WNA16 loads `weight_shape` for
            # each of the qkv partitions. This results in layers loading extra
            # weights (beyond load_numel_total) after it's already processed.
            #
            # Best solution is to ensure that `load_numel_total` reflects the
            # actual number of weights loaded, either by modifying qconfigs to
            # create as many weights as loaded (see padding issue as well)
            # or maybe capturing how many weights are loaded on first pass
            #
            # For now, `load_numel_total` is still safe to use as long as
            # there's no way to reach `load_numel_total` without loading all
            # necessary weights. `weight_shape` is very small, so this is safe.
            # see Limitations(4)
            logger.debug("%s: Excessive loading", layer.__class__.__name__)
            return
        # Bind and normalize arguments
        bound_args = loader_signature.bind(*args, **kwargs)
        bound_args.apply_defaults()
        # Cache loaded weights, track loading progress
        info.loaded_weights.append((param_name, bound_args))
        num_loaded, ret = get_numel_loaded(original_loader, bound_args)
        info.load_numel += num_loaded
        logger.debug(
            "%s: %d / %d",
            layer.__class__.__name__,
            info.load_numel,
            info.load_numel_total,
        )
        # Process and copy when all weights are loaded
        if info.load_numel >= info.load_numel_total and not isinstance(  # type: ignore[operator]
            layer, (Attention, MLAAttention)
        ):
            _layerwise_process(layer, info)
        return ret

    return online_process_loader
def finalize_layerwise_reload(model: torch.nn.Module, model_config: ModelConfig):
    """
    Remove the outermost layer of weight loading wrappers.

    This function should be applied after `initialize_layerwise_reload` is applied
    unwrap the layerwise weight loaders.

    Also processes Attention/MLA layers, which must be processed after all other layers
    """
    # Restore the torchao-reload flag saved by initialize_layerwise_reload.
    model._do_torchao_reload = model._original_do_torchao_reload
    for layer in model.modules():
        info = get_layerwise_info(layer)
        # Attention/MLA layers are processed after all other layers
        if isinstance(layer, (Attention, MLAAttention)):
            if info.load_numel > 0:
                raise NotImplementedError(
                    "Layerwise reloading of Q/K/V scale weights is not implemented yet"
                )
            else:
                _place_kernel_tensors(layer, info)
                layer.process_weights_after_loading(model_config.dtype)
        # No weights were loaded, place kernel tensors back
        elif info.can_process() and info.load_numel <= 0:
            _place_kernel_tensors(layer, info)
        # Process non-attention layers which did not load all elements. This can happen
        # if the created weight has extra padding elements which are not loaded
        # Having too many of these delayed layers can lead to execess memory usage
        # see Limitations(4)
        elif info.load_numel > 0 and info.load_numel < info.load_numel_total:  # type: ignore[operator]
            logger.debug("%s: Delayed processing", layer.__class__.__name__)
            _layerwise_process(layer, info)
        # Clear per-layer reload state regardless of which branch ran.
        info.reset()
def _layerwise_process(layer: torch.nn.Module, info: LayerReloadingInfo):
    """
    Finalize layer loading after all weights have been cached.

    This function:
    1. Materializes the layer onto the target device
    2. Loads all cached weights
    3. Runs quantization processing if applicable
    4. Copies processed values back to original tensor storage
    """
    # Materialize layer tensors onto device
    materialize_layer(layer)
    # Reset FP8 online quantization flag so process_weights_after_loading
    # will run again during reload
    if hasattr(layer, "_already_called_process_weights_after_loading"):
        delattr(layer, "_already_called_process_weights_after_loading")
    # Unwrap layerwise loading wrappers
    for param in get_layer_tensors(layer).values():
        param.weight_loader = _get_original_loader(param)
    # Load all cached weights into materialized layer (using original loaders)
    for name, args in info.loaded_weights:
        param = getattr(layer, name)
        # Re-point the cached call at the freshly materialized parameter
        # (the cached args still reference the old meta tensor).
        args.arguments["param"] = param
        param.weight_loader(*args.args, **args.kwargs)
    # Process weights (quantization, repacking, etc.)
    # Attention/MLA are processed in `finalize_layerwise_reload`
    quant_method = getattr(layer, "quant_method", None)
    if isinstance(quant_method, QuantizeMethodBase):
        quant_method.process_weights_after_loading(layer)
    # Copy processed values into original tensor storage (preserves cudagraph refs)
    # this code is a no-op if not reloading (because kernel tensors is empty)
    parameters, buffers = info.kernel_tensors
    for name, param in parameters.items():
        param.data.copy_(getattr(layer, name))
    for name, buffer in buffers.items():
        buffer.data.copy_(getattr(layer, name))
    _place_kernel_tensors(layer, info)
    info.reset()
    logger.debug("%s: Processed", layer.__class__.__name__)
def _get_original_loader(tensor: torch.Tensor) -> Callable:
    """Return the weight loader with any layerwise wrappers removed."""
    current = _get_weight_loader(tensor)
    # Peel off nested online_process_loader wrappers until the real one.
    while current.__name__ == "online_process_loader":
        current = current.__wrapped__  # type: ignore[union-attr]
    return current
def _get_weight_loader(tensor: torch.Tensor):
    """Return the tensor's weight loader, falling back to the default."""
    if hasattr(tensor, "weight_loader"):
        return tensor.weight_loader
    return default_weight_loader
def _place_kernel_tensors(layer: torch.nn.Module, info: LayerReloadingInfo):
    """Re-attach the saved kernel parameters/buffers onto the layer."""
    # Drop whatever tensors currently live on the layer first.
    for tensor_name in get_layer_tensors(layer):
        delattr(layer, tensor_name)
    saved_params, saved_buffers = info.kernel_tensors
    for tensor_name, saved in saved_params.items():
        layer.register_parameter(tensor_name, saved)
    for tensor_name, saved in saved_buffers.items():
        layer.register_buffer(tensor_name, saved)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/layerwise.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/reload/meta.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import inspect
from collections.abc import Callable
import torch
from torch.utils._python_dispatch import TorchDispatchMode
from .sanitize import restore_layer_refs, sanitize_layer_refs
from .types import LayerReloadingInfo, LayerTensors
from .utils import get_layer_params_buffers, get_layer_tensors
__all__ = [
"to_meta_tensor",
"materialize_meta_tensor",
"capture_layer_to_meta",
"restore_layer_on_meta",
"materialize_layer",
"get_numel_loaded",
]
SKIP_MODULES: set[str] = {"HadamardTransform"}
SKIP_TENSORS: set[str] = {
"_expert_map",
"expert_mask",
"expert_global_to_physical",
"expert_physical_to_global",
"expert_local_to_global",
}
def to_meta_tensor(tensor: torch.Tensor) -> torch.Tensor:
    """Convert a tensor to a meta tensor while preserving class and attributes."""
    shadow = tensor.data.to("meta")
    # Carry over the subclass and a shallow copy of instance attributes.
    shadow.__class__ = tensor.__class__
    shadow.__dict__ = tensor.__dict__.copy()
    return shadow
def materialize_meta_tensor(meta_tensor: torch.Tensor) -> torch.Tensor:
    """
    Materialize a meta tensor into an actual tensor on the current device.

    Should be called within the torch device context for the given rank.
    """
    # Allocate uninitialized storage with the exact same size/stride layout.
    real = torch.empty_strided(
        size=tuple(meta_tensor.size()),
        stride=tuple(meta_tensor.stride()),
        dtype=meta_tensor.dtype,
        requires_grad=False,
    )
    real.__class__ = meta_tensor.__class__
    real.__dict__ = meta_tensor.__dict__.copy()
    return real
def capture_layer_to_meta(layer: torch.nn.Module) -> LayerTensors:
    """Snapshot a layer's parameters and buffers as meta tensors.

    Returns a (params, buffers) pair of name -> meta-tensor dicts.
    Layers in SKIP_MODULES and tensors in SKIP_TENSORS are excluded.
    Each tensor is sanitized of references back to the layer to avoid
    circular references in the weak-keyed info dict.
    """
    if layer.__class__.__name__ in SKIP_MODULES:
        return ({}, {})
    params, buffers = get_layer_params_buffers(layer)
    return (
        {
            name: sanitize_layer_refs(to_meta_tensor(param), layer)
            for name, param in params.items()
            if name not in SKIP_TENSORS
        },
        {
            name: sanitize_layer_refs(to_meta_tensor(buffer), layer)
            for name, buffer in buffers.items()
            if name not in SKIP_TENSORS
        },
    )
def restore_layer_on_meta(layer: torch.nn.Module, info: LayerReloadingInfo):
    """Restore a layer to model format with tensors on the meta device"""
    if layer.__class__.__name__ in SKIP_MODULES:
        return
    # Remove the layer's current tensors before re-registering from metadata.
    for name in get_layer_tensors(layer):
        if name not in SKIP_TENSORS:
            delattr(layer, name)
    restore_params, restore_buffers = info.restore_metadata
    for name, param in restore_params.items():
        if name not in SKIP_TENSORS:
            # Re-attach references to the layer that were stripped at capture.
            param = restore_layer_refs(param, layer)
            layer.register_parameter(name, param)
    for name, buffer in restore_buffers.items():
        if name not in SKIP_TENSORS:
            buffer = restore_layer_refs(buffer, layer)
            layer.register_buffer(name, buffer)
def materialize_layer(layer: torch.nn.Module) -> None:
    """Materialize all meta tensors in a layer to actual tensors."""
    if layer.__class__.__name__ in SKIP_MODULES:
        return
    for tensor_name, meta_tensor in get_layer_tensors(layer).items():
        if tensor_name in SKIP_TENSORS:
            continue
        setattr(layer, tensor_name, materialize_meta_tensor(meta_tensor))
class MetaCopyCounter(TorchDispatchMode):
    """
    Tracks total number of elements modified with `copy_`.

    Useful for keeping track of weight loading where underlying weights can be
    arbitrarily transformed (such as with `narrow`) before calling copy.

    Note: Assumes that copy kwargs are not used.
    """

    def __init__(self):
        super().__init__()
        # Running total of elements copied into meta tensors.
        self.copied_numel = 0

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = {} if kwargs is None else kwargs
        is_meta_copy = (
            func is torch.ops.aten.copy_.default and args[0].device.type == "meta"
        )
        if is_meta_copy:
            assert args[0].numel() == args[1].numel()
            self.copied_numel += args[0].numel()
        return func(*args, **kwargs)
def get_numel_loaded(
    weight_loader: Callable, args: inspect.BoundArguments
) -> tuple[int, object]:
    """
    Determine how many elements would be loaded by a weight loader call.

    :param weight_loader: used to load weights
    :param args: bound arguments to weight loader
    :return: number of elements loaded by the weight loader, the return value of the
        weight loader
    """
    # Counting only works when the target param lives on the meta device.
    assert args.arguments["param"].device.type == "meta"
    with MetaCopyCounter() as counter:
        result = weight_loader(*args.args, **args.kwargs)
    return counter.copied_numel, result
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/meta.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.