sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
vllm-project/vllm:tests/v1/entrypoints/openai/test_multi_api_servers.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import os
import openai # use the official client for correctness check
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServer
from tests.v1.utils import check_request_balancing
# Deliberately tiny random model so CI server startup is fast.
MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
# Data-parallel size under test; overridable via the DP_SIZE env var (kept as
# a string because it is passed straight through as a CLI argument).
DP_SIZE = os.getenv("DP_SIZE", "1")
@pytest.fixture(scope="module")
def default_server_args():
    """CLI arguments for the test server: small limits, eager mode, and
    multiple API server processes in front of the data-parallel engines."""
    return [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "2048",
        "--max-num-seqs",
        "128",
        "--enforce-eager",
        "--api-server-count",
        "4",
        "--data_parallel_size",
        DP_SIZE,
    ]
@pytest.fixture(scope="module")
def server(default_server_args):
    # Module-scoped: the (expensive) server launch happens once per module.
    with RemoteOpenAIServer(MODEL_NAME, default_server_args) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server):
    # Fresh async OpenAI client per test, pointed at the shared test server.
    async with server.get_async_client() as async_client:
        yield async_client
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_single_completion(
    client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
) -> None:
    """Non-streaming completions work for a single request and under two
    100-request bursts, with load spread across the DP engines."""

    async def make_request():
        completion = await client.completions.create(
            model=model_name, prompt="Hello, my name is", max_tokens=10, temperature=1.0
        )
        assert completion.id is not None
        assert completion.choices is not None and len(completion.choices) == 1
        choice = completion.choices[0]
        # The exact number of tokens can vary slightly with temperature=1.0,
        # so we check for a reasonable minimum length.
        assert len(choice.text) >= 1
        # Finish reason might not always be 'length' if the model finishes early
        # or due to other reasons, especially with high temperature.
        # So, we'll accept 'length' or 'stop'.
        assert choice.finish_reason in ("length", "stop")
        # Token counts can also vary, so we check they are positive.
        assert completion.usage.completion_tokens > 0
        assert completion.usage.prompt_tokens > 0
        assert completion.usage.total_tokens > 0
        return completion

    # Test single request
    result = await make_request()
    assert result is not None
    await asyncio.sleep(0.5)
    # Send two bursts of requests
    num_requests = 100
    tasks = [make_request() for _ in range(num_requests)]
    results = await asyncio.gather(*tasks)
    assert len(results) == num_requests
    assert all(completion is not None for completion in results)
    await asyncio.sleep(0.5)
    tasks = [make_request() for _ in range(num_requests)]
    results = await asyncio.gather(*tasks)
    assert len(results) == num_requests
    assert all(completion is not None for completion in results)
    # Check request balancing via Prometheus metrics if DP_SIZE > 1
    check_request_balancing(server, int(DP_SIZE))
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_completion_streaming(
    client: openai.AsyncOpenAI, server: RemoteOpenAIServer, model_name: str
) -> None:
    """Streamed completions reassemble to the non-streamed output (at
    temperature 0), for a single request and under two 100-request bursts."""
    prompt = "What is an LLM?"

    async def make_streaming_request():
        # Perform a non-streaming request to get the expected full output
        single_completion = await client.completions.create(
            model=model_name,
            prompt=prompt,
            max_tokens=5,
            temperature=0.0,
        )
        single_output = single_completion.choices[0].text
        # Perform the streaming request
        stream = await client.completions.create(
            model=model_name, prompt=prompt, max_tokens=5, temperature=0.0, stream=True
        )
        chunks: list[str] = []
        finish_reason_count = 0
        last_chunk = None
        async for chunk in stream:
            chunks.append(chunk.choices[0].text)
            if chunk.choices[0].finish_reason is not None:
                finish_reason_count += 1
            last_chunk = chunk  # Keep track of the last chunk
        # finish reason should only return in the last block for OpenAI API
        assert finish_reason_count == 1, "Finish reason should appear exactly once."
        assert last_chunk is not None, "Stream should have yielded at least one chunk."
        assert last_chunk.choices[0].finish_reason == "length", (
            "Finish reason should be 'length'."
        )
        # Check that the combined text matches the non-streamed version.
        assert "".join(chunks) == single_output, (
            "Streamed output should match non-streamed output."
        )
        return True  # Indicate success for this request

    # Test single request
    result = await make_streaming_request()
    assert result is not None
    await asyncio.sleep(0.5)
    # Send two bursts of requests
    num_requests = 100
    tasks = [make_streaming_request() for _ in range(num_requests)]
    results = await asyncio.gather(*tasks)
    assert len(results) == num_requests, (
        f"Expected {num_requests} results, got {len(results)}"
    )
    assert all(results), "Not all streaming requests completed successfully."
    await asyncio.sleep(0.5)
    tasks = [make_streaming_request() for _ in range(num_requests)]
    results = await asyncio.gather(*tasks)
    assert len(results) == num_requests, (
        f"Expected {num_requests} results, got {len(results)}"
    )
    assert all(results), "Not all streaming requests completed successfully."
    # Check request balancing via Prometheus metrics if DP_SIZE > 1
    check_request_balancing(server, int(DP_SIZE))
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/entrypoints/openai/test_multi_api_servers.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/engine/coordinator.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import multiprocessing
import time
import weakref
import msgspec.msgpack
import zmq
from vllm.config import ParallelConfig
from vllm.logger import init_logger
from vllm.utils.network_utils import make_zmq_socket
from vllm.utils.system_utils import get_mp_context, set_process_title
from vllm.v1.engine import EngineCoreOutputs, EngineCoreRequestType
from vllm.v1.serial_utils import MsgpackDecoder
from vllm.v1.utils import get_engine_client_zmq_addr, shutdown
logger = init_logger(__name__)


class DPCoordinator:
    """Coordinator process used for data-parallel deployments (DP>1).

    Intermediates between multiple DP engine rank processes and one or more
    front-end API server processes.

    * Collects stats from each DP engine (currently just waiting and running
      queue lengths), and publishes these to all front-ends for use in
      load-balancing decisions.

    * Keeps track of the current DP "request wave" number and running state
      of the engines. This is received from the DP rank 0 engine and published
      to the front-end processes along with the current load stats.

      The engines alternate between a global running/paused state. The global
      "request wave" number is a count of the number of times that the workers
      collectively move from a running state to a paused state. This transition
      is synchronized via the all-reduce operation performed in the
      DPEngineCoreProc._has_global_unfinished_reqs method.

    * Broadcasts the START_DP_WAVE message to engines to move them from paused
      to running state when one engine receives a new request. This can happen
      in two cases:
      1) A front-end sending a new request while the engines are paused will
         concurrently notify the coordinator.
      2) An engine receiving a request for a stale request wave while in paused
         state will notify the coordinator.

      Engines will move into running state when receiving a new request or
      START_DP_WAVE message.

    Note that when deployed in External LB mode, no stats will be published by
    the engines and thus updates will only be sent to front-ends when the
    request wave / running state changes.
    """

    def __init__(
        self, parallel_config: ParallelConfig, enable_wave_coordination: bool = True
    ):
        """Spawn the daemon coordinator process and record its ZMQ addresses.

        Args:
            parallel_config: supplies DP size, master IP, and LB mode flags.
            enable_wave_coordination: forwarded to the coordinator proc; when
                False, wave-related messages are not processed.
        """
        dp_size = parallel_config.data_parallel_size
        assert dp_size > 1, "Coordinator only used for data parallel"
        host = parallel_config.data_parallel_master_ip
        # Assume coordinator is colocated with front-end procs when not in
        # either external or hybrid DP LB mode.
        local_only = not parallel_config.local_engines_only
        front_publish_address = get_engine_client_zmq_addr(
            local_only=local_only, host=host
        )
        # Engine-facing sockets can be local (IPC) only when all engines are
        # on this node.
        local_only_eng = dp_size == parallel_config.data_parallel_size_local
        # NOTE(yongji): handling scaling from intra-node to inter-node
        if parallel_config.enable_elastic_ep:
            local_only_eng = False
        back_publish_address = get_engine_client_zmq_addr(local_only_eng, host)
        back_output_address = get_engine_client_zmq_addr(local_only_eng, host)
        context = get_mp_context()
        self.proc: multiprocessing.Process = context.Process(
            target=DPCoordinatorProc.run_coordinator,
            name="VLLM_DP_Coordinator",
            kwargs={
                "engine_count": parallel_config.data_parallel_size,
                "front_publish_address": front_publish_address,
                "back_output_address": back_output_address,
                "back_publish_address": back_publish_address,
                "enable_wave_coordination": enable_wave_coordination,
            },
            daemon=True,
        )
        self.proc.start()
        self.stats_publish_address = front_publish_address
        self.coord_in_address = back_publish_address
        self.coord_out_address = back_output_address
        # Ensure the child process is shut down when this object is GC'd.
        self._finalizer = weakref.finalize(self, shutdown, [self.proc])

    def get_stats_publish_address(self) -> str:
        """Address front-ends subscribe to for load/wave updates."""
        return self.stats_publish_address

    def get_engine_socket_addresses(self) -> tuple[str, str]:
        """Returns tuple of ZMQ input address, output address."""
        return self.coord_in_address, self.coord_out_address

    def close(self):
        """Terminate the coordinator process (idempotent via finalizer)."""
        self._finalizer()
class EngineState:
    """Mutable per-engine load snapshot held by the coordinator."""

    def __init__(self):
        # Two-slot list mutated in place: index 0 = waiting, index 1 = running.
        self.request_counts = [0] * 2  # [waiting, running]
class DPCoordinatorProc:
    """Body of the coordinator process: owns the ZMQ event loop that relays
    stats and wave-state between DP engines and front-end API servers."""

    def __init__(
        self,
        engine_count: int,
        min_stats_update_interval_ms: int = 100,
        enable_wave_coordination: bool = True,
    ):
        set_process_title("DPCoordinator")
        self.ctx = zmq.Context()
        # One EngineState ([waiting, running] counts) per DP engine rank.
        self.engines = [EngineState() for _ in range(engine_count)]
        self.stats_update_interval_ms = min_stats_update_interval_ms
        self.enable_wave_coordination = enable_wave_coordination

    @staticmethod
    def run_coordinator(
        engine_count: int,
        front_publish_address: str,
        back_output_address: str,
        back_publish_address: str,
        min_stats_update_interval_ms: int = 100,
        enable_wave_coordination: bool = True,
    ):
        """Process entry point: construct the coordinator and run its loop
        until interrupted."""
        coordinator = DPCoordinatorProc(
            engine_count=engine_count,
            min_stats_update_interval_ms=min_stats_update_interval_ms,
            enable_wave_coordination=enable_wave_coordination,
        )
        try:
            coordinator.process_input_socket(
                front_publish_address,
                back_output_address,
                back_publish_address,
            )
        except KeyboardInterrupt:
            logger.info("DP Coordinator process exiting")

    def process_input_socket(
        self,
        front_publish_address: str,
        back_output_address: str,
        back_publish_address: str,
    ):
        """Main event loop.

        Sockets:
          * publish_front (XPUB): publishes (counts, wave, running) to
            front-ends; also receives new-request notifications from them.
          * output_back (PULL): receives stats / wave messages from engines.
          * publish_back (XPUB): broadcasts START_DP_WAVE to engines; receives
            their XPUB subscription messages.
        """
        decoder = MsgpackDecoder(EngineCoreOutputs)

        # For tracking request wave progression.
        current_wave = 0
        engines_running = False

        # For tracking request counts for internal load-balancing.
        stats_changed = False
        last_stats_step = -1
        last_stats_wave = -1
        last_step_counts: list[list[int]] | None = None

        with (
            make_zmq_socket(
                path=front_publish_address,  # IPC
                ctx=self.ctx,
                socket_type=zmq.XPUB,
                bind=True,
            ) as publish_front,
            make_zmq_socket(
                path=back_output_address,  # IPC or TCP
                ctx=self.ctx,
                socket_type=zmq.PULL,
                bind=True,
            ) as output_back,
            make_zmq_socket(
                path=back_publish_address,  # IPC or TCP
                ctx=self.ctx,
                socket_type=zmq.XPUB,
                bind=True,
            ) as publish_back,
        ):
            # Wait until all engines subscribe.
            # (XPUB delivers a b"\x01"-prefixed message per new subscriber.)
            for _ in self.engines:
                if publish_back.recv() != b"\x01":
                    logger.error(
                        "DP Coordinator received unexpected message while "
                        "waiting for engines to subscribe"
                    )
                    return
            # Send ready message to engines.
            publish_back.send(b"READY")
            logger.info("All engine subscriptions received by DP coordinator")

            poller = zmq.Poller()
            poller.register(publish_front, zmq.POLLIN)
            poller.register(publish_back, zmq.POLLIN)
            poller.register(output_back, zmq.POLLIN)
            last_publish_time = 0  # ms since epoch of last front-end publish
            while True:
                elapsed = int(time.time() * 1000) - last_publish_time
                # Send at stats_update_interval_ms interval if the stats have
                # changed, or otherwise every 5 seconds.
                wait_for = self.stats_update_interval_ms if stats_changed else 5000
                # Wait at least 50ms to ensure we've received all stats for
                # the current step.
                min_timeout = 50 if last_step_counts is None else 0
                events = poller.poll(timeout=max(min_timeout, wait_for - elapsed))
                if not events:
                    # Poller timeout - publish current stats to front-ends.
                    if last_step_counts is not None:
                        engine_req_counts_list = last_step_counts
                        last_step_counts = None
                    else:
                        engine_req_counts_list = self._get_engine_counts()
                        stats_changed = False
                    to_publish = (engine_req_counts_list, current_wave, engines_running)
                    publish_front.send(msgspec.msgpack.encode(to_publish))
                    last_publish_time = int(time.time() * 1000)
                    continue

                events = dict(events)
                wave_state_changed = False

                if publish_back in events:
                    buffer = publish_back.recv()
                    if buffer == b"\x01":
                        # NOTE(yongji): newly started engine subscribed
                        # We need to send READY message here instead of receiving
                        # SCALE_ELASTIC_EP notification from engine core client
                        # as SCALE_ELASTIC_EP is only sent when
                        # new engines finished initialization.
                        # Subscription message, on the other hand, is sent
                        # by each engine during initialization
                        publish_back.send(b"READY")
                    else:
                        logger.error(
                            "DP Coordinator receives unexpected message from engines"
                        )

                if publish_front in events:
                    buffer = publish_front.recv()
                    if buffer in (b"\x01", b"\x00"):
                        # Ignore subscription messages.
                        continue
                    decoded = msgspec.msgpack.decode(buffer)
                    if (
                        isinstance(decoded, (list, tuple))
                        and len(decoded) == 2
                        and decoded[0] == "SCALE_ELASTIC_EP"
                    ):
                        # Handle scale up notification
                        new_engine_count = decoded[1]
                        current_count = len(self.engines)
                        if new_engine_count > current_count:
                            for _ in range(new_engine_count - current_count):
                                self.engines.append(EngineState())
                            # NOTE(yongji): handle the case
                            # where newly started engines have current_wave = 0
                            # if existing engines just finished a wave
                            # and engine_running isn't updated yet at
                            # CoordinatorProc requests routed to newly started
                            # engines may not wake up existing engines, as long
                            # as 0 < request.wave < existing engines'
                            # current_wave
                            # we note that 0 is the wave number for the new
                            # engine
                            logger.info(
                                "DPCoordinator scaled up from %s to %s engines",
                                current_count,
                                new_engine_count,
                            )
                        else:
                            self.engines = self.engines[:new_engine_count]
                            logger.info(
                                "DPCoordinator scaled down from %s to %s engines",
                                current_count,
                                new_engine_count,
                            )
                        continue  # Skip normal engine notification processing

                    # Wave coordination: handle new-request messages from front-end.
                    # Only process these when wave coordination is enabled
                    if self.enable_wave_coordination:
                        # We received a message on the front-end XPUB socket,
                        # from an API server sending a new request while the
                        # engines are paused, so that we can wake the other
                        # engines.
                        engine_to_exclude, wave = decoded
                        if not engines_running:
                            if wave < current_wave:
                                # If the wave number is stale, ensure the message
                                # is handled by all the engines.
                                engine_to_exclude = None
                            engines_running = True
                            wave_state_changed = True
                            self._send_start_wave(
                                publish_back, current_wave, engine_to_exclude
                            )

                if output_back in events:
                    # We received a message from one of the engines.
                    buffer = output_back.recv()
                    outputs: EngineCoreOutputs = decoder.decode(buffer)

                    # Engines send only stats/wave info here, never request
                    # outputs or utility results.
                    assert not outputs.outputs
                    assert outputs.utility_output is None

                    eng_index = outputs.engine_index
                    scheduler_stats = outputs.scheduler_stats
                    if scheduler_stats:
                        # 1. Updated request load stats - update our local
                        # state with these.
                        stats = self.engines[eng_index].request_counts
                        stats_step = scheduler_stats.step_counter
                        stats_wave = scheduler_stats.current_wave
                        if (
                            stats_wave > last_stats_wave
                            or stats_wave == last_stats_wave
                            and stats_step > last_stats_step
                        ):
                            # Newer (wave, step): snapshot the prior counts so
                            # the next publish reflects a consistent step.
                            if stats_changed:
                                last_step_counts = self._get_engine_counts(do_copy=True)
                            last_stats_step = stats_step
                            last_stats_wave = stats_wave
                        elif stats_wave != last_stats_wave or (
                            stats_step != last_stats_step
                        ):
                            logger.warning(
                                "Received stats for out-of-order "
                                "step (%d, %d) from engine %d (expected "
                                "> (%d, %d))",
                                stats_wave,
                                stats_step,
                                eng_index,
                                last_stats_wave,
                                last_stats_step,
                            )
                        stats[0] = scheduler_stats.num_waiting_reqs
                        stats[1] = scheduler_stats.num_running_reqs
                        stats_changed = True

                    # Wave coordination: handle wave completion and start notifications
                    # Only process these when wave coordination is enabled
                    if self.enable_wave_coordination:
                        if (wave := outputs.wave_complete) is not None:
                            # 2. Notification from rank 0 engine that we've
                            # moved into the global paused state
                            # (engines_running==False).
                            if current_wave <= wave:
                                new_wave = wave + 1
                                logger.debug(
                                    "Moving DP wave from %d to %d.",
                                    current_wave,
                                    new_wave,
                                )
                                current_wave = new_wave
                            engines_running = False
                            wave_state_changed = True
                        elif (wave := outputs.start_wave) is not None and (
                            wave > current_wave
                            or (wave == current_wave and not engines_running)
                        ):
                            # 3. The engine received request for a non-current wave
                            # so we must ensure that other engines progress to the
                            # next wave (race condition handling).
                            logger.debug(
                                "Starting wave %d after notification of "
                                "stale wave request from engine.",
                                wave,
                            )
                            current_wave = wave
                            engines_running = True
                            wave_state_changed = True
                            self._send_start_wave(publish_back, wave, eng_index)

                if wave_state_changed:
                    # Immediately inform front-ends of the wave/running change;
                    # None means "no stats in this update".
                    message = (None, current_wave, engines_running)
                    publish_front.send(msgspec.msgpack.encode(message))

    @staticmethod
    def _send_start_wave(
        socket: zmq.Socket, wave: int, exclude_engine_index: int | None
    ):
        """Broadcast the START_DP_WAVE message to all the engines.
        It includes the current wave number and index of engine which
        has already received a request with this wave number and so doesn't
        require additional notification.
        """
        wave_encoded = msgspec.msgpack.encode((wave, exclude_engine_index))
        socket.send_multipart((EngineCoreRequestType.START_DP_WAVE.value, wave_encoded))

    def _get_engine_counts(self, do_copy=False) -> list[list[int]]:
        """Return list of [waiting, running] count lists for each engine."""
        if do_copy:
            # Copy each inner list so the snapshot is immune to later in-place
            # mutation of EngineState.request_counts.
            return [copy.copy(e.request_counts) for e in self.engines]
        return [e.request_counts for e in self.engines]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/engine/coordinator.py",
"license": "Apache License 2.0",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/metrics/prometheus.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import tempfile
from prometheus_client import REGISTRY, CollectorRegistry, multiprocess
from vllm.logger import init_logger
logger = init_logger(__name__)

# Global temporary directory for prometheus multiprocessing.
# Held at module scope so it lives (and is auto-cleaned) with the process;
# None until setup_multiprocess_prometheus() creates it.
_prometheus_multiproc_dir: tempfile.TemporaryDirectory | None = None
def setup_multiprocess_prometheus():
    """Set up prometheus multiprocessing directory if not already configured."""
    global _prometheus_multiproc_dir
    if "PROMETHEUS_MULTIPROC_DIR" in os.environ:
        # User supplied their own directory: we can't manage its lifecycle.
        logger.warning(
            "Found PROMETHEUS_MULTIPROC_DIR was set by user. "
            "This directory must be wiped between vLLM runs or "
            "you will find inaccurate metrics. Unset the variable "
            "and vLLM will properly handle cleanup."
        )
        return
    # Make TemporaryDirectory for prometheus multiprocessing
    # Note: global TemporaryDirectory will be automatically
    # cleaned up upon exit.
    _prometheus_multiproc_dir = tempfile.TemporaryDirectory()
    os.environ["PROMETHEUS_MULTIPROC_DIR"] = _prometheus_multiproc_dir.name
    logger.debug(
        "Created PROMETHEUS_MULTIPROC_DIR at %s", _prometheus_multiproc_dir.name
    )
def get_prometheus_registry() -> CollectorRegistry:
    """Get the appropriate prometheus registry based on multiprocessing
    configuration.

    Returns:
        Registry: A prometheus registry
    """
    if os.getenv("PROMETHEUS_MULTIPROC_DIR") is None:
        # Single-process mode: use the default global registry.
        return REGISTRY
    logger.debug("Using multiprocess registry for prometheus metrics")
    multiproc_registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(multiproc_registry)
    return multiproc_registry
def unregister_vllm_metrics():
    """Unregister any existing vLLM collectors from the prometheus registry.

    This is useful for testing and CI/CD where metrics may be registered
    multiple times across test runs.

    Also, in case of multiprocess, we need to unregister the metrics from the
    global registry.
    """
    registry = REGISTRY
    # Unregister any existing vLLM collectors.
    # NOTE: relies on prometheus_client's private ``_collector_to_names``
    # mapping and collectors' private ``_name`` attribute; list() copies the
    # keys so we can unregister while iterating.
    for collector in list(registry._collector_to_names):
        if hasattr(collector, "_name") and "vllm" in collector._name:
            registry.unregister(collector)
def shutdown_prometheus():
    """Shutdown prometheus metrics.

    Marks this process's metric files as dead in the multiprocess directory.
    No-op unless setup_multiprocess_prometheus() created the directory in
    this process.
    """
    path = _prometheus_multiproc_dir
    if path is None:
        return
    try:
        pid = os.getpid()
        # BUG FIX: mark_process_dead expects a filesystem path string.
        # ``path`` is a tempfile.TemporaryDirectory object (not os.PathLike),
        # so passing it directly raised a TypeError inside os.path.join that
        # was silently swallowed by the except below — pass ``path.name``.
        multiprocess.mark_process_dead(pid, path.name)
        logger.debug("Marked Prometheus metrics for process %d as dead", pid)
    except Exception as e:
        # Best-effort cleanup: log and continue shutdown.
        logger.error("Error during metrics cleanup: %s", str(e))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/metrics/prometheus.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/minicpm_eagle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only EagleMiniCPM model compatible with HuggingFace weights."""
import math
from collections.abc import Iterable
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.sequence import IntermediateTensors
from .interfaces import SupportsEagle, SupportsLoRA, SupportsPP
from .minicpm import MiniCPMAttention as EagleMiniCPMAttention
from .minicpm import MiniCPMMLP as EagleMiniCPMMLP
from .minicpm import MiniCPMMoE as EagleMiniCPMMoE
from .utils import (
AutoWeightsLoader,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
maybe_prefix,
process_eagle_weight,
)
class EagleMiniCPMDecoderLayer(nn.Module):
    """Single MiniCPM decoder layer used by the Eagle draft model.

    Both sublayers (self-attention, MLP/MoE) use pre-RMSNorm, and each
    residual addition scales the sublayer output by
    ``scale_depth / sqrt(mup_denominator)``.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = config
        self.cache_config = cache_config
        self.quant_config = quant_config
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        self.prefix = prefix
        self._init_attn_block()
        self._init_ffn_block()

    def _init_attn_block(self):
        # Pre-attention RMSNorm + self-attention submodule.
        self.input_layernorm = RMSNorm(
            self.config.hidden_size, eps=self.config.rms_norm_eps
        )
        self.self_attn = EagleMiniCPMAttention(
            hidden_size=self.hidden_size,
            num_heads=self.config.num_attention_heads,
            num_kv_heads=self.config.num_key_value_heads,
            rope_parameters=self.config.rope_parameters,
            max_position_embeddings=self.max_position_embeddings,
            cache_config=self.cache_config,
            quant_config=self.quant_config,
            prefix=f"{self.prefix}.self_attn",
        )

    def _init_ffn_block(self):
        # Pre-MLP RMSNorm; dense MLP when num_experts == 0, otherwise MoE.
        self.post_attention_layernorm = RMSNorm(
            self.config.hidden_size, eps=self.config.rms_norm_eps
        )
        self.num_experts = getattr(self.config, "num_experts", 0)
        if self.num_experts == 0:
            self.mlp = EagleMiniCPMMLP(
                hidden_size=self.hidden_size,
                intermediate_size=self.config.intermediate_size,
                hidden_act=self.config.hidden_act,
                hidden_act_param=getattr(self.config, "hidden_act_param", 0.0),
                quant_config=self.quant_config,
            )
        else:
            self.mlp = EagleMiniCPMMoE(
                num_experts=self.config.num_experts,
                top_k=self.config.num_experts_per_tok,
                hidden_size=self.config.hidden_size,
                intermediate_size=self.config.intermediate_size,
                prefix=f"{self.prefix}.mlp",
            )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        # Incoming ``residual`` is unused (immediately overwritten); the
        # parameter is kept for decoder-layer API parity.
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, None]:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states = residual + hidden_states * (
            self.config.scale_depth / math.sqrt(self.config.mup_denominator)
        )
        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states * (
            self.config.scale_depth / math.sqrt(self.config.mup_denominator)
        )
        # Second element is always None: the residual is already folded in.
        return hidden_states, None
@support_torch_compile
class EagleMiniCPMModel(nn.Module):
    """Eagle draft model backbone for MiniCPM.

    Fuses the target model's hidden states with the draft token embeddings
    via a linear ``fc`` layer, then runs ``num_hidden_layers`` decoder
    layers. Config comes from the speculative (draft) model config.
    """

    def __init__(
        self, *, vllm_config: VllmConfig, prefix: str = "", start_layer: int = 0
    ):
        super().__init__()
        # Draft-model config, not the target model's.
        config = vllm_config.speculative_config.draft_model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.cache_config = cache_config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        # Projects [input_embeds ; target hidden_states] (2*H) down to H.
        self.fc = torch.nn.Linear(
            self.config.hidden_size * 2, self.config.hidden_size, bias=False
        )
        self.input_norm1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.input_norm2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
        )
        self.num_experts = getattr(self.config, "num_experts", 0)
        self._init_layers(prefix, config, cache_config, quant_config, start_layer)
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], self.config.hidden_size
        )

    def _init_layers(
        self,
        prefix: str,
        config: PretrainedConfig,
        cache_config: CacheConfig | None,
        quant_config: QuantizationConfig | None,
        start_layer: int,
    ):
        # Layer indices continue after the target model's layers
        # (offset by start_layer) so KV-cache/layer names don't collide.
        self.eagle_layers = nn.ModuleList(
            [
                EagleMiniCPMDecoderLayer(
                    config,
                    cache_config,
                    quant_config,
                    f"{prefix}.eagle_layers.{i + start_layer}",
                )
                for i in range(self.config.num_hidden_layers)
            ]
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids, applying MiniCPM's embedding scale."""
        embedding = self.embed_tokens(input_ids)
        return embedding * self.config.scale_emb

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Normalize both inputs, concatenate, and fuse down to hidden_size.
        input_embeds = self.embed_input_ids(input_ids)
        input_embeds = self.input_norm1(input_embeds)
        hidden_states = self.input_norm2(hidden_states)
        hidden_states = self.fc(torch.cat((input_embeds, hidden_states), dim=-1))
        residual = None
        for layer in self.eagle_layers:
            hidden_states, residual = layer(
                positions,
                hidden_states,
                residual,
            )
        # Same tensor returned twice (hidden states for logits and for the
        # next speculative step).
        return hidden_states, hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, mapping stacked and expert parameters.

        Returns the set of parameter names that were loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        expert_params_mapping = [
            # (param_name, weight_name, expert_id)
            (
                "ws" if weight_name in ["w1", "w3"] else "w2s",
                f"experts.{expert_id}.{weight_name}.weight",
                expert_id,
            )
            for expert_id in range(self.num_experts)
            for weight_name in ["w1", "w2", "w3"]
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            # 1) Try stacked params (qkv / gate_up); for-else falls through
            #    when no mapping entry matched.
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # 2) Try MoE expert params.
                for param_name, weight_name, expert_id in expert_params_mapping:
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(
                        param, loaded_weight, weight_name, expert_id=expert_id
                    )
                    break
                else:
                    # 3) Plain (non-stacked, non-expert) parameter.
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(
                        param, "weight_loader", default_weight_loader
                    )
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class EagleMiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, SupportsEagle):
    """Causal-LM wrapper around EagleMiniCPMModel for Eagle speculative
    decoding: adds the LM head, muP width scaling, and weight loading."""

    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        # Draft-model config, not the target model's.
        config = vllm_config.speculative_config.draft_model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.prefix = prefix
        self.vllm_config = vllm_config
        self.config = config
        self.cache_config = cache_config
        self.quant_config = quant_config
        # Offset draft layer indices past the target model's layer count.
        target_layer_num = vllm_config.model_config.get_num_layers(
            vllm_config.parallel_config
        )
        self.model = self._init_model(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "model"),
            start_layer=target_layer_num,
        )
        self.lm_head = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        if config.tie_word_embeddings:
            self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)
        # muP-style width scaling applied to hidden states before the head.
        self.scale_width = self.config.hidden_size / self.config.dim_model_base
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def _init_model(
        self, *, vllm_config: VllmConfig, prefix: str = "", start_layer: int = 0
    ):
        # Factory hook so subclasses can substitute a different backbone.
        return EagleMiniCPMModel(
            vllm_config=vllm_config, prefix=prefix, start_layer=start_layer
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate token embedding (with scale_emb) to the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        hidden_states, hidden_states2 = self.model(input_ids, positions, hidden_states)
        # Down-scale both outputs by the muP width factor.
        hidden_states = hidden_states / self.scale_width
        hidden_states2 = hidden_states2 / self.scale_width
        return hidden_states, hidden_states2

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        def transform(inputs):
            # Apply Eagle-specific renaming side effects per weight name.
            name, loaded_weight = inputs
            process_eagle_weight(self, name)
            return name, loaded_weight

        loader = AutoWeightsLoader(
            self,
            # lm_head weights come from the tied embeddings in that case.
            skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(map(transform, weights))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/minicpm_eagle.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/cli/run_batch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import asyncio
import importlib.metadata
import typing
from vllm.entrypoints.cli.types import CLISubcommand
from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG
from vllm.logger import init_logger
if typing.TYPE_CHECKING:
    from vllm.utils.argparse_utils import FlexibleArgumentParser
else:
    # At runtime the name only needs to resolve for annotations;
    # argparse.ArgumentParser stands in so vllm.utils is not imported here
    # (presumably to keep CLI startup light — confirm before relying on it).
    FlexibleArgumentParser = argparse.ArgumentParser

logger = init_logger(__name__)
class RunBatchSubcommand(CLISubcommand):
    """The `run-batch` subcommand for vLLM CLI."""

    name = "run-batch"

    @staticmethod
    def cmd(args: argparse.Namespace) -> None:
        """Run batch inference using the already-parsed CLI arguments."""
        # Imported lazily, presumably to avoid heavy server imports at CLI
        # startup — confirm before moving to module level.
        from vllm.entrypoints.openai.run_batch import main as run_batch_main

        logger.info(
            "vLLM batch processing API version %s", importlib.metadata.version("vllm")
        )
        logger.info("args: %s", args)

        # Start the Prometheus metrics server.
        # LLMEngine uses the Prometheus client
        # to publish metrics at the /metrics endpoint.
        if args.enable_metrics:
            from prometheus_client import start_http_server

            logger.info("Prometheus metrics enabled")
            start_http_server(port=args.port, addr=args.url)
        else:
            logger.info("Prometheus metrics disabled")

        asyncio.run(run_batch_main(args))

    def subparser_init(
        self, subparsers: argparse._SubParsersAction
    ) -> FlexibleArgumentParser:
        """Register and return the `run-batch` argument parser."""
        from vllm.entrypoints.openai.run_batch import make_arg_parser

        run_batch_parser = subparsers.add_parser(
            self.name,
            help="Run batch prompts and write results to file.",
            description=(
                "Run batch prompts using vLLM's OpenAI-compatible API.\n"
                "Supports local or HTTP input/output files."
            ),
            usage="vllm run-batch -i INPUT.jsonl -o OUTPUT.jsonl --model <model>",
        )
        # make_arg_parser adds the run_batch-specific options (-i/-o/--model...).
        run_batch_parser = make_arg_parser(run_batch_parser)
        run_batch_parser.epilog = VLLM_SUBCMD_PARSER_EPILOG.format(subcmd=self.name)
        return run_batch_parser
def cmd_init() -> list[CLISubcommand]:
    """Expose the subcommands implemented by this module to the CLI."""
    subcommands: list[CLISubcommand] = [RunBatchSubcommand()]
    return subcommands
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/cli/run_batch.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/pooling/embed_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
import openai
import pytest
from tests.conftest import HfRunner
from tests.models.utils import EmbedModelInfo, check_embeddings_close, matryoshka_fy
def run_embedding_correctness_test(
    hf_model: "HfRunner",
    inputs: list[str],
    vllm_outputs: Sequence[list[float]],
    dimensions: int | None = None,
):
    """Check vLLM embeddings against HF reference embeddings (tol=1e-2)."""
    reference = hf_model.encode(inputs)
    if dimensions:
        # Truncate the reference to the requested matryoshka dimensionality.
        reference = matryoshka_fy(reference, dimensions)

    check_embeddings_close(
        embeddings_0_lst=reference,
        embeddings_1_lst=vllm_outputs,
        name_0="hf",
        name_1="vllm",
        tol=1e-2,
    )
def correctness_test_embed_models(
    hf_runner,
    vllm_runner,
    model_info: EmbedModelInfo,
    example_prompts,
    vllm_extra_kwargs=None,
    hf_model_callback=None,
):
    """Compare vLLM embeddings against sentence-transformers for one model.

    Unconditionally skipped: CI relies on the MTEB-based tests; this helper
    is kept for local debugging only.
    """
    pytest.skip("Debug only, ci prefers to use mteb test.")

    # The example_prompts has ending "\n", for example:
    # "Write a short story about a robot that dreams for the first time.\n"
    # sentence_transformers will strip the input texts, see:
    # https://github.com/UKPLab/sentence-transformers/blob/v3.1.1/sentence_transformers/models/Transformer.py#L159
    # This makes the input_ids different between hf_model and vllm_model.
    # So we need to strip the input texts to avoid test failing.
    example_prompts = [str(s).strip() for s in example_prompts]

    vllm_extra_kwargs = vllm_extra_kwargs or {}
    vllm_extra_kwargs["dtype"] = model_info.dtype
    if model_info.hf_overrides is not None:
        vllm_extra_kwargs["hf_overrides"] = model_info.hf_overrides

    # Run vLLM first and release it before loading the HF model.
    with vllm_runner(
        model_info.name, runner="pooling", max_model_len=None, **vllm_extra_kwargs
    ) as vllm_model:
        vllm_outputs = vllm_model.embed(example_prompts)

    with hf_runner(
        model_info.name,
        dtype=model_info.hf_dtype,
        is_sentence_transformer=True,
    ) as hf_model:
        # Allow callers to tweak the HF model (e.g. pooling) before comparing.
        if hf_model_callback is not None:
            hf_model_callback(hf_model)

        run_embedding_correctness_test(hf_model, example_prompts, vllm_outputs)
async def run_client_embeddings(
    client: openai.AsyncOpenAI,
    model_name: str,
    queries: list[str],
    instruction: str = "",
) -> list[list[float]]:
    """Fetch embeddings for `queries`, each prefixed with `instruction`."""
    prompts = [instruction + query for query in queries]
    response = await client.embeddings.create(model=model_name, input=prompts)
    return [item.embedding for item in response.data]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling/embed_utils.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/offline_inference/context_extension.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script demonstrates how to extend the context length
of a Qwen model using the YARN method (rope_parameters)
and run a simple chat example.
Usage:
python examples/offline_inference/context_extension.py
"""
from vllm import LLM, RequestOutput, SamplingParams
def create_llm():
    """Build a Qwen3-0.6B LLM whose context is extended 4x via YARN."""
    rope_theta = 1000000
    original_max_position_embeddings = 32768
    factor = 4.0

    # Use yarn to extend context
    yarn_parameters = {
        "rope_theta": rope_theta,
        "rope_type": "yarn",
        "factor": factor,
        "original_max_position_embeddings": original_max_position_embeddings,
    }
    hf_overrides = {
        "rope_parameters": yarn_parameters,
        "max_model_len": int(original_max_position_embeddings * factor),
    }

    return LLM(model="Qwen/Qwen3-0.6B", hf_overrides=hf_overrides)
def run_llm_chat(llm):
    """Run one short chat and return (outputs, list of conversations)."""
    sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=128)

    conversation = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello"},
        {"role": "assistant", "content": "Hello! How can I assist you today?"},
    ]
    outputs = llm.chat(conversation, sampling_params, use_tqdm=False)

    conversations = [conversation]
    return outputs, conversations
def print_outputs(outputs: list[RequestOutput], conversations: list):
    """Pretty-print each generated output next to its prompt conversation."""
    separator = "-" * 80
    print("\nGenerated Outputs:\n" + separator)
    for index, output in enumerate(outputs):
        prompt = conversations[index]
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}\n")
        print(f"Generated text: {generated_text!r}")
        print(separator)
def main():
    """Entry point: build the context-extended LLM, chat, print results."""
    llm = create_llm()
    outputs, conversations = run_llm_chat(llm)
    print_outputs(outputs, conversations)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/context_extension.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/pooling/test_nomic_max_model_len.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: SIM117
from typing import Any
import pytest
from ...utils import EmbedModelInfo
# Nomic-style BERT embedding models exercised by every test below.
MODELS = [
    EmbedModelInfo("nomic-ai/nomic-embed-text-v1"),
    # EmbedModelInfo("nomic-ai/nomic-embed-text-v1.5"),
    # EmbedModelInfo("nomic-ai/CodeRankEmbed"),
    EmbedModelInfo("nomic-ai/nomic-embed-text-v2-moe"),
    # EmbedModelInfo("Snowflake/snowflake-arctic-embed-m-long"),
]

# YARN rope-scaling parameters shared by the rope-scaling tests.
rope_theta = 1000
factor = 4.0
original_max_position_embeddings = 2048
# Longest context reachable when scaling by `factor`.
max_model_len = int(original_max_position_embeddings * factor)
@pytest.mark.parametrize("model_info", MODELS)
def test_default(model_info, vllm_runner):
    """Without an explicit max_model_len, the model's native limit is used."""
    with vllm_runner(
        model_info.name, runner="pooling", max_model_len=None
    ) as vllm_model:
        model_config = vllm_model.llm.llm_engine.model_config
        if model_info.name == "nomic-ai/nomic-embed-text-v2-moe":
            # For nomic-embed-text-v2-moe the length is set to 512
            # by sentence_bert_config.json.
            assert model_config.max_model_len == 512
        else:
            assert model_config.max_model_len == original_max_position_embeddings
@pytest.mark.parametrize("model_info", MODELS)
def test_set_max_model_len_legal(model_info, vllm_runner):
    """max_model_len at or below the model's limit must be accepted."""
    # set max_model_len <= 512
    with vllm_runner(
        model_info.name, runner="pooling", max_model_len=256
    ) as vllm_model:
        model_config = vllm_model.llm.llm_engine.model_config
        assert model_config.max_model_len == 256

    # set 512 < max_model_len <= 2048
    if model_info.name == "nomic-ai/nomic-embed-text-v2-moe":
        # For nomic-embed-text-v2-moe the length is set to 512
        # by sentence_bert_config.json.
        with pytest.raises(ValueError):
            with vllm_runner(model_info.name, runner="pooling", max_model_len=1024):
                pass
    else:
        with vllm_runner(
            model_info.name, runner="pooling", max_model_len=1024
        ) as vllm_model:
            model_config = vllm_model.llm.llm_engine.model_config
            assert model_config.max_model_len == 1024
@pytest.mark.parametrize("model_info", MODELS)
def test_set_max_model_len_illegal(model_info, vllm_runner):
    """A context length above the 2048-token model limit must be rejected."""
    # Requested directly via the max_model_len argument.
    with pytest.raises(ValueError):
        with vllm_runner(model_info.name, runner="pooling", max_model_len=4096):
            pass

    # Requested indirectly via hf_overrides.
    overrides = {"max_model_len": 4096}
    with pytest.raises(ValueError):
        with vllm_runner(
            model_info.name,
            runner="pooling",
            max_model_len=None,
            hf_overrides=overrides,
        ):
            pass
@pytest.mark.parametrize("model_info", MODELS)
def test_use_rope_scaling_legal(model_info, vllm_runner):
    """Loading with YARN scaling up to the scaled maximum should succeed."""
    overrides = {
        "rope_parameters": {
            "rope_theta": rope_theta,
            "rope_type": "yarn",
            "factor": factor,
            "original_max_position_embeddings": original_max_position_embeddings,
        },
        "max_model_len": max_model_len,
    }
    with vllm_runner(
        model_info.name,
        runner="pooling",
        max_model_len=None,
        hf_overrides=overrides,
    ):
        pass
@pytest.mark.parametrize("model_info", MODELS)
def test_use_rope_scaling_illegal(model_info, vllm_runner):
    """Exceeding the YARN-scaled maximum length must raise ValueError."""

    def yarn_params() -> dict[str, Any]:
        # Fresh dict per call so the two runner invocations share no state.
        return {
            "rope_theta": rope_theta,
            "rope_type": "yarn",
            "factor": factor,
            "original_max_position_embeddings": original_max_position_embeddings,
        }

    # Too-long context requested via the max_model_len argument.
    overrides: dict[str, Any] = {"rope_parameters": yarn_params()}
    with pytest.raises(ValueError):
        with vllm_runner(
            model_info.name,
            runner="pooling",
            max_model_len=max_model_len + 1,
            hf_overrides=overrides,
        ):
            pass

    # Too-long context requested via hf_overrides.
    overrides = {
        "rope_parameters": yarn_params(),
        "max_model_len": max_model_len + 1,
    }
    with pytest.raises(ValueError):
        with vllm_runner(
            model_info.name,
            runner="pooling",
            max_model_len=None,
            hf_overrides=overrides,
        ):
            pass
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling/test_nomic_max_model_len.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/offline_inference/metrics.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from vllm import LLM, SamplingParams
from vllm.v1.metrics.reader import Counter, Gauge, Histogram, Vector
# Sample prompts fed to main() below.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]

# Create a sampling params object shared by all prompts.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
def main():
    """Generate from the sample prompts, then dump all vLLM metrics."""
    # Create an LLM. disable_log_stats=False keeps stats/metrics enabled.
    llm = LLM(model="facebook/opt-125m", disable_log_stats=False)

    # Generate texts from the prompts.
    outputs = llm.generate(prompts, sampling_params)

    # Print the outputs.
    print("-" * 50)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}")
        print("-" * 50)

    # Dump all metrics, dispatching on the concrete Metric subclass.
    for metric in llm.get_metrics():
        if isinstance(metric, Gauge):
            print(f"{metric.name} (gauge) = {metric.value}")
        elif isinstance(metric, Counter):
            print(f"{metric.name} (counter) = {metric.value}")
        elif isinstance(metric, Vector):
            print(f"{metric.name} (vector) = {metric.values}")
        elif isinstance(metric, Histogram):
            print(f"{metric.name} (histogram)")
            print(f"    sum = {metric.sum}")
            print(f"    count = {metric.count}")
            # Buckets are keyed by their 'le' upper bound.
            for bucket_le, value in metric.buckets.items():
                print(f"    {bucket_le} = {value}")


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/metrics.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/metrics/reader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from prometheus_client import REGISTRY
from prometheus_client import Metric as PromMetric
from prometheus_client.samples import Sample
@dataclass
class Metric:
    """A base class for prometheus metrics.

    Each metric may be associated with key=value labels, and
    in some cases a single vLLM instance may have multiple
    metrics with the same name but different sets of labels.
    """

    # Prometheus metric name, e.g. "vllm:num_requests_running".
    name: str
    # Label key/value pairs attached to this metric's samples.
    labels: dict[str, str]
@dataclass
class Counter(Metric):
    """A monotonically increasing integer counter."""

    # Cumulative count observed so far.
    value: int
@dataclass
class Vector(Metric):
    """An ordered array of integer counters.

    This type - which doesn't exist in Prometheus - models one very
    specific metric, vllm:spec_decode_num_accepted_tokens_per_pos.
    """

    # values[i] is the accepted-token counter for spec-decode position i.
    values: list[int]
@dataclass
class Gauge(Metric):
    """A numerical value that can go up or down."""

    # Current value of the gauge.
    value: float
@dataclass
class Histogram(Metric):
    """Observations recorded in configurable buckets.

    Buckets are represented by a dictionary. The key is
    the upper limit of the bucket, and the value is the
    observed count in that bucket. A '+Inf' key always
    exists.

    The count property is the total count across all
    buckets, identical to the count of the '+Inf' bucket.

    The sum property is the total sum of all observed
    values.
    """

    # Total number of observations (equals the '+Inf' bucket's count).
    count: int
    # Sum of all observed values.
    sum: float
    # Cumulative count per bucket, keyed by the bucket's 'le' upper bound.
    buckets: dict[str, int]
def get_metrics_snapshot() -> list[Metric]:
    """An API for accessing in-memory Prometheus metrics.

    Example:
        >>> for metric in llm.get_metrics():
        ...     if isinstance(metric, Counter):
        ...         print(f"{metric} = {metric.value}")
        ...     elif isinstance(metric, Gauge):
        ...         print(f"{metric} = {metric.value}")
        ...     elif isinstance(metric, Histogram):
        ...         print(f"{metric}")
        ...         print(f"    sum = {metric.sum}")
        ...         print(f"    count = {metric.count}")
        ...         for bucket_le, value in metric.buckets.items():
        ...             print(f"    {bucket_le} = {value}")
    """
    collected: list[Metric] = []
    for metric in REGISTRY.collect():
        # Only expose vLLM's own metrics, not e.g. process-level ones.
        if not metric.name.startswith("vllm:"):
            continue
        if metric.type == "gauge":
            samples = _get_samples(metric)
            for s in samples:
                collected.append(
                    Gauge(name=metric.name, labels=s.labels, value=s.value)
                )
        elif metric.type == "counter":
            # Prometheus counters expose their value as a "_total" sample.
            samples = _get_samples(metric, "_total")
            if metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos":
                #
                # Ugly vllm:num_accepted_tokens_per_pos special case.
                #
                # This metric is a vector of counters - for each spec
                # decoding token position, we observe the number of
                # accepted tokens using a Counter labeled with 'position'.
                # We convert these into a vector of integer values.
                #
                for labels, values in _digest_num_accepted_by_pos_samples(samples):
                    collected.append(
                        Vector(name=metric.name, labels=labels, values=values)
                    )
            else:
                for s in samples:
                    collected.append(
                        Counter(name=metric.name, labels=s.labels, value=int(s.value))
                    )
        elif metric.type == "histogram":
            #
            # A histogram has a number of '_bucket' samples where
            # the 'le' label represents the upper limit of the bucket.
            # We convert these bucketized values into a dict of values
            # indexed by the value of the 'le' label. The 'le=+Inf'
            # label is a special case, catching all values observed.
            #
            bucket_samples = _get_samples(metric, "_bucket")
            count_samples = _get_samples(metric, "_count")
            sum_samples = _get_samples(metric, "_sum")
            for labels, buckets, count_value, sum_value in _digest_histogram(
                bucket_samples, count_samples, sum_samples
            ):
                collected.append(
                    Histogram(
                        name=metric.name,
                        labels=labels,
                        buckets=buckets,
                        count=count_value,
                        sum=sum_value,
                    )
                )
        else:
            raise AssertionError(f"Unknown metric type {metric.type}")

    return collected
def _get_samples(metric: PromMetric, suffix: str | None = None) -> list[Sample]:
    """Return `metric`'s samples whose name matches, with an optional suffix
    (e.g. "_total", "_bucket") appended to the metric name."""
    wanted = metric.name if suffix is None else metric.name + suffix
    return [sample for sample in metric.samples if sample.name == wanted]
def _strip_label(labels: dict[str, str], key_to_remove: str) -> dict[str, str]:
    """Return a copy of `labels` with `key_to_remove` deleted.

    Raises KeyError if the key is absent, matching dict deletion semantics.
    """
    stripped = dict(labels)
    del stripped[key_to_remove]
    return stripped
def _digest_histogram(
    bucket_samples: list[Sample], count_samples: list[Sample], sum_samples: list[Sample]
) -> list[tuple[dict[str, str], dict[str, int], int, float]]:
    """Group histogram samples by label set into (labels, buckets, count, sum)
    tuples, merging '_bucket' samples with their matching '_count'/'_sum'."""
    #
    # In the case of DP, we have an indigestable
    # per-bucket-per-engine count as a list of labelled
    # samples, along with total and sum samples
    #
    # bucket_samples (in):
    #   labels = {bucket: 100, idx: 0}, value = 2
    #   labels = {bucket: 200, idx: 0}, value = 4
    #   labels = {bucket: Inf, idx: 0}, value = 10
    #   labels = {bucket: 100, idx: 1}, value = 1
    #   labels = {bucket: 200, idx: 2}, value = 5
    #   labels = {bucket: Inf, idx: 3}, value = 7
    # count_samples (in):
    #   labels = {idx: 0}, value = 10
    #   labels = {idx: 1}, value = 7
    # sum_samples (in):
    #   labels = {idx: 0}, value = 2000
    #   labels = {idx: 1}, value = 1200
    #
    # output: [
    #   {idx: 0}, {"100": 2, "200": 4, "Inf": 10}, 10, 2000
    #   {idx: 1}, {"100": 1, "200": 5, "Inf": 7}, 7, 1200
    # ]
    buckets_by_labels: dict[frozenset[tuple[str, str]], dict[str, int]] = {}
    for s in bucket_samples:
        bucket = s.labels["le"]
        # Key on the labels minus 'le' so all buckets of one series group
        # together; frozenset makes the label set hashable.
        labels_key = frozenset(_strip_label(s.labels, "le").items())
        if labels_key not in buckets_by_labels:
            buckets_by_labels[labels_key] = {}
        buckets_by_labels[labels_key][bucket] = int(s.value)

    counts_by_labels: dict[frozenset[tuple[str, str]], int] = {}
    for s in count_samples:
        labels_key = frozenset(s.labels.items())
        counts_by_labels[labels_key] = int(s.value)

    sums_by_labels: dict[frozenset[tuple[str, str]], float] = {}
    for s in sum_samples:
        labels_key = frozenset(s.labels.items())
        sums_by_labels[labels_key] = s.value

    # Every label set must appear in all three sample groups.
    assert (
        set(buckets_by_labels.keys())
        == set(counts_by_labels.keys())
        == set(sums_by_labels.keys())
    )

    output = []
    label_keys = list(buckets_by_labels.keys())
    for k in label_keys:
        labels = dict(k)
        output.append(
            (labels, buckets_by_labels[k], counts_by_labels[k], sums_by_labels[k])
        )
    return output
def _digest_num_accepted_by_pos_samples(
    samples: list[Sample],
) -> list[tuple[dict[str, str], list[int]]]:
    """Fold per-position counter samples into (labels, values-by-position)
    pairs, one per distinct non-'position' label set."""
    #
    # In the case of DP, we have an indigestable
    # per-position-per-engine count as a list of
    # labelled samples
    #
    # samples (in):
    #   labels = {pos: 0, idx: 0}, value = 10
    #   labels = {pos: 1, idx: 0}, value = 7
    #   labels = {pos: 2, idx: 0}, value = 2
    #   labels = {pos: 0, idx: 1}, value = 5
    #   labels = {pos: 1, idx: 1}, value = 3
    #   labels = {pos: 2, idx: 1}, value = 1
    #
    # output: [
    #   {idx: 0}, [10, 7, 2]
    #   {idx: 1}, [5, 3, 1]
    # ]
    #
    max_pos = 0
    values_by_labels: dict[frozenset[tuple[str, str]], dict[int, int]] = {}
    for s in samples:
        position = int(s.labels["position"])
        # Track the global maximum so every output vector has equal length.
        max_pos = max(max_pos, position)
        labels_key = frozenset(_strip_label(s.labels, "position").items())
        if labels_key not in values_by_labels:
            values_by_labels[labels_key] = {}
        values_by_labels[labels_key][position] = int(s.value)

    output = []
    for labels_key, values_by_position in values_by_labels.items():
        labels = dict(labels_key)
        # Positions with no sample default to 0.
        values = [0] * (max_pos + 1)
        for pos, val in values_by_position.items():
            values[pos] = val
        output.append((labels, values))
    return output
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/metrics/reader.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:docs/mkdocs/hooks/generate_examples.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
import logging
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Literal
import regex as re
logger = logging.getLogger("mkdocs")

# Repo root is four levels above docs/mkdocs/hooks/generate_examples.py.
ROOT_DIR = Path(__file__).parent.parent.parent.parent
ROOT_DIR_RELATIVE = "../../../../.."
EXAMPLE_DIR = ROOT_DIR / "examples"
# Generated example pages are written under docs/examples.
EXAMPLE_DOC_DIR = ROOT_DIR / "docs/examples"
def title(text: str) -> str:
    """Turn a file/directory stem into a human-readable document title."""
    # Default title case: underscores become spaces, path separators become
    # " - " dividers.
    result = text.replace("_", " ").replace("/", " - ").title()

    # Custom substitutions for acronyms and product names.
    subs = {
        "io": "IO",
        "api": "API",
        "cli": "CLI",
        "cpu": "CPU",
        "llm": "LLM",
        "mae": "MAE",
        "ner": "NER",
        "tpu": "TPU",
        "gguf": "GGUF",
        "lora": "LoRA",
        "rlhf": "RLHF",
        "vllm": "vLLM",
        "openai": "OpenAI",
        "lmcache": "LMCache",
        "multilora": "MultiLoRA",
        "mlpspeculator": "MLPSpeculator",
        r"fp\d+": lambda x: x.group(0).upper(),  # e.g. fp16, fp32
        r"int\d+": lambda x: x.group(0).upper(),  # e.g. int8, int16
    }
    for pattern, replacement in subs.items():
        result = re.sub(rf"\b{pattern}\b", replacement, result, flags=re.IGNORECASE)
    return result
@dataclass
class Example:
    """
    Example class for generating documentation content from a given path.

    Attributes:
        path (Path): The path to the main directory or file.
        category (str): The category of the document.

    Properties:
        main_file() -> Path | None: Determines the main file in the given path.
        other_files() -> list[Path]: Determines other files in the directory
        excluding the main file.
        title() -> str: Determines the title of the document.

    Methods:
        generate() -> str: Generates the documentation content.
    """

    path: Path
    category: str

    @cached_property
    def main_file(self) -> Path | None:
        """Determines the main file in the given path.

        If path is a file, it returns the path itself. If path is a directory, it
        searches for Markdown files (*.md) in the directory and returns the first one
        found. If no Markdown files are found, it returns None."""
        # Single file example
        if self.path.is_file():
            return self.path

        # Multi file example with a README
        if md_paths := list(self.path.glob("*.md")):
            return md_paths[0]

        # Multi file example without a README
        return None

    @cached_property
    def other_files(self) -> list[Path]:
        """Determine other files in the directory excluding the main file.

        If path is a file, it returns an empty list. Otherwise, it returns every file
        in the directory except the main file in a list."""
        # Single file example
        if self.path.is_file():
            return []

        # Multi file example
        is_other_file = lambda file: file.is_file() and file != self.main_file
        return sorted(file for file in self.path.rglob("*") if is_other_file(file))

    @cached_property
    def is_code(self) -> bool:
        # A non-Markdown main file (e.g. .py, .sh) is rendered as a code block.
        return self.main_file is not None and self.main_file.suffix != ".md"

    @cached_property
    def title(self) -> str:
        # Generate title from filename if no main md file found
        if self.main_file is None or self.is_code:
            return title(self.path.stem)

        # Specify encoding for building on Windows
        with open(self.main_file, encoding="utf-8") as f:
            first_line = f.readline().strip()

        # Otherwise use the README's H1 heading as the title.
        match = re.match(r"^#\s+(?P<title>.+)$", first_line)
        if match:
            return match.group("title")

        raise ValueError(f"Title not found in {self.main_file}")

    def fix_relative_links(self, content: str) -> str:
        """
        Fix relative links in markdown content by converting them to gh-file
        format.

        Args:
            content (str): The markdown content to process

        Returns:
            str: Content with relative links converted to gh-file format
        """
        # Regex to match markdown links [text](relative_path)
        # This matches links that don't start with http, https, ftp, or #
        link_pattern = r"\[([^\]]*)\]\((?!(?:https?|ftp)://|#)([^)]+)\)"

        def replace_link(match):
            link_text = match.group(1)
            relative_path = match.group(2)

            # Make relative to repo root
            gh_file = (self.main_file.parent / relative_path).resolve()
            gh_file = gh_file.relative_to(ROOT_DIR)

            # Make GitHub URL
            url = "https://github.com/vllm-project/vllm/"
            url += "tree/main" if self.path.is_dir() else "blob/main"
            gh_url = f"{url}/{gh_file}"

            return f"[{link_text}]({gh_url})"

        return re.sub(link_pattern, replace_link, content)

    def generate(self) -> str:
        """Generates the documentation content for this example."""
        content = f"# {self.title}\n\n"
        url = "https://github.com/vllm-project/vllm/"
        url += "tree/main" if self.path.is_dir() else "blob/main"
        content += f"Source <{url}/{self.path.relative_to(ROOT_DIR)}>.\n\n"

        # Use long code fence to avoid issues with
        # included files containing code fences too
        code_fence = "``````"

        if self.main_file is not None:
            # Single file example or multi file example with a README
            if self.is_code:
                content += (
                    f"{code_fence}{self.main_file.suffix[1:]}\n"
                    f'--8<-- "{self.main_file}"\n'
                    f"{code_fence}\n"
                )
            else:
                with open(self.main_file, encoding="utf-8") as f:
                    # Skip the title from md snippets as it's been included above
                    main_content = f.readlines()[1:]
                content += self.fix_relative_links("".join(main_content))
            content += "\n"
        else:
            # Multi file example without a README
            for file in self.other_files:
                file_title = title(str(file.relative_to(self.path).with_suffix("")))
                content += f"## {file_title}\n\n"
                content += (
                    f'{code_fence}{file.suffix[1:]}\n--8<-- "{file}"\n{code_fence}\n\n'
                )
            return content

        if not self.other_files:
            return content

        # List supporting files as collapsible admonition snippets.
        content += "## Example materials\n\n"
        for file in self.other_files:
            content += f'??? abstract "{file.relative_to(self.path)}"\n'
            if file.suffix != ".md":
                content += f"    {code_fence}{file.suffix[1:]}\n"
            content += f'    --8<-- "{file}"\n'
            if file.suffix != ".md":
                content += f"    {code_fence}\n"

        return content
def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool):
    """MkDocs hook: generate one Markdown page per example before the build.

    Args:
        command: The MkDocs command being run.
        dirty: Whether this is a dirty (incremental) build. Unused.
    """
    logger.info("Generating example documentation")
    logger.debug("Root directory: %s", ROOT_DIR.resolve())
    logger.debug("Example directory: %s", EXAMPLE_DIR.resolve())
    logger.debug("Example document directory: %s", EXAMPLE_DOC_DIR.resolve())

    # Create the EXAMPLE_DOC_DIR if it doesn't exist
    if not EXAMPLE_DOC_DIR.exists():
        EXAMPLE_DOC_DIR.mkdir(parents=True)

    # Each top-level directory under examples/ is a category.
    categories = sorted(p for p in EXAMPLE_DIR.iterdir() if p.is_dir())

    examples = []
    glob_patterns = ["*.py", "*.md", "*.sh"]
    # Find categorised examples
    for category in categories:
        logger.info("Processing category: %s", category.stem)
        # Files directly inside the category are single-file examples.
        globs = [category.glob(pattern) for pattern in glob_patterns]
        for path in itertools.chain(*globs):
            examples.append(Example(path, category.stem))
        # Find examples in subdirectories; the directory itself is the example.
        globs = [category.glob(f"*/{pattern}") for pattern in glob_patterns]
        for path in itertools.chain(*globs):
            examples.append(Example(path.parent, category.stem))

    # Generate the example documentation
    for example in sorted(examples, key=lambda e: e.path.stem):
        example_name = f"{example.path.stem}.md"
        doc_path = EXAMPLE_DOC_DIR / example.category / example_name
        if not doc_path.parent.exists():
            doc_path.parent.mkdir(parents=True)
        # Specify encoding for building on Windows
        with open(doc_path, "w+", encoding="utf-8") as f:
            f.write(example.generate())
        logger.debug("Example generated: %s", doc_path.relative_to(ROOT_DIR))
    logger.info("Total examples generated: %d", len(examples))
| {
"repo_id": "vllm-project/vllm",
"file_path": "docs/mkdocs/hooks/generate_examples.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:docs/mkdocs/hooks/remove_announcement.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from pathlib import Path
from typing import Literal
def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool):
    """MkDocs hook: drop the pre-release banner for tagged (release) builds.

    Args:
        command: The MkDocs command being run.
        dirty: Whether this is a dirty (incremental) build. Unused.
    """
    # see https://docs.readthedocs.io/en/stable/reference/environment-variables.html # noqa
    if os.getenv("READTHEDOCS_VERSION_TYPE") == "tag":
        # remove the warning banner if the version is a tagged release
        mkdocs_dir = Path(__file__).parent.parent
        announcement_path = mkdocs_dir / "overrides/main.html"
        # The file might be removed already if the build is triggered multiple
        # times (readthedocs builds HTML and PDF versions separately).
        # unlink(missing_ok=True) deletes in one call, avoiding the
        # exists()/remove() race between those concurrent builds.
        announcement_path.unlink(missing_ok=True)
| {
"repo_id": "vllm-project/vllm",
"file_path": "docs/mkdocs/hooks/remove_announcement.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:docs/mkdocs/hooks/url_schemes.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
MkDocs hook to enable the following links to render correctly:
- Relative file links outside of the `docs/` directory, e.g.:
- [Text](../some_file.py)
- [Directory](../../some_directory/)
- GitHub URLs for issues, pull requests, and projects, e.g.:
- Adds GitHub icon before links
- Replaces raw links with descriptive text,
e.g. <...pull/123> -> [Pull Request #123](.../pull/123)
- Works for external repos too by including the `owner/repo` in the link title
The goal is to simplify cross-referencing common GitHub resources
in project docs.
"""
from pathlib import Path
import regex as re
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.structure.files import Files
from mkdocs.structure.pages import Page
# Repo root is four levels above docs/mkdocs/hooks/url_schemes.py.
ROOT_DIR = Path(__file__).parent.parent.parent.parent.resolve()
DOC_DIR = ROOT_DIR / "docs"

# Icon prepended to every rewritten GitHub link.
gh_icon = ":octicons-mark-github-16:"

# Regex pieces
TITLE = r"(?P<title>[^\[\]<>]+?)"
REPO = r"(?P<repo>.+?/.+?)"
TYPE = r"(?P<type>issues|pull|projects)"
NUMBER = r"(?P<number>\d+)"
PATH = r"(?P<path>[^\s]+?)"
FRAGMENT = r"(?P<fragment>#[^\s]+)?"
URL = f"https://github.com/{REPO}/{TYPE}/{NUMBER}{FRAGMENT}"
# Matches targets that are neither absolute URLs nor pure fragments.
RELATIVE = rf"(?!(https?|ftp)://|#){PATH}{FRAGMENT}"
# Common titles to use for GitHub links when none is provided in the link.
TITLES = {"issues": "Issue ", "pull": "Pull Request ", "projects": "Project "}
# Regex to match GitHub issue, PR, and project links with optional titles.
github_link = re.compile(rf"(\[{TITLE}\]\(|<){URL}(\)|>)")
# Regex to match relative file links with optional titles.
relative_link = re.compile(rf"\[{TITLE}\]\({RELATIVE}\)")
def on_page_markdown(
    markdown: str, *, page: Page, config: MkDocsConfig, files: Files
) -> str:
    """MkDocs hook: rewrite out-of-docs relative links and GitHub links.

    Returns the transformed Markdown for the page.
    """

    def replace_relative_link(match: re.Match) -> str:
        """Replace relative file links with URLs if they point outside the docs dir."""
        title = match.group("title")
        path = match.group("path")
        # Resolve the target relative to the page's own source file.
        path = (Path(page.file.abs_src_path).parent / path).resolve()
        fragment = match.group("fragment") or ""

        # Check if the path exists and is outside the docs dir
        if not path.exists() or path.is_relative_to(DOC_DIR):
            return match.group(0)

        # Files and directories have different URL schemes on GitHub
        slug = "tree/main" if path.is_dir() else "blob/main"
        path = path.relative_to(ROOT_DIR)
        url = f"https://github.com/vllm-project/vllm/{slug}/{path}{fragment}"
        return f"[{gh_icon} {title}]({url})"

    def replace_github_link(match: re.Match) -> str:
        """Replace GitHub issue, PR, and project links with enhanced Markdown links."""
        repo = match.group("repo")
        type = match.group("type")
        number = match.group("number")
        # Title and fragment could be None
        title = match.group("title") or ""
        fragment = match.group("fragment") or ""

        # Use default titles for raw links
        if not title:
            title = TITLES[type]
            # External repos get an owner/repo prefix in the title.
            if "vllm-project" not in repo:
                title += repo
            title += f"#{number}"

        url = f"https://github.com/{repo}/{type}/{number}{fragment}"
        return f"[{gh_icon} {title}]({url})"

    markdown = relative_link.sub(replace_relative_link, markdown)
    markdown = github_link.sub(replace_github_link, markdown)
    return markdown
| {
"repo_id": "vllm-project/vllm",
"file_path": "docs/mkdocs/hooks/url_schemes.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/multimodal/test_image.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
import numpy as np
import pytest
from PIL import Image, ImageChops
from vllm.multimodal.image import convert_image_mode
# All tests in this module run on CPU only.
pytestmark = pytest.mark.cpu_test
# Image fixtures checked in next to this test module.
ASSETS_DIR = Path(__file__).parent / "assets"
assert ASSETS_DIR.exists()
def test_rgb_to_rgb():
    """Converting an RGB image to RGB must leave it pixel-identical."""
    source = Image.open(ASSETS_DIR / "image1.png").convert("RGB")
    result = convert_image_mode(source, "RGB")
    # An empty difference bounding box means the two images are identical.
    assert ImageChops.difference(source, result).getbbox() is None
def test_rgba_to_rgb():
    """Fully transparent RGBA pixels must become white after RGB conversion."""
    original_image = Image.open(ASSETS_DIR / "rgba.png")
    original_image_numpy = np.array(original_image)
    converted_image = convert_image_mode(original_image, "RGB")
    converted_image_numpy = np.array(converted_image)
    # Vectorized replacement for the previous per-pixel Python double loop
    # (O(H*W) interpreter iterations): every pixel whose alpha channel is 0
    # must map to pure white (255, 255, 255) in the converted image.
    transparent_mask = original_image_numpy[..., 3] == 0
    assert (converted_image_numpy[transparent_mask] == 255).all()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/test_image.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/openai/test_tensorizer_entrypoint.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import gc
import os
import tempfile
import openai
import pytest
import pytest_asyncio
import torch.cuda
from vllm.engine.arg_utils import EngineArgs
from vllm.model_executor.model_loader.tensorizer import (
TensorizerConfig,
tensorize_lora_adapter,
tensorize_vllm_model,
)
from ...utils import RemoteOpenAIServer
# Small instruct model and a matching LoRA adapter used for serialization tests.
MODEL_NAME = "unsloth/llama-3.2-1b-Instruct"
LORA_PATH = "davzoku/finqa_adapter_1b"
def _cleanup():
    """Run a GC pass and release cached CUDA memory between test phases."""
    gc.collect()
    torch.cuda.empty_cache()
@pytest.fixture(autouse=True)
def cleanup():
    """Autouse fixture: free memory before every test in this module."""
    _cleanup()
@pytest.fixture(scope="module")
def tmp_dir():
    """Module-scoped scratch directory, removed automatically on teardown."""
    with tempfile.TemporaryDirectory() as scratch:
        yield scratch
@pytest.fixture(scope="module")
def model_uri(tmp_dir):
    """Path inside the module tmp dir where serialized tensors are written."""
    yield tmp_dir + "/model.tensors"
@pytest.fixture(scope="module")
def tensorize_model_and_lora(tmp_dir, model_uri):
    """Serialize the test model and its LoRA adapter into tmp_dir (once per module)."""
    tensorizer_config = TensorizerConfig(tensorizer_uri=model_uri, lora_dir=tmp_dir)
    args = EngineArgs(model=MODEL_NAME)
    # Serialize the adapter first, then the base model; both land under tmp_dir.
    tensorize_lora_adapter(LORA_PATH, tensorizer_config)
    tensorize_vllm_model(args, tensorizer_config)
    # Manually invoke a _cleanup() here, as the cleanup()
    # fixture won't be guaranteed to be called after this
    # when this fixture is used for a test
    _cleanup()
    yield
@pytest.fixture(scope="module")
def server(model_uri, tensorize_model_and_lora):
    """Launch an OpenAI-compatible vLLM server backed by the tensorized model.

    ``model_uri`` lives in a directory holding ``model.tensors`` plus all
    model artifacts (notably the HF ``config.json``), so Tensorizer can infer
    its ``TensorizerConfig`` and ``--model-loader-extra-config`` is omitted
    entirely.
    """
    cli_args = [
        "--load-format",
        "tensorizer",
        "--served-model-name",
        MODEL_NAME,
        "--enable-lora",
    ]
    model_dir = os.path.dirname(model_uri)
    with RemoteOpenAIServer(model_dir, cli_args) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client bound to the running test server."""
    async with server.get_async_client() as async_client:
        yield async_client
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_single_completion(client: openai.AsyncOpenAI, model_name: str):
    """Greedy 5-token completion returns the expected shape and usage counts."""
    _cleanup()
    completion = await client.completions.create(
        model=model_name, prompt="Hello, my name is", max_tokens=5, temperature=0.0
    )
    assert completion.id is not None
    # Exactly one choice was requested (a second, redundant length assertion
    # on completion.choices was removed).
    assert completion.choices is not None and len(completion.choices) == 1
    assert completion.model == MODEL_NAME
    assert len(completion.choices[0].text) >= 5
    assert completion.choices[0].finish_reason == "length"
    # Greedy decoding with max_tokens=5 consumes exactly these token budgets.
    assert completion.usage == openai.types.CompletionUsage(
        completion_tokens=5, prompt_tokens=6, total_tokens=11
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_tensorizer_entrypoint.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest.mock import MagicMock, patch
import pytest
from tests.entrypoints.openai.tool_parsers.utils import (
run_tool_extraction,
run_tool_extraction_streaming,
)
from vllm.entrypoints.openai.engine.protocol import FunctionCall
from vllm.tokenizers import TokenizerLike
from vllm.tool_parsers import ToolParser, ToolParserManager
# Test cases similar to pythonic parser but with Llama4 specific format
# One call with two string arguments.
SIMPLE_FUNCTION_OUTPUT = "[get_weather(city='LA', metric='C')]"
SIMPLE_FUNCTION_CALL = FunctionCall(
    name="get_weather",
    arguments='{"city": "LA", "metric": "C"}',
)
# One call exercising str, int, dict, None, bool, and list argument types.
MORE_TYPES_FUNCTION_OUTPUT = (
    "[register_user(name='Doe', "
    "age=9, "
    "address={'city': 'LA', 'state': 'CA'}, "
    "role=None, "
    "passed_test=True, "
    "aliases=['John', 'Johnny'])]"
)
MORE_TYPES_FUNCTION_CALL = FunctionCall(
    name="register_user",
    arguments='{"name": "Doe", '
    '"age": 9, '
    '"address": {"city": "LA", "state": "CA"}, '
    '"role": null, '
    '"passed_test": true, '
    '"aliases": ["John", "Johnny"]}',
)
# Call with no arguments at all.
PARAMETERLESS_FUNCTION_OUTPUT = "[get_weather()]"
PARAMETERLESS_FUNCTION_CALL = FunctionCall(
    name="get_weather",
    arguments="{}",
)
# Empty-container edge cases.
EMPTY_DICT_FUNCTION_OUTPUT = "[do_something_cool(additional_data={})]"
EMPTY_DICT_FUNCTION_CALL = FunctionCall(
    name="do_something_cool",
    arguments='{"additional_data": {}}',
)
EMPTY_LIST_FUNCTION_OUTPUT = "[do_something_cool(steps=[])]"
EMPTY_LIST_FUNCTION_CALL = FunctionCall(
    name="do_something_cool",
    arguments='{"steps": []}',
)
# Escaped quotes inside string arguments.
ESCAPED_STRING_FUNCTION_OUTPUT = (
    r"[get_weather(city='Martha\'s Vineyard', metric='\"cool units\"')]"
)
ESCAPED_STRING_FUNCTION_CALL = FunctionCall(
    name="get_weather",
    arguments='{"city": "Martha\'s Vineyard", "metric": "\\"cool units\\""}',
)
# Llama4-specific wrapper tokens around the pythonic call list.
PYTHON_TAG_FUNCTION_OUTPUT = (
    "<|python_start|>[get_weather(city='LA', metric='C')]<|python_end|>"
)
@pytest.mark.parametrize("streaming", [True, False])
def test_no_tool_call(streaming: bool, default_tokenizer: TokenizerLike):
    """Plain assistant text must pass through unchanged with no tool calls."""
    parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        default_tokenizer
    )
    plain_text = "How can I help you today?"
    content, tool_calls = run_tool_extraction(
        parser, plain_text, streaming=streaming
    )
    assert content == plain_text
    assert not tool_calls
# Streamed parallel-call input using the <|python_start|> prefix with the
# closing <|python_end|> tag deliberately absent.
test_str = "<|python_start|>"
test_str += "[get_weather(city='LA', metric='C'),"
test_str += "register_user(name='Doe', age=9)]"
# (streaming, model_output, expected_tool_calls) triples for test_tool_call.
TEST_CASES = [
    # Fixed: this case previously reused ESCAPED_STRING_FUNCTION_OUTPUT, so
    # the simple output was never actually exercised in streaming mode.
    pytest.param(
        True, SIMPLE_FUNCTION_OUTPUT, [SIMPLE_FUNCTION_CALL], id="simple_streaming"
    ),
    pytest.param(
        False, SIMPLE_FUNCTION_OUTPUT, [SIMPLE_FUNCTION_CALL], id="simple_nonstreaming"
    ),
    pytest.param(
        True,
        MORE_TYPES_FUNCTION_OUTPUT,
        [MORE_TYPES_FUNCTION_CALL],
        id="more_types_streaming",
    ),
    pytest.param(
        False,
        MORE_TYPES_FUNCTION_OUTPUT,
        [MORE_TYPES_FUNCTION_CALL],
        id="more_types_nonstreaming",
    ),
    pytest.param(
        True,
        PARAMETERLESS_FUNCTION_OUTPUT,
        [PARAMETERLESS_FUNCTION_CALL],
        id="parameterless_streaming",
    ),
    pytest.param(
        False,
        PARAMETERLESS_FUNCTION_OUTPUT,
        [PARAMETERLESS_FUNCTION_CALL],
        id="parameterless_nonstreaming",
    ),
    pytest.param(
        True,
        EMPTY_DICT_FUNCTION_OUTPUT,
        [EMPTY_DICT_FUNCTION_CALL],
        id="empty_dict_streaming",
    ),
    pytest.param(
        False,
        EMPTY_DICT_FUNCTION_OUTPUT,
        [EMPTY_DICT_FUNCTION_CALL],
        id="empty_dict_nonstreaming",
    ),
    pytest.param(
        True,
        EMPTY_LIST_FUNCTION_OUTPUT,
        [EMPTY_LIST_FUNCTION_CALL],
        id="empty_list_streaming",
    ),
    pytest.param(
        False,
        EMPTY_LIST_FUNCTION_OUTPUT,
        [EMPTY_LIST_FUNCTION_CALL],
        id="empty_list_nonstreaming",
    ),
    pytest.param(
        True,
        ESCAPED_STRING_FUNCTION_OUTPUT,
        [ESCAPED_STRING_FUNCTION_CALL],
        id="escaped_string_streaming",
    ),
    pytest.param(
        False,
        ESCAPED_STRING_FUNCTION_OUTPUT,
        [ESCAPED_STRING_FUNCTION_CALL],
        id="escaped_string_nonstreaming",
    ),
    pytest.param(
        True,
        "[get_weather(city='LA',metric='C'),register_user(name='Doe',age=9)]",
        [
            SIMPLE_FUNCTION_CALL,
            FunctionCall(name="register_user", arguments='{"name": "Doe", "age": 9}'),
        ],
        id="parallel_calls_streaming",
    ),
    pytest.param(
        False,
        "[get_weather(city='LA',metric='C'),register_user(name='Doe',age=9)]",
        [
            SIMPLE_FUNCTION_CALL,
            FunctionCall(name="register_user", arguments='{"name": "Doe", "age": 9}'),
        ],
        id="parallel_calls_nonstreaming",
    ),
    pytest.param(
        True,
        PYTHON_TAG_FUNCTION_OUTPUT,
        [SIMPLE_FUNCTION_CALL],
        id="python_tag_streaming",
    ),
    pytest.param(
        False,
        PYTHON_TAG_FUNCTION_OUTPUT,
        [SIMPLE_FUNCTION_CALL],
        id="python_tag_nonstreaming",
    ),
    # Fixed: these two previously reused the ids "parallel_calls_streaming" /
    # "parallel_calls_nonstreaming"; duplicate parametrize ids force pytest to
    # auto-suffix them, making failures harder to trace back to a case.
    pytest.param(
        True,
        test_str,
        [
            SIMPLE_FUNCTION_CALL,
            FunctionCall(name="register_user", arguments='{"name": "Doe", "age": 9}'),
        ],
        id="python_tag_parallel_calls_streaming",
    ),
    pytest.param(
        False,
        "<|python_start|>[get_weather(city='LA', metric='C'), "
        "register_user(name='Doe', age=9)]",
        [
            SIMPLE_FUNCTION_CALL,
            FunctionCall(name="register_user", arguments='{"name": "Doe", "age": 9}'),
        ],
        id="python_tag_parallel_calls_nonstreaming",
    ),
]
@pytest.mark.parametrize("streaming, model_output, expected_tool_calls", TEST_CASES)
def test_tool_call(
    streaming: bool,
    model_output: str,
    expected_tool_calls: list[FunctionCall],
    default_tokenizer: TokenizerLike,
):
    """Each TEST_CASES entry must parse into exactly the expected calls."""
    parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        default_tokenizer
    )
    _, tool_calls = run_tool_extraction(parser, model_output, streaming=streaming)
    assert len(tool_calls) == len(expected_tool_calls)
    for produced, expected in zip(tool_calls, expected_tool_calls):
        assert produced.type == "function"
        assert produced.function == expected
def test_streaming_tool_call_with_large_steps(default_tokenizer: TokenizerLike):
    """One large streamed delta holding three calls must parse completely."""
    parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        default_tokenizer
    )
    deltas = [
        "<|python_start|>[get_weather(city='LA', metric='C'), "
        "get_weather(), "
        "do_something_cool(steps=[])]<|python_end|>",
    ]
    reconstructor = run_tool_extraction_streaming(
        parser, deltas, assert_one_tool_per_delta=False
    )
    assert reconstructor.other_content == ""
    expected = [
        SIMPLE_FUNCTION_CALL,
        PARAMETERLESS_FUNCTION_CALL,
        EMPTY_LIST_FUNCTION_CALL,
    ]
    assert len(reconstructor.tool_calls) == len(expected)
    for call, want in zip(reconstructor.tool_calls, expected):
        assert call.function == want
@pytest.mark.parametrize("streaming", [False])
def test_regex_timeout_handling(streaming: bool, default_tokenizer: TokenizerLike):
    """A TimeoutError from the tool-call regex must degrade to plain text."""
    parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        default_tokenizer
    )
    pathological_input = "hello world[A(A=" + "\t)A(A=,\t" * 2
    # Stub the compiled pattern so matching always raises TimeoutError.
    timeout_regex = MagicMock()
    timeout_regex.match.side_effect = TimeoutError("Regex timeout")
    with patch.object(parser, "TOOL_CALL_REGEX", timeout_regex):
        content, tool_calls = run_tool_extraction(
            parser, pathological_input, streaming=streaming
        )
        # The raw input is surfaced as content; no tool calls are produced.
        assert content == pathological_input
        assert not tool_calls
        timeout_regex.match.assert_called_once()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/tool_parsers/test_llama4_pythonic_tool_parser.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/offline_inference/automatic_prefix_caching.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Demonstration script for Automatic Prefix Caching (APC) in vLLM.
Automatic Prefix Caching (APC) allows the vLLM engine to reuse cached
KV (key-value) pairs from previous prompts if a new query shares the same
prefix. This reduces redundant computation and improves inference speed.
To enable APC, set `enable_prefix_caching=True` when initializing the
vLLM engine.
This script uses a long Markdown table as the shared prompt prefix and
compares the generation time for two queries that share the same prefix
but ask different questions.
Run:
python examples/offline_inference/automatic_prefix_caching.py
"""
import time
from vllm import LLM, SamplingParams
# ruff: noqa: E501
# A prompt containing a large markdown table. The table is randomly generated by GPT-4.
LONG_PROMPT = (
"You are a helpful assistant in recognizes the content of tables in markdown format. Here is a table as follows.\n# Table\n"
"""
| ID | Name | Age | Occupation | Country | Email | Phone Number | Address |
|-----|---------------|-----|---------------|---------------|------------------------|----------------|------------------------------|
| 1 | John Doe | 29 | Engineer | USA | john.doe@example.com | 555-1234 | 123 Elm St, Springfield, IL |
| 2 | Jane Smith | 34 | Doctor | Canada | jane.smith@example.com | 555-5678 | 456 Oak St, Toronto, ON |
| 3 | Alice Johnson | 27 | Teacher | UK | alice.j@example.com | 555-8765 | 789 Pine St, London, UK |
| 4 | Bob Brown | 45 | Artist | Australia | bob.b@example.com | 555-4321 | 321 Maple St, Sydney, NSW |
| 5 | Carol White | 31 | Scientist | New Zealand | carol.w@example.com | 555-6789 | 654 Birch St, Wellington, NZ |
| 6 | Dave Green | 28 | Lawyer | Ireland | dave.g@example.com | 555-3456 | 987 Cedar St, Dublin, IE |
| 7 | Emma Black | 40 | Musician | USA | emma.b@example.com | 555-1111 | 246 Ash St, New York, NY |
| 8 | Frank Blue | 37 | Chef | Canada | frank.b@example.com | 555-2222 | 135 Spruce St, Vancouver, BC |
| 9 | Grace Yellow | 50 | Engineer | UK | grace.y@example.com | 555-3333 | 864 Fir St, Manchester, UK |
| 10 | Henry Violet | 32 | Artist | Australia | henry.v@example.com | 555-4444 | 753 Willow St, Melbourne, VIC|
| 11 | Irene Orange | 26 | Scientist | New Zealand | irene.o@example.com | 555-5555 | 912 Poplar St, Auckland, NZ |
| 12 | Jack Indigo | 38 | Teacher | Ireland | jack.i@example.com | 555-6666 | 159 Elm St, Cork, IE |
| 13 | Karen Red | 41 | Lawyer | USA | karen.r@example.com | 555-7777 | 357 Cedar St, Boston, MA |
| 14 | Leo Brown | 30 | Chef | Canada | leo.b@example.com | 555-8888 | 246 Oak St, Calgary, AB |
| 15 | Mia Green | 33 | Musician | UK | mia.g@example.com | 555-9999 | 975 Pine St, Edinburgh, UK |
| 16 | Noah Yellow | 29 | Doctor | Australia | noah.y@example.com | 555-0000 | 864 Birch St, Brisbane, QLD |
| 17 | Olivia Blue | 35 | Engineer | New Zealand | olivia.b@example.com | 555-1212 | 753 Maple St, Hamilton, NZ |
| 18 | Peter Black | 42 | Artist | Ireland | peter.b@example.com | 555-3434 | 912 Fir St, Limerick, IE |
| 19 | Quinn White | 28 | Scientist | USA | quinn.w@example.com | 555-5656 | 159 Willow St, Seattle, WA |
| 20 | Rachel Red | 31 | Teacher | Canada | rachel.r@example.com | 555-7878 | 357 Poplar St, Ottawa, ON |
| 21 | Steve Green | 44 | Lawyer | UK | steve.g@example.com | 555-9090 | 753 Elm St, Birmingham, UK |
| 22 | Tina Blue | 36 | Musician | Australia | tina.b@example.com | 555-1213 | 864 Cedar St, Perth, WA |
| 23 | Umar Black | 39 | Chef | New Zealand | umar.b@example.com | 555-3435 | 975 Spruce St, Christchurch, NZ|
| 24 | Victor Yellow | 43 | Engineer | Ireland | victor.y@example.com | 555-5657 | 246 Willow St, Galway, IE |
| 25 | Wendy Orange | 27 | Artist | USA | wendy.o@example.com | 555-7879 | 135 Elm St, Denver, CO |
| 26 | Xavier Green | 34 | Scientist | Canada | xavier.g@example.com | 555-9091 | 357 Oak St, Montreal, QC |
| 27 | Yara Red | 41 | Teacher | UK | yara.r@example.com | 555-1214 | 975 Pine St, Leeds, UK |
| 28 | Zack Blue | 30 | Lawyer | Australia | zack.b@example.com | 555-3436 | 135 Birch St, Adelaide, SA |
| 29 | Amy White | 33 | Musician | New Zealand | amy.w@example.com | 555-5658 | 159 Maple St, Wellington, NZ |
| 30 | Ben Black | 38 | Chef | Ireland | ben.b@example.com | 555-7870 | 246 Fir St, Waterford, IE |
"""
)
def get_generation_time(llm, sampling_params, prompts):
    """Generate from ``llm`` and print the first output plus elapsed time.

    Args:
        llm: Engine exposing ``generate(prompts, sampling_params=...)``.
        sampling_params: Sampling configuration forwarded to ``generate``.
        prompts: Prompt string (or list of prompts) to generate from.
    """
    # time the generation; perf_counter() is monotonic and high-resolution,
    # unlike time.time(), so the measurement cannot be skewed by wall-clock
    # adjustments (NTP, DST, manual changes).
    start_time = time.perf_counter()
    output = llm.generate(prompts, sampling_params=sampling_params)
    end_time = time.perf_counter()
    # print the output and generation time
    print("-" * 30)
    print(f"Output: {output[0].outputs[0].text}")
    print(f"Generation time: {end_time - start_time} seconds.")
    print("-" * 30)
def main():
    """Run two generations sharing LONG_PROMPT to demonstrate prefix caching."""
    # set enable_prefix_caching=True to enable APC
    llm = LLM(model="lmsys/longchat-13b-16k", enable_prefix_caching=True)
    # Greedy decoding so both runs are deterministic and comparable.
    sampling_params = SamplingParams(temperature=0, max_tokens=100)
    # Querying the age of John Doe
    get_generation_time(
        llm,
        sampling_params,
        LONG_PROMPT
        + "Question: what is the age of John Doe? Your answer: The age of John Doe is ",
    )
    # Querying the age of Zack Blue
    # This query will be faster since vllm avoids computing the KV cache of LONG_PROMPT again.
    get_generation_time(
        llm,
        sampling_params,
        LONG_PROMPT
        + "Question: what is the age of Zack Blue? Your answer: The age of Zack Blue is ",
    )
if __name__ == "__main__":
main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/automatic_prefix_caching.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/test_outputs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.outputs import RequestOutput
# All tests in this module run on CPU only.
pytestmark = pytest.mark.cpu_test
def test_request_output_forward_compatible():
    """RequestOutput must tolerate unknown keyword args from newer versions."""
    kwargs = dict(
        request_id="test_request_id",
        prompt="test prompt",
        prompt_token_ids=[1, 2, 3],
        prompt_logprobs=None,
        outputs=[],
        finished=False,
        # Simulates a field introduced by a newer vLLM release.
        example_arg_added_in_new_version="some_value",
    )
    assert RequestOutput(**kwargs) is not None
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_outputs.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/falcon_h1.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only FalconH1 model."""
from collections.abc import Iterable
from itertools import islice
import torch
from torch import nn
from transformers import FalconH1Config
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ModelConfig, VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.distributed.parallel_state import get_pp_group
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.config import set_default_rope_theta
from .interfaces import (
HasInnerState,
IsHybrid,
SupportsLoRA,
SupportsMambaPrefixCaching,
SupportsPP,
)
from .utils import (
PPMissingLayer,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class FalconH1MLP(nn.Module):
    """Gated SiLU MLP for FalconH1 with per-branch output multipliers."""

    def __init__(
        self,
        config: FalconH1Config,
        quant_config: QuantizationConfig | None = None,
        bias: bool = False,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Fused gate+up projection: two intermediate-size outputs in one GEMM.
        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=config.hidden_size,
            output_sizes=[config.intermediate_size] * 2,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            input_size=config.intermediate_size,
            output_size=config.hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        self.tp_size = get_tensor_model_parallel_world_size()
        self.intermediate_size = config.intermediate_size
        # Scaling factors from the FalconH1 mlp_multipliers config entry.
        self.gate_multiplier, self.down_multiplier = config.mlp_multipliers
        if config.hidden_act != "silu":
            raise ValueError(
                f"Unsupported activation: {config.hidden_act}. "
                "Only silu is supported for now."
            )
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        # Scale only the gate half (first intermediate_size/tp_size columns
        # of this rank's shard) before the SiLU-and-multiply activation.
        gate_width = self.intermediate_size // self.tp_size
        gate_up[:, :gate_width] *= self.gate_multiplier
        activated = self.act_fn(gate_up)
        down_out, _ = self.down_proj(activated)
        return down_out * self.down_multiplier
class FalconH1SSMDecoderLayer(nn.Module):
    """Mamba-2 (SSM) branch of a FalconH1 hybrid layer, with a muP scaling
    vector applied to the mixer's input projection output."""

    def __init__(
        self,
        config: FalconH1Config,
        model_config: ModelConfig | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = config
        self.tp_size = get_tensor_model_parallel_world_size()
        # SSM latent width: explicit mamba_d_ssm if set, otherwise
        # expand-factor times the hidden size.
        self.d_ssm = (
            int(config.mamba_expand * config.hidden_size)
            if config.mamba_d_ssm is None
            else config.mamba_d_ssm
        )
        self.mamba = MambaMixer2(
            hidden_size=config.hidden_size,
            ssm_state_size=config.mamba_d_state,
            conv_kernel_size=config.mamba_d_conv,
            intermediate_size=self.d_ssm,
            use_conv_bias=config.mamba_conv_bias,
            use_bias=config.mamba_proj_bias,
            n_groups=config.mamba_n_groups,
            num_heads=config.mamba_n_heads,
            head_dim=config.mamba_d_head,
            rms_norm_eps=config.rms_norm_eps,
            activation=config.hidden_act,
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
            use_rms_norm=config.mamba_rms_norm,
            prefix=f"{prefix}.mixer",
        )
        # n_groups is overridden later by `MambaMixer2`
        self.groups_time_state_size = self.mamba.n_groups * config.mamba_d_state
        self.zxbcdt_multipliers = config.ssm_multipliers
        self._init_mup_vector()

    def _init_mup_vector(self):
        """
        Non learnable per-block scaling vector composed of element-wise
        multipliersapplied to each separate contiguous block of the output
        of the linear projection (in_proj) before further processing
        (gating, convolution, SSM):
            - Z block: [0 : d_ssm] → zxbcdt_multipliers[0]
            - X block: [d_ssm : 2 * d_ssm] → zxbcdt_multipliers[1]
            - B block: [2 * d_ssm : 2 * d_ssm + G * S] → zxbcdt_multipliers[2]
            - C block: [2 * d_ssm + G * S : 2 * d_ssm + 2 * G * S]
            → zxbcdt_multipliers[3]
            - dt block: [2 * d_ssm + 2 * G * S : end] → zxbcdt_multipliers[4]
        where:
            - d_ssm: Dimension of state-space model latent
            - G: Number of groups (n_groups)
            - S: SSM state size per group
            - All indices are divided by tp_size to support tensor parallelism
        """
        vector_shape = (
            2 * self.d_ssm + 2 * self.groups_time_state_size + self.config.mamba_n_heads
        ) // self.tp_size
        mup_vector = torch.ones(1, vector_shape)
        # Z vector 0 -> d_ssm
        mup_vector[:, : self.d_ssm // self.tp_size] *= self.zxbcdt_multipliers[0]
        # X vector d_ssm -> 2 * d_ssm
        mup_vector[
            :, (self.d_ssm // self.tp_size) : (2 * self.d_ssm // self.tp_size)
        ] *= self.zxbcdt_multipliers[1]
        # B vector 2 * d_ssm -> 2 * d_ssm + (n_group * d_state)
        mup_vector[
            :,
            (2 * self.d_ssm) // self.tp_size : (
                2 * self.d_ssm + self.groups_time_state_size
            )
            // self.tp_size,
        ] *= self.zxbcdt_multipliers[2]
        # C vector 2 * d_ssm + (n_group * d_state)
        # -> 2 * d_ssm + 2 * (n_group * d_state)
        mup_vector[
            :,
            (2 * self.d_ssm + self.groups_time_state_size) // self.tp_size : (
                2 * self.d_ssm + 2 * self.groups_time_state_size
            )
            // self.tp_size,
        ] *= self.zxbcdt_multipliers[3]
        # dt vector 2 * d_ssm + 2 * (n_group * d_state)
        # -> 2 * d_ssm + 2 * (n_group * d_state) + n_heads
        mup_vector[
            :,
            (2 * self.d_ssm + 2 * self.groups_time_state_size) // self.tp_size :,
        ] *= self.zxbcdt_multipliers[4]
        # Buffer (not a parameter): moves with the module but is never trained.
        self.register_buffer("mup_vector", mup_vector, persistent=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs,
    ):
        """Run the Mamba mixer; `residual` is passed through unchanged."""
        output = self.mamba(
            hidden_states,
            mup_vector=self.mup_vector,
        )
        return output, residual
class FalconH1AttentionDecoderLayer(nn.Module):
    """Multi-head self-attention branch of a FalconH1 hybrid layer.

    Applies a config-driven key multiplier before rotary embedding; QKV and
    output projections are tensor-parallel.
    """

    def __init__(
        self,
        config: FalconH1Config,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        set_default_rope_theta(config, default_theta=1e11)
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = config.num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        # Explicit head_dim from config wins; otherwise derive it.
        self.head_dim = (
            config.hidden_size // self.total_num_heads
            if getattr(config, "head_dim", None) is None
            else config.head_dim
        )
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.max_position_embeddings = max_position_embeddings
        # Rotary embedding may cover only part of the head dim
        # (attn_rotary_emb), expressed as a partial rotary factor.
        rotary_dim = getattr(config, "attn_rotary_emb", self.head_dim)
        config.rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim
        self.rotary_emb = get_rope(
            head_size=self.head_dim,
            max_position=max_position_embeddings,
            rope_parameters=config.rope_parameters,
            is_neox_style=True,
            dtype=None,  # see impl of get_rope
        )
        self.qkv_proj = QKVParallelLinear(
            config.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            config.hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        self.key_multiplier = config.key_multiplier

    def self_attention(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        """QKV projection -> key scaling -> RoPE -> attention -> output proj."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # FalconH1-specific scalar applied to keys before rotary embedding.
        k = k * self.key_multiplier
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs,
    ):
        """Run self-attention; `residual` is passed through unchanged."""
        hidden_states = self.self_attention(
            positions=positions,
            hidden_states=hidden_states,
        )
        return hidden_states, residual
class FalconH1ParallelHybrid(nn.Module):
    """
    A hybrid decoder layer for FalconH1 where the input is processed
    in parallel through both the self-attention branch and the SSM (Mamba)
    branch. Their outputs are then summed to produce the final hidden state.
    This layer uses:
      - FalconH1AttentionDecoderLayer for the multi-head self-attention branch.
      - FalconH1SSMDecoderLayer for the state-space (Mamba) branch.
    """

    def __init__(
        self,
        config: FalconH1Config,
        layer_idx: int,
        model_config: ModelConfig | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Instantiate the attention branch
        self.self_attn = FalconH1AttentionDecoderLayer(
            config=config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=prefix,
        )
        # In V1 all attention/ssm layers must have
        # different index in prefix
        ssm_layer_idx = config.num_hidden_layers + layer_idx
        ssm_prefix = prefix.split(".")[0] + f".{ssm_layer_idx}"
        # Instantiate the SSM branch
        self.mamba = FalconH1SSMDecoderLayer(
            config=config,
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=ssm_prefix,
        )
        # Per-branch input/output scalars from the FalconH1 muP config.
        self.ssm_out_multiplier = config.ssm_out_multiplier
        self.ssm_in_multiplier = config.ssm_in_multiplier
        self.attention_in_multiplier = config.attention_in_multiplier
        self.attn_out_multiplier = config.attention_out_multiplier
        self.feed_forward = FalconH1MLP(
            config, quant_config=quant_config, prefix=f"{prefix}.feed_forward"
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.pre_ff_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        **kwargs,
    ):
        """Norm -> (attention || SSM) -> scaled sum + residual -> norm -> MLP."""
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        # Process input through the attention branch.
        # FalconH1AttentionDecoderLayer expects positions, hidden_states,
        # kv_cache, attn_metadata, and residual.
        attn_hidden, _ = self.self_attn(
            positions=positions,
            hidden_states=hidden_states * self.attention_in_multiplier,
            residual=residual,
            **kwargs,
        )
        # Process input through the SSM branch.
        # FalconH1SSMDecoderLayer expects hidden_states, attn_metadata,
        # residual, and sequence_idx.
        ssm_hidden, _ = self.mamba(
            hidden_states=hidden_states * self.ssm_in_multiplier,
            residual=residual,
            **kwargs,
        )
        # Sum the outputs from both branches.
        # We assume both branches produce outputs of the same
        # dimensionality (config.hidden_size).
        hidden_states = (attn_hidden * self.attn_out_multiplier) + (
            ssm_hidden * self.ssm_out_multiplier
        )
        hidden_states = hidden_states + residual
        # feed-forward
        residual = hidden_states
        hidden_states = self.pre_ff_layernorm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
@support_torch_compile
class FalconH1Model(nn.Module):
    """Stack of FalconH1ParallelHybrid layers with pipeline-parallel support."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: FalconH1Config = vllm_config.model_config.hf_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.vocab_size = config.vocab_size
        # Embeddings (and their muP multiplier) only live on the first
        # pipeline rank; other ranks receive hidden states instead.
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                config.hidden_size,
            )
            self.embedding_multiplier = config.embedding_multiplier
        else:
            self.embed_tokens = PPMissingLayer()
            self.embedding_multiplier = 1.0

        def get_layer(prefix: str):
            # The layer index is the last dotted component of the prefix.
            layer_idx = int(prefix.rsplit(".", 1)[1])
            layer_class = FalconH1ParallelHybrid
            return layer_class(
                config,
                layer_idx,
                model_config,
                cache_config,
                quant_config=quant_config,
                prefix=prefix,
            )

        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers"
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        # Final norm only on the last pipeline rank.
        if get_pp_group().is_last_rank:
            self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.final_layernorm = PPMissingLayer()

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings (no multiplier applied here)."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run this rank's layer slice; returns hidden states on the last
        pipeline rank, IntermediateTensors otherwise."""
        if get_pp_group().is_first_rank:
            # Scale embeddings by the FalconH1 embedding multiplier.
            if inputs_embeds is not None:
                hidden_states = inputs_embeds * self.embedding_multiplier
            else:
                hidden_states = (
                    self.embed_input_ids(input_ids) * self.embedding_multiplier
                )
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states = layer(
                positions=positions,
                hidden_states=hidden_states,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {
                    "hidden_states": hidden_states,
                }
            )
        hidden_states = self.final_layernorm(hidden_states)
        return hidden_states
class FalconH1ForCausalLM(
nn.Module,
HasInnerState,
SupportsLoRA,
SupportsPP,
IsHybrid,
SupportsMambaPrefixCaching,
):
packed_modules_mapping = {
"qkv_proj": ["q_proj", "k_proj", "v_proj"],
"gate_up_proj": ["gate_proj", "up_proj"],
}
embedding_modules = {
"embed_tokens": "input_embeddings",
"lm_head": "output_embeddings",
}
@classmethod
def get_mamba_state_dtype_from_config(
cls,
vllm_config: "VllmConfig",
) -> tuple[torch.dtype, torch.dtype]:
return MambaStateDtypeCalculator.mamba2_state_dtype(
vllm_config.model_config.dtype,
vllm_config.cache_config.mamba_cache_dtype,
vllm_config.cache_config.mamba_ssm_cache_dtype,
)
@classmethod
def get_mamba_state_shape_from_config(
cls,
vllm_config: "VllmConfig",
) -> tuple[tuple[int, int], tuple[int, int, int]]:
"""Calculate shapes for Mamba's convolutional and state caches.
Args:
vllm_config: vLLM config
Returns:
Tuple containing:
- conv_state_shape: Shape for convolutional state cache
- temporal_state_shape: Shape for state space model cache
"""
parallel_config = vllm_config.parallel_config
hf_config = vllm_config.model_config.hf_config
intermediate_size = (
int(hf_config.mamba_expand * hf_config.hidden_size)
if hf_config.mamba_d_ssm is None
else hf_config.mamba_d_ssm
)
return MambaStateShapeCalculator.mamba2_state_shape(
intermediate_size=intermediate_size,
tp_world_size=parallel_config.tensor_parallel_size,
n_groups=hf_config.mamba_n_groups,
num_heads=hf_config.mamba_n_heads,
head_dim=hf_config.mamba_d_head,
state_size=hf_config.mamba_d_state,
conv_kernel=hf_config.mamba_d_conv,
)
@classmethod
def get_mamba_state_copy_func(cls) -> tuple[MambaStateCopyFunc, MambaStateCopyFunc]:
return MambaStateCopyFuncCalculator.mamba2_state_copy_func()
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
config = vllm_config.model_config.hf_config
self.vllm_config = vllm_config
self.model_config = vllm_config.model_config
scheduler_config = vllm_config.scheduler_config
self.quant_config = vllm_config.quant_config
super().__init__()
self.config = config
self.scheduler_config = scheduler_config
self.model = FalconH1Model(
vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
)
self.tie_word_embeddings = config.tie_word_embeddings
if get_pp_group().is_last_rank:
self.lm_head = ParallelLMHead(
config.vocab_size,
config.hidden_size,
prefix=maybe_prefix(prefix, "lm_head"),
)
self.lm_head_multiplier = config.lm_head_multiplier
if self.tie_word_embeddings:
self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)
# Used to track and store by the Mamba cache between steps.
self.logits_processor = LogitsProcessor(
config.vocab_size,
config.vocab_size,
scale=config.lm_head_multiplier,
)
else:
self.lm_head = PPMissingLayer()
self.make_empty_intermediate_tensors = (
self.model.make_empty_intermediate_tensors
)
def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
return self.model.embed_input_ids(input_ids)
def forward(
self,
input_ids: torch.Tensor | None,
positions: torch.Tensor,
intermediate_tensors: IntermediateTensors | None = None,
inputs_embeds: torch.Tensor | None = None,
**kwargs,
):
hidden_states = self.model(
input_ids,
positions,
intermediate_tensors,
inputs_embeds,
)
return hidden_states
def compute_logits(
self,
hidden_states: torch.Tensor,
) -> torch.Tensor | None:
logits = self.logits_processor(self.lm_head, hidden_states)
return logits
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
loaded_params: set[str] = set()
for name, loaded_weight in weights:
if "rotary_emb.inv_freq" in name:
continue
if "A_log" in name:
name = name.replace("A_log", "A")
if "mamba" in name:
name = name.replace("mamba", "mamba.mamba")
if "scale" in name:
# Remapping the name of kv-scale.
name = maybe_remap_kv_scale_name(name, params_dict)
if name is None:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
# Skip layers on other devices.
if is_pp_missing_parameter(name, self):
continue
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, self):
continue
if self.tie_word_embeddings and "lm_head" in name:
continue
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
loaded_params.add(name)
if self.tie_word_embeddings:
loaded_params.add("lm_head.weight")
return loaded_params
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/falcon_h1.py",
"license": "Apache License 2.0",
"lines": 613,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/offline_inference/prompt_embed_inference.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Demonstrates how to generate prompt embeddings using
Hugging Face Transformers and use them as input to vLLM
for both single and batch inference.
Model: meta-llama/Llama-3.2-1B-Instruct
Note: This model is gated on Hugging Face Hub.
You must request access to use it:
https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct
Requirements:
- vLLM
- transformers
Run:
python examples/offline_inference/prompt_embed_inference.py
"""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizer
from vllm import LLM
def init_tokenizer_and_llm(model_name: str):
tokenizer = AutoTokenizer.from_pretrained(model_name)
transformers_model = AutoModelForCausalLM.from_pretrained(model_name)
embedding_layer = transformers_model.get_input_embeddings()
llm = LLM(model=model_name, enable_prompt_embeds=True)
return tokenizer, embedding_layer, llm
def get_prompt_embeds(
chat: list[dict[str, str]],
tokenizer: PreTrainedTokenizer,
embedding_layer: torch.nn.Module,
):
token_ids = tokenizer.apply_chat_template(
chat, add_generation_prompt=True, return_tensors="pt", return_dict=True
).input_ids
prompt_embeds = embedding_layer(token_ids).squeeze(0)
return prompt_embeds
def single_prompt_inference(
llm: LLM, tokenizer: PreTrainedTokenizer, embedding_layer: torch.nn.Module
):
chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
prompt_embeds = get_prompt_embeds(chat, tokenizer, embedding_layer)
outputs = llm.generate(
{
"prompt_embeds": prompt_embeds,
}
)
print("\n[Single Inference Output]")
print("-" * 30)
for o in outputs:
print(o.outputs[0].text)
print("-" * 30)
def batch_prompt_inference(
llm: LLM, tokenizer: PreTrainedTokenizer, embedding_layer: torch.nn.Module
):
chats = [
[{"role": "user", "content": "Please tell me about the capital of France."}],
[{"role": "user", "content": "When is the day longest during the year?"}],
[{"role": "user", "content": "Where is bigger, the moon or the sun?"}],
]
prompt_embeds_list = [
get_prompt_embeds(chat, tokenizer, embedding_layer) for chat in chats
]
outputs = llm.generate([{"prompt_embeds": embeds} for embeds in prompt_embeds_list])
print("\n[Batch Inference Outputs]")
print("-" * 30)
for i, o in enumerate(outputs):
print(f"Q{i + 1}: {chats[i][0]['content']}")
print(f"A{i + 1}: {o.outputs[0].text}\n")
print("-" * 30)
def main():
model_name = "meta-llama/Llama-3.2-1B-Instruct"
tokenizer, embedding_layer, llm = init_tokenizer_and_llm(model_name)
single_prompt_inference(llm, tokenizer, embedding_layer)
batch_prompt_inference(llm, tokenizer, embedding_layer)
if __name__ == "__main__":
main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/prompt_embed_inference.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/prompt_embed_inference_with_openai_client.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
vLLM OpenAI-Compatible Client with Prompt Embeddings
This script demonstrates how to:
1. Generate prompt embeddings using Hugging Face Transformers
2. Encode them in base64 format
3. Send them to a vLLM server via the OpenAI-compatible Completions API
Run the vLLM server first:
vllm serve meta-llama/Llama-3.2-1B-Instruct \
--runner generate \
--max-model-len 4096 \
--enable-prompt-embeds
Run the client:
python examples/online_serving/prompt_embed_inference_with_openai_client.py
Model: meta-llama/Llama-3.2-1B-Instruct
Note: This model is gated on Hugging Face Hub.
You must request access to use it:
https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct
Dependencies:
- transformers
- torch
- openai
"""
import transformers
from openai import OpenAI
from vllm.utils.serial_utils import tensor2base64
def main():
client = OpenAI(
api_key="EMPTY",
base_url="http://localhost:8000/v1",
)
model_name = "meta-llama/Llama-3.2-1B-Instruct"
# Transformers
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
transformers_model = transformers.AutoModelForCausalLM.from_pretrained(model_name)
# Refer to the HuggingFace repo for the correct format to use
chat = [{"role": "user", "content": "Please tell me about the capital of France."}]
token_ids = tokenizer.apply_chat_template(
chat, add_generation_prompt=True, return_tensors="pt", return_dict=True
).input_ids
embedding_layer = transformers_model.get_input_embeddings()
prompt_embeds = embedding_layer(token_ids).squeeze(0)
# Prompt embeddings
encoded_embeds = tensor2base64(prompt_embeds)
completion = client.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
# NOTE: The OpenAI client allows passing in extra JSON body via the
# `extra_body` argument.
extra_body={"prompt_embeds": encoded_embeds},
)
print("-" * 30)
print(completion.choices[0].text)
print("-" * 30)
if __name__ == "__main__":
main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/prompt_embed_inference_with_openai_client.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/quantization/test_auto_round.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test model set-up and inference for quantized HF models supported
on the AutoRound.
Validating the configuration and printing results for manual checking.
Run `pytest tests/quantization/test_auto_round.py`.
"""
import pytest
from vllm.platforms import current_platform
MODELS = [
"OPEA/Qwen2.5-0.5B-Instruct-int4-sym-inc", ##auto_round:auto_gptq
"Intel/Qwen2-0.5B-Instruct-int4-sym-AutoRound", ##auto_round:auto_awq
]
@pytest.mark.skipif(
not current_platform.is_cpu()
and not current_platform.is_xpu()
and not current_platform.is_cuda(),
reason="only supports CPU/XPU/CUDA backend.",
)
@pytest.mark.parametrize("model", MODELS)
def test_auto_round(vllm_runner, model):
with vllm_runner(model, enforce_eager=True) as llm:
output = llm.generate_greedy(["The capital of France is"], max_tokens=8)
assert output
print(f"{output[0][1]}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/quantization/test_auto_round.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/entrypoints/openai/test_completion_with_prompt_embeds.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import io
import json
import openai # use the official client for correctness check
import pytest
import pytest_asyncio
import torch
# downloading lora to test lora requests
from openai import BadRequestError
from transformers import AutoConfig
from ...utils import RemoteOpenAIServer
# any model with a chat template should work here
MODEL_NAME = "facebook/opt-125m"
LORA_SERVING_MODEL_NAME = "opt125m-lora"
CONFIG = AutoConfig.from_pretrained(MODEL_NAME)
@pytest.fixture(scope="module", params=["use-lora"])
def default_server_args(
request: pytest.FixtureRequest, opt125_lora_files: str
) -> list[str]:
args = [
# use half precision for speed and memory savings in CI environment
"--dtype",
"bfloat16",
"--max-model-len",
"2048",
"--max-num-seqs",
"128",
"--enforce-eager",
# Prompt Embeds server args
"--enable-prompt-embeds",
]
if request.param == "use-lora":
lora_module_1 = {
"name": LORA_SERVING_MODEL_NAME,
"path": opt125_lora_files,
"base_model_name": MODEL_NAME,
}
args.extend(
[
"--enable-lora",
"--lora-module",
json.dumps(lora_module_1),
"--max-lora-rank",
"64",
"--max-cpu-loras",
"2",
]
)
return args
EXAMPLE_PROMPTS = [
"Hello, my name is",
"What is an LLM?",
]
def _encode_embeds(embeds: torch.Tensor):
buffer = io.BytesIO()
torch.save(embeds, buffer)
return base64.b64encode(buffer.getvalue()).decode("utf-8")
@pytest.fixture(scope="module")
def example_prompt_embeds(hf_runner):
"""Create example embeddings and return them as base64 encoded string."""
with hf_runner(MODEL_NAME) as hf_model:
example_embeddings = hf_model.get_prompt_embeddings(EXAMPLE_PROMPTS)
return [_encode_embeds(item) for item in example_embeddings]
@pytest.fixture(scope="module", params=["", "--disable-frontend-multiprocessing"])
def server_with_prompt_embeds(default_server_args, request):
if request.param:
default_server_args.append(request.param)
with RemoteOpenAIServer(MODEL_NAME, default_server_args) as remote_server:
yield remote_server
@pytest_asyncio.fixture
async def client_with_prompt_embeds(server_with_prompt_embeds):
async with server_with_prompt_embeds.get_async_client() as async_client:
yield async_client
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME, LORA_SERVING_MODEL_NAME])
async def test_completions_with_prompt_embeds(
example_prompt_embeds,
client_with_prompt_embeds: openai.AsyncOpenAI,
model_name: str,
):
encoded_embeds, encoded_embeds2 = example_prompt_embeds
# Test case: Single prompt embeds input
completion = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": encoded_embeds},
)
assert len(completion.choices[0].text) >= 1
assert completion.choices[0].prompt_logprobs is None
# Test case: batch completion with prompt_embeds
completion = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": [encoded_embeds, encoded_embeds2]},
)
assert len(completion.choices) == 2
assert len(completion.choices[0].text) >= 1
assert len(completion.choices[1].text) >= 1
# Test case: streaming with prompt_embeds
single_completion = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": encoded_embeds},
)
single_output = single_completion.choices[0].text
stream = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
stream=True,
extra_body={"prompt_embeds": encoded_embeds},
)
chunks = []
finish_reason_count = 0
async for chunk in stream:
chunks.append(chunk.choices[0].text)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
assert finish_reason_count == 1
assert chunk.choices[0].finish_reason == "length"
assert chunk.choices[0].text
assert "".join(chunks) == single_output
# Test case: batch streaming with prompt_embeds
stream = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
stream=True,
extra_body={"prompt_embeds": [encoded_embeds, encoded_embeds2]},
)
chunks_stream_embeds: list[list[str]] = [[], []]
finish_reason_count = 0
async for chunk in stream:
chunks_stream_embeds[chunk.choices[0].index].append(chunk.choices[0].text)
if chunk.choices[0].finish_reason is not None:
finish_reason_count += 1
assert finish_reason_count == 2
assert chunk.choices[0].finish_reason == "length"
assert chunk.choices[0].text
assert len(chunks_stream_embeds[0]) > 0
assert len(chunks_stream_embeds[1]) > 0
# Test case: mixed text and prompt_embeds
completion_mixed = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt="This is a prompt",
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": encoded_embeds},
)
assert len(completion.choices) == 2
completion_text_only = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt="This is a prompt",
max_tokens=5,
temperature=0.0,
)
completion_embeds_only = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": encoded_embeds},
)
# Embeddings responses should be handled first
assert completion_mixed.choices[0].text == completion_embeds_only.choices[0].text
assert completion_mixed.choices[1].text == completion_text_only.choices[0].text
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME, LORA_SERVING_MODEL_NAME])
async def test_completions_errors_with_prompt_embeds(
client_with_prompt_embeds: openai.AsyncOpenAI, model_name: str
):
# Test error case: invalid prompt_embeds
with pytest.raises(BadRequestError):
await client_with_prompt_embeds.completions.create(
prompt=None,
model=model_name,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": "invalid_base64"},
)
@pytest.mark.asyncio
@pytest.mark.parametrize("logprobs_arg", [1, 0])
@pytest.mark.parametrize("model_name", [MODEL_NAME, LORA_SERVING_MODEL_NAME])
async def test_completions_with_logprobs_and_prompt_embeds(
example_prompt_embeds,
client_with_prompt_embeds: openai.AsyncOpenAI,
logprobs_arg: int,
model_name: str,
):
encoded_embeds, encoded_embeds2 = example_prompt_embeds
# Test case: Logprobs using prompt_embeds
completion = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
echo=False,
logprobs=logprobs_arg,
extra_body={"prompt_embeds": encoded_embeds},
)
logprobs = completion.choices[0].logprobs
assert logprobs is not None
assert len(logprobs.text_offset) == 5
assert len(logprobs.token_logprobs) == 5
assert len(logprobs.top_logprobs) == 5
for top_logprobs in logprobs.top_logprobs[1:]:
assert max(logprobs_arg, 1) <= len(top_logprobs) <= logprobs_arg + 1
assert len(logprobs.tokens) == 5
# Test case: Log probs with batch completion and prompt_embeds
completion = await client_with_prompt_embeds.completions.create(
model=model_name,
prompt=None,
max_tokens=5,
temperature=0.0,
echo=False,
logprobs=logprobs_arg,
extra_body={"prompt_embeds": [encoded_embeds, encoded_embeds2]},
)
assert len(completion.choices) == 2
for choice in completion.choices:
logprobs = choice.logprobs
assert logprobs is not None
assert len(logprobs.text_offset) == 5
assert len(logprobs.token_logprobs) == 5
assert len(logprobs.top_logprobs) == 5
for top_logprobs in logprobs.top_logprobs[1:]:
assert max(logprobs_arg, 1) <= len(top_logprobs) <= logprobs_arg + 1
assert len(logprobs.tokens) == 5
@pytest.mark.asyncio
async def test_prompt_logprobs_raises_error(
example_prompt_embeds,
client_with_prompt_embeds: openai.AsyncOpenAI,
):
encoded_embeds, _ = example_prompt_embeds
with pytest.raises(BadRequestError, match="not compatible"):
await client_with_prompt_embeds.completions.create(
model=MODEL_NAME,
prompt=None,
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": encoded_embeds, "prompt_logprobs": True},
)
@pytest.mark.asyncio
async def test_empty_prompt_embeds(
client_with_prompt_embeds: openai.AsyncOpenAI,
) -> None:
await client_with_prompt_embeds.completions.create(
model=MODEL_NAME,
prompt="Hello",
max_tokens=5,
temperature=0.0,
extra_body={"prompt_embeds": []},
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_completion_with_prompt_embeds.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/metrics/test_ray_metrics.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import ray
from vllm.config.model import ModelDType
from vllm.sampling_params import SamplingParams
from vllm.v1.engine.async_llm import AsyncEngineArgs, AsyncLLM
from vllm.v1.metrics.ray_wrappers import RayPrometheusMetric, RayPrometheusStatLogger
MODELS = [
"distilbert/distilgpt2",
]
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [16])
def test_engine_log_metrics_ray(
example_prompts,
model: str,
dtype: ModelDType,
max_tokens: int,
) -> None:
"""Simple smoke test, verifying this can be used without exceptions.
Need to start a Ray cluster in order to verify outputs."""
@ray.remote(num_gpus=1)
class EngineTestActor:
async def run(self):
engine_args = AsyncEngineArgs(
model=model, dtype=dtype, disable_log_stats=False, enforce_eager=True
)
engine = AsyncLLM.from_engine_args(
engine_args, stat_loggers=[RayPrometheusStatLogger]
)
for i, prompt in enumerate(example_prompts):
results = engine.generate(
request_id=f"request-id-{i}",
prompt=prompt,
sampling_params=SamplingParams(max_tokens=max_tokens),
)
async for _ in results:
pass
# Create the actor and call the async method
actor = EngineTestActor.remote() # type: ignore[attr-defined]
ray.get(actor.run.remote())
def test_sanitized_opentelemetry_name():
"""Test the metric name sanitization logic for Ray."""
# Only a-z, A-Z, 0-9, _, test valid characters are preserved
valid_name = "valid_metric_123_abcDEF"
assert (
RayPrometheusMetric._get_sanitized_opentelemetry_name(valid_name) == valid_name
)
# Test dash, dot, are replaced
name_with_dash_dot = "metric-name.test"
expected = "metric_name_test"
assert (
RayPrometheusMetric._get_sanitized_opentelemetry_name(name_with_dash_dot)
== expected
)
# Test colon is replaced with underscore
name_with_colon = "metric:name"
expected = "metric_name"
assert (
RayPrometheusMetric._get_sanitized_opentelemetry_name(name_with_colon)
== expected
)
# Test multiple invalid characters are replaced
name_with_invalid = "metric:name@with#special%chars"
expected = "metric_name_with_special_chars"
assert (
RayPrometheusMetric._get_sanitized_opentelemetry_name(name_with_invalid)
== expected
)
# Test mixed valid and invalid characters
complex_name = "vllm:engine_stats/time.latency_ms-99p"
expected = "vllm_engine_stats_time_latency_ms_99p"
assert (
RayPrometheusMetric._get_sanitized_opentelemetry_name(complex_name) == expected
)
# Test empty string
assert RayPrometheusMetric._get_sanitized_opentelemetry_name("") == ""
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/metrics/test_ray_metrics.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/metrics/ray_wrappers.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorPrometheus
from vllm.v1.metrics.loggers import PrometheusStatLogger
from vllm.v1.metrics.perf import PerfMetricsProm
from vllm.v1.spec_decode.metrics import SpecDecodingProm
try:
from ray import serve as ray_serve
from ray.util import metrics as ray_metrics
from ray.util.metrics import Metric
except ImportError:
ray_metrics = None
ray_serve = None
import regex as re
def _get_replica_id() -> str | None:
"""Get the current Ray Serve replica ID, or None if not in a Serve context."""
if ray_serve is None:
return None
try:
return ray_serve.get_replica_context().replica_id.unique_id
except ray_serve.exceptions.RayServeException:
return None
class RayPrometheusMetric:
def __init__(self):
if ray_metrics is None:
raise ImportError("RayPrometheusMetric requires Ray to be installed.")
self.metric: Metric = None
@staticmethod
def _get_tag_keys(labelnames: list[str] | None) -> tuple[str, ...]:
labels = list(labelnames) if labelnames else []
labels.append("ReplicaId")
return tuple(labels)
def labels(self, *labels, **labelskwargs):
if labels:
# -1 because ReplicaId was added automatically
expected = len(self.metric._tag_keys) - 1
if len(labels) != expected:
raise ValueError(
"Number of labels must match the number of tag keys. "
f"Expected {expected}, got {len(labels)}"
)
labelskwargs.update(zip(self.metric._tag_keys, labels))
labelskwargs["ReplicaId"] = _get_replica_id() or ""
if labelskwargs:
for k, v in labelskwargs.items():
if not isinstance(v, str):
labelskwargs[k] = str(v)
self.metric.set_default_tags(labelskwargs)
return self
@staticmethod
def _get_sanitized_opentelemetry_name(name: str) -> str:
"""
For compatibility with Ray + OpenTelemetry, the metric name must be
sanitized. In particular, this replaces disallowed character (e.g., ':')
with '_' in the metric name.
Allowed characters: a-z, A-Z, 0-9, _
# ruff: noqa: E501
Ref: https://github.com/open-telemetry/opentelemetry-cpp/blob/main/sdk/src/metrics/instrument_metadata_validator.cc#L22-L23
Ref: https://github.com/ray-project/ray/blob/master/src/ray/stats/metric.cc#L107
"""
return re.sub(r"[^a-zA-Z0-9_]", "_", name)
class RayGaugeWrapper(RayPrometheusMetric):
"""Wraps around ray.util.metrics.Gauge to provide same API as
prometheus_client.Gauge"""
def __init__(
self,
name: str,
documentation: str | None = "",
labelnames: list[str] | None = None,
multiprocess_mode: str | None = "",
):
# All Ray metrics are keyed by WorkerId, so multiprocess modes like
# "mostrecent", "all", "sum" do not apply. This logic can be manually
# implemented at the observability layer (Prometheus/Grafana).
del multiprocess_mode
tag_keys = self._get_tag_keys(labelnames)
name = self._get_sanitized_opentelemetry_name(name)
self.metric = ray_metrics.Gauge(
name=name,
description=documentation,
tag_keys=tag_keys,
)
def set(self, value: int | float):
return self.metric.set(value)
def set_to_current_time(self):
# ray metrics doesn't have set_to_current time, https://docs.ray.io/en/latest/_modules/ray/util/metrics.html
return self.metric.set(time.time())
class RayCounterWrapper(RayPrometheusMetric):
"""Wraps around ray.util.metrics.Counter to provide same API as
prometheus_client.Counter"""
def __init__(
self,
name: str,
documentation: str | None = "",
labelnames: list[str] | None = None,
):
tag_keys = self._get_tag_keys(labelnames)
name = self._get_sanitized_opentelemetry_name(name)
self.metric = ray_metrics.Counter(
name=name,
description=documentation,
tag_keys=tag_keys,
)
def inc(self, value: int | float = 1.0):
if value == 0:
return
return self.metric.inc(value)
class RayHistogramWrapper(RayPrometheusMetric):
"""Wraps around ray.util.metrics.Histogram to provide same API as
prometheus_client.Histogram"""
def __init__(
self,
name: str,
documentation: str | None = "",
labelnames: list[str] | None = None,
buckets: list[float] | None = None,
):
tag_keys = self._get_tag_keys(labelnames)
name = self._get_sanitized_opentelemetry_name(name)
boundaries = buckets if buckets else []
self.metric = ray_metrics.Histogram(
name=name,
description=documentation,
tag_keys=tag_keys,
boundaries=boundaries,
)
def observe(self, value: int | float):
return self.metric.observe(value)
class RaySpecDecodingProm(SpecDecodingProm):
"""
RaySpecDecodingProm is used by RayMetrics to log to Ray metrics.
Provides the same metrics as SpecDecodingProm but uses Ray's
util.metrics library.
"""
_counter_cls = RayCounterWrapper
class RayKVConnectorPrometheus(KVConnectorPrometheus):
"""
RayKVConnectorPrometheus is used by RayMetrics to log Ray
metrics. Provides the same metrics as KV connectors but
uses Ray's util.metrics library.
"""
_gauge_cls = RayGaugeWrapper
_counter_cls = RayCounterWrapper
_histogram_cls = RayHistogramWrapper
class RayPerfMetricsProm(PerfMetricsProm):
"""
RayPerfMetricsProm is used by RayMetrics to log Ray
metrics. Provides the same MFU metrics as PerfMetricsProm
uses Ray's util.metrics library.
"""
_counter_cls = RayCounterWrapper
class RayPrometheusStatLogger(PrometheusStatLogger):
"""RayPrometheusStatLogger uses Ray metrics instead."""
_gauge_cls = RayGaugeWrapper
_counter_cls = RayCounterWrapper
_histogram_cls = RayHistogramWrapper
_spec_decoding_cls = RaySpecDecodingProm
_kv_connector_cls = RayKVConnectorPrometheus
_perf_metrics_cls = RayPerfMetricsProm
@staticmethod
def _unregister_vllm_metrics():
# No-op on purpose
pass
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/metrics/ray_wrappers.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from openai import APIConnectionError, OpenAI
from openai.pagination import SyncPage
from openai.types.model import Model
def get_first_model(client: OpenAI) -> str:
"""
Get the first model from the vLLM server.
"""
try:
models: SyncPage[Model] = client.models.list()
except APIConnectionError as e:
raise RuntimeError(
"Failed to get the list of models from the vLLM server at "
f"{client.base_url} with API key {client.api_key}. Check\n"
"1. the server is running\n"
"2. the server URL is correct\n"
"3. the API key is correct"
) from e
if len(models.data) == 0:
raise RuntimeError(f"No models found on the vLLM server at {client.base_url}")
return models.data[0].id
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/utils.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/test_vllm_port.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from unittest.mock import patch
import pytest
from vllm.envs import get_vllm_port
def test_get_vllm_port_not_set():
    """Test when VLLM_PORT is not set."""
    empty_env: dict[str, str] = {}
    with patch.dict(os.environ, empty_env, clear=True):
        result = get_vllm_port()
    assert result is None
def test_get_vllm_port_valid():
    """Test when VLLM_PORT is set to a valid integer."""
    with patch.dict(os.environ, {"VLLM_PORT": "5678"}, clear=True):
        port = get_vllm_port()
    assert port == 5678
def test_get_vllm_port_invalid():
    """Test when VLLM_PORT is set to a non-integer value."""
    # Nested context managers instead of the parenthesized form.
    with patch.dict(os.environ, {"VLLM_PORT": "abc"}, clear=True):
        with pytest.raises(ValueError, match="must be a valid integer"):
            get_vllm_port()
def test_get_vllm_port_uri():
    """Test when VLLM_PORT is set to a URI."""
    # Nested context managers instead of the parenthesized form.
    with patch.dict(os.environ, {"VLLM_PORT": "tcp://localhost:5678"}, clear=True):
        with pytest.raises(ValueError, match="appears to be a URI"):
            get_vllm_port()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_vllm_port.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/spec_decode/medusa.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.forward_context import set_forward_context
from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models.interfaces import is_mixture_of_experts
from vllm.v1.sample.metadata import SamplingMetadata
# Module-level logger, following the vLLM convention of one logger per module.
logger = init_logger(__name__)
class MedusaProposer:
    """
    Medusa proposer class for generating token sequences
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        """Cache the configuration values needed for drafting.

        Args:
            vllm_config: Global vLLM configuration; its speculative_config
                must be set and describes the Medusa draft model.
            device: Device on which the dummy-run input buffer is allocated.
        """
        # Save config parameters
        self.vllm_config = vllm_config
        assert vllm_config.speculative_config is not None, (
            "Speculative config must be set"
        )
        self.spec_config = vllm_config.speculative_config
        self.device = device
        # Upper bound used when sizing the dummy-run hidden-state buffer.
        self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens
        self.hidden_size = self.spec_config.draft_model_config.get_hidden_size()
        self.dtype = vllm_config.model_config.dtype

    def propose(
        self,
        target_hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        slot_mappings: dict[str, torch.Tensor]
        | list[dict[str, torch.Tensor]]
        | None = None,  # unused
    ) -> torch.Tensor:
        """Draft one token per Medusa head via greedy argmax.

        Args:
            target_hidden_states: Hidden states produced by the target model.
            sampling_metadata: Not used in this implementation; drafting is
                always greedy (argmax over each head's logits).
            slot_mappings: Unused; accepted for interface compatibility.

        Returns:
            Tensor of drafted token ids with one column per Medusa head.
        """
        # Generate blocks and compute logits
        blocks = self.model(target_hidden_states)
        logits = self.model.compute_logits(blocks)
        # Compute argmax for each Medusa head and stack into a single tensor
        # Shape: [batch_size, num_heads]
        draft_tokens = torch.stack([logit.argmax(dim=-1) for logit in logits], dim=1)
        return draft_tokens

    def load_model(self, target_model: nn.Module) -> None:
        """Load the Medusa head model (target_model itself is not read here)."""
        from vllm.compilation.backends import set_model_tag

        # Tag the model so its compiled artifacts are kept separate from the
        # target model's.
        with set_model_tag("medusa_head"):
            self.model = get_model(
                vllm_config=self.vllm_config,
                model_config=self.spec_config.draft_model_config,
            )

        assert not (
            is_mixture_of_experts(self.model)
            and self.vllm_config.parallel_config.enable_eplb
        ), "EPLB for Medusa is not supported"

    @torch.inference_mode()
    def dummy_run(self, num_tokens: int) -> None:
        """Run a forward pass on zeros (presumably for warmup/compilation —
        TODO confirm against the caller).

        Args:
            num_tokens: Token count passed to the forward context; note the
                input buffer is always sized to max_num_tokens.
        """
        hidden_states = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=self.device,
        )
        with set_forward_context(None, self.vllm_config, num_tokens=num_tokens):
            self.model(hidden_states)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/spec_decode/medusa.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/multimodal/test_video.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
import numpy as np
import numpy.typing as npt
import pytest
from vllm.multimodal.video import VIDEO_LOADER_REGISTRY, VideoLoader
# Run every test in this module as a CPU-only test.
pytestmark = pytest.mark.cpu_test

# Directory holding on-disk media fixtures (e.g. corrupted.mp4).
ASSETS_DIR = Path(__file__).parent / "assets"
assert ASSETS_DIR.exists()

# Fake "decoded video" arrays returned by the stub loaders below,
# shaped (NUM_FRAMES, 1280, 720, 3).
NUM_FRAMES = 10
FAKE_OUTPUT_1 = np.random.rand(NUM_FRAMES, 1280, 720, 3)
FAKE_OUTPUT_2 = np.random.rand(NUM_FRAMES, 1280, 720, 3)
@VIDEO_LOADER_REGISTRY.register("test_video_loader_1")
class TestVideoLoader1(VideoLoader):
    """Stub loader that ignores its input and always returns FAKE_OUTPUT_1."""

    @classmethod
    def load_bytes(cls, data: bytes, num_frames: int = -1) -> npt.NDArray:
        return FAKE_OUTPUT_1
@VIDEO_LOADER_REGISTRY.register("test_video_loader_2")
class TestVideoLoader2(VideoLoader):
    """Stub loader that ignores its input and always returns FAKE_OUTPUT_2."""

    @classmethod
    def load_bytes(cls, data: bytes, num_frames: int = -1) -> npt.NDArray:
        return FAKE_OUTPUT_2
def test_video_loader_registry():
    """Registered stub loaders are retrievable by name and yield their arrays."""
    for backend_name, expected in (
        ("test_video_loader_1", FAKE_OUTPUT_1),
        ("test_video_loader_2", FAKE_OUTPUT_2),
    ):
        loader = VIDEO_LOADER_REGISTRY.load(backend_name)
        np.testing.assert_array_equal(loader.load_bytes(b"test"), expected)
def test_video_loader_type_doesnt_exist():
    # Requesting a backend name that was never registered must fail; the
    # registry surfaces this as an AssertionError.
    with pytest.raises(AssertionError):
        VIDEO_LOADER_REGISTRY.load("non_existing_video_loader")
def test_video_backend_handles_broken_frames(monkeypatch: pytest.MonkeyPatch):
    """
    Regression test for handling videos with broken frames.

    Feeds the pre-corrupted fixture (assets/corrupted.mp4) to the opencv
    loader and verifies it neither crashes nor reports inconsistent metadata.
    """
    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv")

        # Read the pre-corrupted fixture that contains broken frames.
        video_data = (ASSETS_DIR / "corrupted.mp4").read_bytes()

        loader = VIDEO_LOADER_REGISTRY.load("opencv")
        frames, metadata = loader.load_bytes(video_data, num_frames=-1)

        num_loaded = frames.shape[0]
        num_indices = len(metadata["frames_indices"])

        # Metadata consistency: frames_indices must match the loaded frames.
        assert num_loaded == num_indices, (
            f"Frames array size must equal frames_indices length. "
            f"Got {num_loaded} frames but "
            f"{num_indices} indices"
        )

        # Broken frames are skipped, so fewer frames than the total load.
        assert num_loaded < metadata["total_num_frames"], (
            f"Should load fewer frames than total due to broken frames. "
            f"Expected fewer than {metadata['total_num_frames']} frames, "
            f"but loaded {num_loaded} frames"
        )
# ============================================================================
# Frame Recovery Tests
# ============================================================================
def test_video_recovery_simulated_failures(monkeypatch: pytest.MonkeyPatch):
    """
    Test that frame recovery correctly uses the next valid frame when
    target frames fail to load.

    Uses corrupted.mp4 and mocks VideoCapture.grab() to fail on specific
    frame indices (in addition to the real corruption at frame 17), then
    verifies recovery produces more frames.
    """
    import cv2

    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv")

        # Load corrupted.mp4 (26 frames, frame 17 is genuinely corrupted)
        video_path = ASSETS_DIR / "corrupted.mp4"
        with open(video_path, "rb") as f:
            video_data = f.read()

        # Simulate additional failures on frames 3 and 10
        # (in addition to the real corruption at frame 17)
        fail_on_frames = {3, 10}

        # Store original VideoCapture class
        original_video_capture = cv2.VideoCapture

        class MockVideoCapture:
            """Wrapper that simulates grab() failures on specific frames."""

            def __init__(self, *args, **kwargs):
                self._cap = original_video_capture(*args, **kwargs)
                # Index of the frame that the NEXT grab() call advances to;
                # starts at -1 so the first grab() sees frame 0.
                self._current_frame = -1

            def grab(self):
                self._current_frame += 1
                if self._current_frame in fail_on_frames:
                    return False  # Simulate failure
                return self._cap.grab()

            # All remaining methods delegate straight to the real capture.
            def retrieve(self):
                return self._cap.retrieve()

            def get(self, prop):
                return self._cap.get(prop)

            def isOpened(self):
                return self._cap.isOpened()

            def release(self):
                return self._cap.release()

        # Patch cv2.VideoCapture
        m.setattr(cv2, "VideoCapture", MockVideoCapture)

        loader = VIDEO_LOADER_REGISTRY.load("opencv")

        # Use num_frames=8 which samples: [0, 3, 7, 10, 14, 17, 21, 25]
        # Frame 3: mocked failure, recovery window [3, 7) -> use frame 4
        # Frame 10: mocked failure, recovery window [10, 14) -> use frame 11
        # Frame 17: real corruption, recovery window [17, 21) -> use frame 18

        # Test WITHOUT recovery - should have fewer frames due to failures
        frames_no_recovery, meta_no = loader.load_bytes(
            video_data, num_frames=8, frame_recovery=False
        )

        # Test WITH recovery - should recover using next valid frames
        frames_with_recovery, meta_yes = loader.load_bytes(
            video_data, num_frames=8, frame_recovery=True
        )

        # With recovery should have MORE frames than without
        # Without: 5 frames (3, 10, 17 all fail)
        # With: 8 frames (all recovered)
        assert frames_with_recovery.shape[0] > frames_no_recovery.shape[0], (
            f"Recovery should produce more frames. "
            f"Without: {frames_no_recovery.shape[0]}, "
            f"With: {frames_with_recovery.shape[0]}"
        )

        # Verify metadata consistency
        assert frames_no_recovery.shape[0] == len(meta_no["frames_indices"])
        assert frames_with_recovery.shape[0] == len(meta_yes["frames_indices"])

        # Verify temporal order is preserved
        assert meta_yes["frames_indices"] == sorted(meta_yes["frames_indices"])
def test_video_recovery_with_corrupted_file(monkeypatch: pytest.MonkeyPatch):
    """
    Test frame recovery with an actual corrupted video file using sparse sampling.

    corrupted.mp4 has genuine H.264 codec errors on frame 17. With
    num_frames=8 the sampled targets are [0, 3, 7, 10, 14, 17, 21, 25];
    frame 17 fails but frames 18-20 decode, so recovery can substitute
    frame 18.

    Verifies:
    1. Without recovery: frame 17 is skipped (7 frames loaded)
    2. With recovery: frame 18 fills in for frame 17 (8 frames loaded)
    3. Recovery produces MORE frames than without recovery
    4. Metadata is consistent with loaded frames
    """
    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv")

        data = (ASSETS_DIR / "corrupted.mp4").read_bytes()
        loader = VIDEO_LOADER_REGISTRY.load("opencv")

        # Frame 17 is corrupted but frames 18-20 are readable for recovery.
        plain_frames, plain_meta = loader.load_bytes(
            data, num_frames=8, frame_recovery=False
        )
        recovered_frames, recovered_meta = loader.load_bytes(
            data, num_frames=8, frame_recovery=True
        )

        # Metadata consistency for both modes.
        assert plain_frames.shape[0] == len(plain_meta["frames_indices"]), (
            "Frame count must match indices without recovery"
        )
        assert recovered_frames.shape[0] == len(
            recovered_meta["frames_indices"]
        ), "Frame count must match indices with recovery"

        # KEY ASSERTION: recovery yields strictly more frames.
        assert recovered_frames.shape[0] > plain_frames.shape[0], (
            f"Recovery should produce more frames with sparse sampling. "
            f"Got {recovered_frames.shape[0]} with recovery vs "
            f"{plain_frames.shape[0]} without"
        )

        # All eight requested frames should be present with recovery.
        assert recovered_frames.shape[0] == 8, (
            f"With recovery, should load all 8 requested frames. "
            f"Got {recovered_frames.shape[0]}"
        )

        # The container itself reports 26 frames in total.
        expected_total_frames = 26
        assert recovered_meta["total_num_frames"] == expected_total_frames, (
            f"Expected {expected_total_frames} total frames in metadata"
        )
def test_video_recovery_dynamic_backend(monkeypatch: pytest.MonkeyPatch):
    """
    Test that frame_recovery works with the dynamic video backend.

    The dynamic backend samples frames from fps/duration rather than loading
    every frame; recovery must still behave sensibly there.
    """
    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "opencv_dynamic")

        data = (ASSETS_DIR / "corrupted.mp4").read_bytes()
        loader = VIDEO_LOADER_REGISTRY.load("opencv_dynamic")

        # Same sampling parameters, recovery off vs on.
        plain_frames, _ = loader.load_bytes(
            data, fps=2, max_duration=10, frame_recovery=False
        )
        recovered_frames, recovered_meta = loader.load_bytes(
            data, fps=2, max_duration=10, frame_recovery=True
        )

        # Sanity: both modes decode something.
        assert plain_frames.shape[0] > 0, (
            "Should load some frames without recovery"
        )
        assert recovered_frames.shape[0] > 0, (
            "Should load some frames with recovery"
        )

        assert "do_sample_frames" in recovered_meta
        assert recovered_meta["do_sample_frames"] is False  # Dynamic backend always False
        assert recovered_frames.shape[0] == len(recovered_meta["frames_indices"])

        # Recovery may only add frames, never drop them.
        assert recovered_frames.shape[0] >= plain_frames.shape[0], (
            f"Recovery should produce at least as many frames. "
            f"Got {recovered_frames.shape[0]} with recovery vs "
            f"{plain_frames.shape[0]} without"
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/test_video.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/test_multi_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import filecmp
import shutil
import tempfile
from pathlib import Path
from typing import Any
import pytest
from vllm import LLM, SamplingParams
from vllm.config import KVTransferConfig
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1.base import KVConnectorBase_V1
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
from vllm.distributed.kv_transfer.kv_connector.v1.multi_connector import (
MultiConnector,
MultiKVConnectorStats,
)
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
NixlKVConnectorStats,
)
# Small instruct model used by the end-to-end connector tests below.
MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct"

# Long shared prefix so both prompts span multiple KV-cache blocks.
PROMPT_CONTEXT = "Hi " * 100
PROMPTS = [
    PROMPT_CONTEXT + "Hello, my name is",
    PROMPT_CONTEXT + "The capital of France is",
]

# temperature=0 keeps generation deterministic across runs.
SAMPLING_PARAMS = SamplingParams(temperature=0, max_tokens=20)
# Test connector with custom stats for testing MultiConnector
class MockConnectorStats(KVConnectorStats):
    """Mock stats class for testing."""

    # No extra behavior needed: a distinct subclass is enough for the tests
    # to verify that MultiConnector reconstructs per-connector stats types.
    pass
class MockConnector(KVConnectorBase_V1):
    """Mock connector that implements build_kv_connector_stats for testing."""

    @classmethod
    def build_kv_connector_stats(
        cls, data: dict[str, Any] | None = None
    ) -> KVConnectorStats | None:
        # Return a typed stats object when given data, None otherwise —
        # the latter signals "no custom stats" to MultiConnector.
        return MockConnectorStats(data=data) if data is not None else None

    # The remaining interface methods are no-op stubs; the tests in this
    # module never exercise actual KV transfer through this connector.
    def start_load_kv(self, forward_context, **kwargs):
        pass

    def wait_for_layer_load(self, layer_name):
        pass

    def save_kv_layer(self, layer_name, kv_layer, attn_metadata, **kwargs):
        pass

    def wait_for_save(self):
        pass

    def build_connector_meta(self, scheduler_output):
        return None

    def get_num_new_matched_tokens(self, request, num_computed_tokens):
        # (0, False): presumably "no externally cached tokens, nothing
        # loading asynchronously" — confirm against KVConnectorBase_V1.
        return (0, False)

    def update_state_after_alloc(self, request, blocks, num_tokens) -> None:
        pass
class MockCrossLayerConnector(MockConnector):
    """MockConnector variant that reports a preference for cross-layer blocks."""

    @property
    def prefer_cross_layer_blocks(self) -> bool:
        return True
# Register the mock connector so configs can refer to it by name via the
# factory (module path is this test module itself).
KVConnectorFactory.register_connector("MockConnector", __name__, MockConnector.__name__)
# Helper function to compare directories recursively
def _compare_directories(dir1: Path, dir2: Path) -> bool:
"""Compares two directories recursively for identical content."""
dcmp = filecmp.dircmp(dir1, dir2)
if dcmp.left_only or dcmp.right_only or dcmp.diff_files:
print(f"Differences found between {dir1} and {dir2}:")
print(f" Left only: {dcmp.left_only}")
print(f" Right only: {dcmp.right_only}")
print(f" Different files: {dcmp.diff_files}")
return False
for sub_dir in dcmp.common_dirs:
if not _compare_directories(dir1 / sub_dir, dir2 / sub_dir):
return False
return True
def test_multi_example_connector_consistency():
    """
    Tests that MultiConnector with two ExampleConnectors saves
    identical KV cache data to separate storage locations.

    NOTE(review): writes to cwd-relative storage_1/ and storage_2/
    directories and removes them at the end on the success path.
    """
    storage_1_path = Path("storage_1/")
    storage_2_path = Path("storage_2/")
    shutil.rmtree(storage_1_path, ignore_errors=True)
    shutil.rmtree(storage_2_path, ignore_errors=True)
    storage_1_path.mkdir()
    storage_2_path.mkdir()

    # Configure MultiConnector with two ExampleConnectors
    kv_transfer_config = KVTransferConfig(
        kv_connector="MultiConnector",
        kv_role="kv_both",
        kv_connector_extra_config={
            "connectors": [
                {
                    "kv_connector": "TestExampleConnector",
                    "kv_role": "kv_both",
                    "kv_connector_extra_config": {
                        "shared_storage_path": str(storage_1_path),
                        "name": "storage1",
                    },
                    "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                },
                {
                    "kv_connector": "TestExampleConnector",
                    "kv_role": "kv_both",
                    "kv_connector_extra_config": {
                        "shared_storage_path": str(storage_2_path),
                        "name": "storage2",
                    },
                    "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                },
            ]
        },
    )

    llm = LLM(
        model=MODEL_NAME,
        enforce_eager=True,
        gpu_memory_utilization=0.5,
        kv_transfer_config=kv_transfer_config,
    )
    # Run generation - this should trigger saving KV cache
    # Use a single prompt to avoid race conditions depending on the order of scheduling
    _ = llm.generate(PROMPTS[0], SAMPLING_PARAMS)

    # --- Verification ---
    # Check that both storage directories were populated
    local_subdirs = list(storage_1_path.iterdir())
    external_subdirs = list(storage_2_path.iterdir())

    assert len(local_subdirs) > 0, (
        f"Local storage path {storage_1_path} is empty after generation."
    )
    assert len(external_subdirs) > 0, (
        f"External storage path {storage_2_path} is empty after generation."
    )
    assert len(local_subdirs) == len(external_subdirs), (
        f"Mismatch in number of cache entries: "
        f"Local={len(local_subdirs)}, External={len(external_subdirs)}"
    )

    # The subdirectories should correspond to the prompt hashes
    # Since prompts are the same, the hash directories should be the same name
    local_subdir_names = sorted([d.name for d in local_subdirs])
    external_subdir_names = sorted([d.name for d in external_subdirs])

    assert local_subdir_names == external_subdir_names, (
        "Cache directory names do not match between local and external storage"
    )

    # Compare the contents of each corresponding cache directory
    for subdir_name in local_subdir_names:
        print(f"Comparing contents of cache directory: {subdir_name}")
        assert _compare_directories(
            storage_1_path / subdir_name, storage_2_path / subdir_name
        ), (
            f"Contents differ for cache directory '{subdir_name}' between "
            f"{storage_1_path} and {storage_2_path}"
        )

    events = get_connector_events()
    # First event is set_xfer_handshake_metadata from initialization, then
    # get_num_new_matched_tokens and update_state_after_alloc from generate().
    assert events["storage1-SCHEDULER"][:4] == [
        "set_xfer_handshake_metadata",
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    # First three events are from initialization (register_kv_caches,
    # set_host_xfer_buffer_ops, get_handshake_metadata), then generate() events.
    assert events["storage1-WORKER"][:7] == [
        "register_kv_caches",
        "set_host_xfer_buffer_ops",
        "get_handshake_metadata",
        "bind_connector_metadata",
        "start_load_kv",
        "wait_for_layer_load",
        "save_kv_layer",
    ]
    # Both sub-connectors must observe the identical event sequence.
    assert events["storage2-SCHEDULER"][:4] == [
        "set_xfer_handshake_metadata",
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    assert events["storage2-WORKER"][:7] == [
        "register_kv_caches",
        "set_host_xfer_buffer_ops",
        "get_handshake_metadata",
        "bind_connector_metadata",
        "start_load_kv",
        "wait_for_layer_load",
        "save_kv_layer",
    ]

    # Reset prefix cache or else we'll just get the tokens back from there.
    llm.reset_prefix_cache()

    # Run generation again - this should trigger loading from the first
    # connector.
    _ = llm.generate(PROMPTS[1], SAMPLING_PARAMS)

    events = get_connector_events()
    # get_num_new_matched_tokens will return new tokens from the first
    # connector so update_state_after_alloc will be with allocated blocks
    # on that one but with zero blocks for others (first nonzero match is
    # chosen).
    assert events["storage1-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[7] 96",
        "build_connector_meta",
    ]
    assert events["storage2-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]

    # Delete storage1 connector state
    shutil.rmtree(storage_1_path)

    # Reset prefix cache or else we'll just get the tokens back from there.
    llm.reset_prefix_cache()

    # Run generation again - this should trigger loading from the first
    # connector.
    _ = llm.generate(PROMPTS[0], SAMPLING_PARAMS)

    events = get_connector_events()
    # get_num_new_matched_tokens will be called for both connectors but will
    # return 0 from the first connector, but the second connector should have
    # a hit, so update_state_after_alloc will only be called with allocated
    # blocks for the second connector.
    assert events["storage1-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[0] 0",
        "build_connector_meta",
    ]
    assert events["storage2-SCHEDULER"][:3] == [
        "get_num_new_matched_tokens 0",
        "update_state_after_alloc num_blocks=[7] 96",
        "build_connector_meta",
    ]

    # Clean up
    shutil.rmtree(storage_1_path)
    shutil.rmtree(storage_2_path)
def get_connector_events() -> dict[str, list[str]]:
    """Read all connector event logs from the temp dir and truncate them."""
    import glob

    pattern = tempfile.gettempdir() + "/connector_*_events.log"
    connector_events: dict[str, list[str]] = {}
    for fname in glob.glob(pattern):
        # The connector name is embedded in the filename.
        name = fname.split("connector_")[1].split("_events.log")[0]
        try:
            with open(fname, "r+") as f:
                connector_events[name] = [ln.strip() for ln in f if ln.strip()]
                f.truncate(0)
        except Exception as e:
            print(f"[ERROR] Could not read connector events for {name}: {e}")

    return connector_events
def test_engine_id_conflict():
    """Two independently built KVTransferConfigs must not share an engine_id."""
    first, second = (KVTransferConfig() for _ in range(2))
    ids = [first.engine_id, second.engine_id]
    assert ids[0] != ids[1], (
        f"Engine IDs should be different for different configs. Got {ids}"
    )
def test_multi_connector_handle_preemptions_integration():
    """
    Integration test: verify MultiConnector delegates handle_preemptions
    to all sub-connectors.

    Uses TestExampleConnector which logs all method calls to temp files.
    This test directly calls handle_preemptions on a MultiConnector with
    TestExampleConnector sub-connectors and verifies the calls are logged.
    """
    from tests.v1.kv_connector.unit.utils import (
        create_scheduler,
        create_vllm_config,
    )

    # mkdtemp gives a unique directory so concurrent runs cannot collide.
    storage_path = Path(tempfile.mkdtemp())
    try:
        # Configure MultiConnector with two TestExampleConnectors
        kv_transfer_config = KVTransferConfig(
            kv_connector="MultiConnector",
            kv_role="kv_both",
            kv_connector_extra_config={
                "connectors": [
                    {
                        "kv_connector": "TestExampleConnector",
                        "kv_role": "kv_both",
                        "kv_connector_extra_config": {
                            "shared_storage_path": str(storage_path / "s1"),
                            "name": "preempt1",
                        },
                        "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                    },
                    {
                        "kv_connector": "TestExampleConnector",
                        "kv_role": "kv_both",
                        "kv_connector_extra_config": {
                            "shared_storage_path": str(storage_path / "s2"),
                            "name": "preempt2",
                        },
                        "kv_connector_module_path": "tests.v1.kv_connector.unit.utils",
                    },
                ]
            },
        )

        vllm_config = create_vllm_config(
            block_size=16,
            max_num_batched_tokens=100,
            kv_connector_extra_config=kv_transfer_config.kv_connector_extra_config,
        )
        vllm_config.kv_transfer_config = kv_transfer_config

        # Create scheduler - this initializes the MultiConnector with SCHEDULER role
        scheduler = create_scheduler(vllm_config, num_blocks=10)

        # Clear any events from initialization
        get_connector_events()

        # Directly call handle_preemptions on the scheduler's connector
        # Note: handle_preemptions is normally a worker-side method, but we're
        # testing the delegation behavior of MultiConnector here.
        # The connector attribute contains the KV connector.
        assert scheduler.connector is not None, "Scheduler should have a connector"
        preempted_req_ids = {"req-1", "req-2", "req-3"}
        scheduler.connector.handle_preemptions(preempted_req_ids)

        # Verify both connectors received the handle_preemptions call
        events = get_connector_events()

        # Both SCHEDULER-role connectors should have logged handle_preemptions
        assert "handle_preemptions" in events.get("preempt1-SCHEDULER", []), (
            f"preempt1-SCHEDULER should have handle_preemptions call. "
            f"Got events: {events}"
        )
        assert "handle_preemptions" in events.get("preempt2-SCHEDULER", []), (
            f"preempt2-SCHEDULER should have handle_preemptions call. "
            f"Got events: {events}"
        )
    finally:
        # Cleanup
        shutil.rmtree(storage_path, ignore_errors=True)
class TestMultiConnectorStats:
"""Tests for MultiConnector stats reconstruction and operations."""
def test_build_kv_connector_stats_with_none(self):
"""Test that build_kv_connector_stats returns empty stats when given None."""
stats = MultiConnector.build_kv_connector_stats(data=None)
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
assert len(stats.data) == 0
assert stats.is_empty()
def test_build_kv_connector_stats_with_empty_dict(self):
"""Test that build_kv_connector_stats returns empty stats with empty dict."""
stats = MultiConnector.build_kv_connector_stats(data={})
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
assert len(stats.data) == 0
assert stats.is_empty()
def test_build_kv_connector_stats_reconstructs_nixl_stats(self):
"""Test that NixlConnector stats are properly reconstructed with
correct data."""
serialized_data = {
"NixlConnector": {
"data": {
"transfer_duration": [1.5, 2.3],
"post_duration": [0.1, 0.2],
"bytes_transferred": [1024, 2048],
"num_descriptors": [10, 20],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
}
}
stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
assert "NixlConnector" in stats.data
nixl_stats = stats.data["NixlConnector"]
assert isinstance(nixl_stats, NixlKVConnectorStats)
assert nixl_stats.data["transfer_duration"] == [1.5, 2.3]
assert nixl_stats.data["post_duration"] == [0.1, 0.2]
assert nixl_stats.data["bytes_transferred"] == [1024, 2048]
assert nixl_stats.data["num_descriptors"] == [10, 20]
def test_build_kv_connector_stats_with_multiple_connectors(self):
"""Test reconstruction with multiple connector types that have custom stats."""
serialized_data = {
"NixlConnector": {
"data": {
"transfer_duration": [1.5],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
},
"MockConnector": {"data": {"mock_field": [1, 2, 3]}},
}
stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
# Both connectors should be reconstructed
assert len(stats.data) == 2
assert "NixlConnector" in stats.data
assert "MockConnector" in stats.data
assert isinstance(stats.data["NixlConnector"], NixlKVConnectorStats)
assert isinstance(stats.data["MockConnector"], MockConnectorStats)
# Verify data is preserved
assert stats.data["MockConnector"].data == {"mock_field": [1, 2, 3]}
def test_build_kv_connector_stats_raises_error_for_unknown_connector(self):
"""Test that unknown connectors raise an error."""
serialized_data = {
"UnknownConnector": {"data": {"some_field": [1, 2, 3]}},
"NixlConnector": {
"data": {
"transfer_duration": [1.5],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
},
}
with pytest.raises(
ValueError, match="Connector 'UnknownConnector' is not registered."
):
MultiConnector.build_kv_connector_stats(data=serialized_data)
def test_build_kv_connector_stats_with_already_instantiated_objects(self):
"""Test that already-instantiated stats objects are preserved (same process)."""
# This simulates the in-process case where stats are not serialized
nixl_stats = NixlKVConnectorStats(
data={
"transfer_duration": [1.5],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
mock_stats = MockConnectorStats(data={"mock_field": [1, 2, 3]})
data_with_objects = {
"NixlConnector": nixl_stats,
"MockConnector": mock_stats,
}
stats = MultiConnector.build_kv_connector_stats(data=data_with_objects)
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
assert len(stats.data) == 2
# Verify objects are preserved as-is
assert stats.data["NixlConnector"] is nixl_stats
assert stats.data["MockConnector"] is mock_stats
def test_build_kv_connector_stats_with_mixed_objects_and_dicts(self):
"""Test handling mixed already-instantiated and serialized stats."""
# This can happen during transition or partial serialization
nixl_stats = NixlKVConnectorStats(
data={
"transfer_duration": [1.5],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
mixed_data = {
"NixlConnector": nixl_stats, # Already instantiated
"MockConnector": {"data": {"mock_field": [1, 2, 3]}}, # Serialized
}
stats = MultiConnector.build_kv_connector_stats(data=mixed_data)
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
assert len(stats.data) == 2
# Instantiated object preserved
assert stats.data["NixlConnector"] is nixl_stats
# Serialized object reconstructed
assert isinstance(stats.data["MockConnector"], MockConnectorStats)
assert stats.data["MockConnector"].data == {"mock_field": [1, 2, 3]}
def test_build_kv_connector_stats_skips_connectors_without_custom_stats(self):
"""Test that connectors without custom stats (return None) are skipped."""
# ExampleConnector doesn't override build_kv_connector_stats,
# so it returns None and should be skipped
serialized_data = {
"NixlConnector": {
"data": {
"transfer_duration": [1.5],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
},
"ExampleConnector": {"data": {"some_field": [1, 2, 3]}},
}
stats = MultiConnector.build_kv_connector_stats(data=serialized_data)
assert stats is not None
assert isinstance(stats, MultiKVConnectorStats)
# Only NixlConnector should be reconstructed
assert len(stats.data) == 1
assert "NixlConnector" in stats.data
assert isinstance(stats.data["NixlConnector"], NixlKVConnectorStats)
# ExampleConnector should be skipped (returns None)
assert "ExampleConnector" not in stats.data
def test_build_kv_connector_stats_handles_malformed_data(self):
"""Test that malformed data raises appropriate errors."""
serialized_data = {
"NixlConnector": {"wrong_field": {"transfer_duration": [1.5]}}
}
with pytest.raises(AssertionError, match="Expected a dict with a 'data' field"):
MultiConnector.build_kv_connector_stats(data=serialized_data)
def test_aggregate_same_connector(self):
"""Test aggregating stats from the same connector type."""
stats1 = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(
data={
"transfer_duration": [1.0],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
}
)
stats2 = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(
data={
"transfer_duration": [2.0],
"post_duration": [0.2],
"bytes_transferred": [2048],
"num_descriptors": [20],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
}
)
result = stats1.aggregate(stats2)
assert result is stats1 # Should return self
assert "NixlConnector" in result.data
nixl_stats = result.data["NixlConnector"]
assert nixl_stats.data["transfer_duration"] == [1.0, 2.0]
assert nixl_stats.data["post_duration"] == [0.1, 0.2]
assert nixl_stats.data["bytes_transferred"] == [1024, 2048]
assert nixl_stats.data["num_descriptors"] == [10, 20]
def test_aggregate_new_connector(self):
"""Test aggregating stats when a new connector type appears."""
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
KVConnectorStats,
)
stats1 = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(
data={
"transfer_duration": [1.0],
"post_duration": [0.1],
"bytes_transferred": [1024],
"num_descriptors": [10],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
}
)
stats2 = MultiKVConnectorStats(
data={"ExampleConnector": KVConnectorStats(data={"field": [1, 2]})}
)
result = stats1.aggregate(stats2)
assert "NixlConnector" in result.data
assert "ExampleConnector" in result.data
def test_reduce(self):
"""Test that reduce() correctly reduces all nested connector stats."""
stats = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(
data={
"transfer_duration": [1.0, 2.0],
"post_duration": [0.1, 0.2],
"bytes_transferred": [1024, 2048],
"num_descriptors": [10, 20],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
}
)
reduced = stats.reduce()
assert "NixlConnector" in reduced
assert isinstance(reduced["NixlConnector"], dict)
# Check that the stats were reduced (should have aggregated values)
assert "Num successful transfers" in reduced["NixlConnector"]
assert reduced["NixlConnector"]["Num successful transfers"] == 2
def test_reset(self):
"""Test that reset() resets all nested connector stats."""
stats = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(
data={
"transfer_duration": [1.0, 2.0],
"post_duration": [0.1, 0.2],
"bytes_transferred": [1024, 2048],
"num_descriptors": [10, 20],
"num_failed_transfers": [],
"num_failed_notifications": [],
}
)
}
)
assert not stats.is_empty()
stats.reset()
# After reset, stats should be empty
assert stats.is_empty()
nixl_stats = stats.data["NixlConnector"]
assert len(nixl_stats.data["transfer_duration"]) == 0
def test_is_empty_with_multiple_connectors(self):
"""Test is_empty() returns correct value with multiple connectors."""
# All empty
stats = MultiKVConnectorStats(
data={
"NixlConnector": NixlKVConnectorStats(data={}),
}
)
# Initialize empty stats
stats.data["NixlConnector"].reset()
assert stats.is_empty()
# One non-empty
stats.data["NixlConnector"].data["transfer_duration"].append(1.0)
assert not stats.is_empty()
class TestMultiConnectorPreferCrossLayerBlocks:
    """prefer_cross_layer_blocks must be True only when every sub-connector
    prefers cross-layer blocks."""

    @staticmethod
    def _make_multi(*connector_types):
        # Build a MultiConnector without running __init__, attaching bare
        # (uninitialized) sub-connector instances of the requested types.
        mc = MultiConnector.__new__(MultiConnector)
        mc._connectors = [t.__new__(t) for t in connector_types]
        return mc

    def test_all_connectors_prefer_cross_layer_blocks(self):
        mc = self._make_multi(MockCrossLayerConnector, MockCrossLayerConnector)
        assert mc.prefer_cross_layer_blocks is True

    def test_mixed_connectors_do_not_prefer_cross_layer_blocks(self):
        # MockConnector inherits the default (False), so the aggregate is False.
        mc = self._make_multi(MockCrossLayerConnector, MockConnector)
        assert mc.prefer_cross_layer_blocks is False
def test_multi_connector_overrides_all_base_methods():
    """
    Ensure MultiConnector overrides all public methods from KVConnectorBase_V1.

    MultiConnector is a pure delegator: any base method it fails to override
    would silently run the base implementation instead of fanning out to the
    wrapped sub-connectors. This test diffs the base class's public,
    non-abstract members against MultiConnector's own __dict__.
    """
    # These are fine to inherit from KVConnectorBase_V1
    # TODO(https://github.com/vllm-project/vllm/pull/31811): Remove
    # get_kv_connector_kv_cache_events from INHERITED_OK once implemented.
    INHERITED_OK = {
        "role",
        "has_connector_metadata",
        "get_kv_connector_kv_cache_events",
    }
    # Public members only; abstract methods must be overridden anyway, so they
    # are excluded from the delegation check.
    base_members = {
        name for name in dir(KVConnectorBase_V1) if not name.startswith("_")
    } - KVConnectorBase_V1.__abstractmethods__
    # __dict__ (not hasattr) so inherited definitions do not count as overrides.
    missing = [
        name
        for name in sorted(base_members)
        if name not in INHERITED_OK and name not in MultiConnector.__dict__
    ]
    if missing:
        pytest.fail(f"""
MultiConnector does not override these KVConnectorBase_V1 methods: {missing}
MultiConnector wraps other connectors and must delegate all methods.
Please add overrides that delegate to self._connectors.
Options:
1. Add delegation in MultiConnector (preferred)
2. Add to INHERITED_OK if the base implementation works correctly
""")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_multi_connector.py",
"license": "Apache License 2.0",
"lines": 654,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
from collections.abc import Iterable
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import torch
from vllm.config import VllmConfig
from vllm.config.kv_transfer import KVTransferConfig
from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBaseType
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
CopyBlocksOp,
KVConnectorBase_V1,
KVConnectorHandshakeMetadata,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
KVConnectorPromMetrics,
KVConnectorStats,
PromMetric,
PromMetricT,
)
from vllm.logger import init_logger
from vllm.v1.attention.backend import AttentionBackend, AttentionMetadata
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.outputs import KVConnectorOutput
if TYPE_CHECKING:
from vllm.distributed.kv_events import KVCacheEvent
from vllm.forward_context import ForwardContext
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
logger = init_logger(__name__)
@dataclass
class MultiKVConnectorMetadata(KVConnectorMetadata):
    """Aggregated metadata for MultiConnector: one entry per sub-connector."""

    # Per-connector metadata, ordered to match MultiConnector._connectors.
    metadata: tuple[KVConnectorMetadata, ...]
    # Request id -> number of *extra* pending async saves (beyond one),
    # propagated from the scheduler to the worker side. None when unused.
    extra_async_saves: dict[str, int] | None = None
@dataclass
class MultiKVConnectorStats(KVConnectorStats):
    """
    Container mapping connector id -> that connector's KVConnectorStats.
    Keeping one entry per connector lets each connector's stats be
    aggregated and reduced independently.
    """

    def aggregate(self, other: KVConnectorStats) -> KVConnectorStats:
        """Merge ``other`` into this container and return self."""
        for connector_id, stats in other.data.items():
            if connector_id not in self.data:
                # First time this connector is seen: adopt its stats object.
                self[connector_id] = stats
                continue
            current = self[connector_id]
            assert isinstance(stats, type(current))
            self[connector_id] = current.aggregate(stats)
        return self

    def reset(self):
        """Reset every nested connector's stats in place (keys are kept)."""
        for stats in self.data.values():
            stats.reset()

    def reduce(self) -> dict[str, Any]:
        """Reduce each connector's stats to a loggable summary dict."""
        # TODO (NickLucche) Adjust for logging on separate lines
        reduced: dict[str, Any] = {}
        for connector_id, stats in self.data.items():
            reduced[connector_id] = stats.reduce()
        return reduced

    def is_empty(self) -> bool:
        """True iff every nested connector's stats are empty."""
        for stats in self.data.values():
            if not stats.is_empty():
                return False
        return True

    def __getitem__(self, connector_id: str) -> KVConnectorStats:
        return self.data[connector_id]

    def __setitem__(self, connector_id: str, stats: KVConnectorStats):
        self.data[connector_id] = stats
class MultiKVConnectorPromMetrics(KVConnectorPromMetrics):
    """Fans Prometheus observations out to per-connector metric objects."""
    def __init__(
        self,
        vllm_config: "VllmConfig",
        metric_types: dict[type[PromMetric], type[PromMetricT]],
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
        prom_metrics: dict[str, KVConnectorPromMetrics],
    ):
        super().__init__(vllm_config, metric_types, labelnames, per_engine_labelvalues)
        # Connector class name -> that connector's KVConnectorPromMetrics.
        self._prom_metrics = prom_metrics
    def observe(self, transfer_stats_data: dict[str, Any], engine_idx: int = 0):
        """Dispatch each connector's serialized stats to its metrics object.

        transfer_stats_data maps connector name to a serialized stats dict
        whose payload lives under the 'data' key; unknown connector names
        are a programming error (asserted).
        """
        for connector_id, stats_data in transfer_stats_data.items():
            assert connector_id in self._prom_metrics, (
                f"{connector_id} is not contained in the list of registered connectors "
                f"with Prometheus metrics support: {self._prom_metrics.keys()}"
            )
            self._prom_metrics[connector_id].observe(stats_data["data"], engine_idx)
class MultiConnector(KVConnectorBase_V1):
    """
    A wrapper for using multiple KVConnectors at the same time.

    The current logic is:
    - Load KV from the first connector that advertises available tokens from
      get_num_new_matched_tokens(), based on the order in the config.
    - Save to all connectors.
    """
    def __init__(
        self,
        vllm_config: "VllmConfig",
        role: KVConnectorRole,
        kv_cache_config: "KVCacheConfig",
    ):
        super().__init__(
            vllm_config=vllm_config, role=role, kv_cache_config=kv_cache_config
        )
        # Sub-connectors in config order. Order matters: loads are assigned
        # to the first connector that reports matched tokens.
        self._connectors: list[KVConnectorBase_V1] = []
        self._ktc_kv_transfer_config = []
        for connector_cls, temp_config in self._get_connector_classes_and_configs(
            vllm_config
        ):
            self._connectors.append(connector_cls(temp_config, role, kv_cache_config))
            self._ktc_kv_transfer_config.append(temp_config.kv_transfer_config)
        # A mapping from request id to the index of the connector chosen to
        # load the request from (if any).
        self._requests_to_connector: dict[str, int] = {}
        # Keeps track of *additional* remaining async saves (beyond 1) to be
        # finished per request. Not needed for async loads since we only allow
        # a single connector to load.
        # Propagated from scheduler to worker side via the connector metadata.
        self._extra_async_saves: dict[str, int] = {}
    @property
    def prefer_cross_layer_blocks(self) -> bool:
        """True only if every sub-connector prefers cross-layer blocks."""
        if not self._connectors:
            return False
        return all(c.prefer_cross_layer_blocks for c in self._connectors)
    @classmethod
    def _get_connector_classes_and_configs(
        cls, vllm_config: "VllmConfig"
    ) -> list[tuple[type[KVConnectorBaseType], "VllmConfig"]]:
        """Resolve the (connector class, per-connector config) pair for each
        entry of kv_connector_extra_config["connectors"]."""
        assert vllm_config.kv_transfer_config is not None
        ktcs = vllm_config.kv_transfer_config.kv_connector_extra_config.get(
            "connectors"
        )
        assert ktcs is not None
        ret: list[tuple[type[KVConnectorBaseType], VllmConfig]] = []
        for ktc in ktcs:
            # Shallow-copy so each sub-connector gets its own
            # kv_transfer_config while sharing the rest of the vllm config.
            temp_config = copy.copy(vllm_config)
            engine_id = ktc.get("engine_id", vllm_config.kv_transfer_config.engine_id)
            # NOTE(review): if ktc itself contains "engine_id", passing **ktc
            # together with the explicit engine_id kwarg raises TypeError
            # ("multiple values for keyword argument") — confirm callers never
            # include it, or pop() it from ktc first.
            temp_config.kv_transfer_config = KVTransferConfig(
                **ktc, engine_id=engine_id
            )
            ret.append(
                (
                    KVConnectorFactory.get_connector_class(
                        temp_config.kv_transfer_config
                    ),
                    temp_config,
                )
            )
        return ret
    def register_cross_layers_kv_cache(
        self, kv_cache: torch.Tensor, attn_backend: type[AttentionBackend]
    ):
        # Register on all connectors
        for c in self._connectors:
            c.register_cross_layers_kv_cache(kv_cache, attn_backend)
    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """Register the per-layer KV caches with every sub-connector."""
        for c in self._connectors:
            c.register_kv_caches(kv_caches)
    # We must override the base class method here because we need to bind
    # the metadata to each connector in the order of the connectors in the
    # MultiKVConnectorMetadata.
    #
    # Note: Call the base class method to ensure metadata is also set on the
    # MultiConnector instance itself; otherwise, `has_connector_metadata()` will
    # always return False.
    def bind_connector_metadata(self, connector_metadata: KVConnectorMetadata) -> None:
        assert isinstance(connector_metadata, MultiKVConnectorMetadata)
        if connector_metadata.extra_async_saves:
            self._extra_async_saves.update(connector_metadata.extra_async_saves)
        for c, cm in zip(self._connectors, connector_metadata.metadata):
            c.bind_connector_metadata(cm)
        super().bind_connector_metadata(connector_metadata)
    def clear_connector_metadata(self) -> None:
        """Clear metadata on every sub-connector and on self."""
        for c in self._connectors:
            c.clear_connector_metadata()
        super().clear_connector_metadata()
    def shutdown(self):
        """Shut down all sub-connectors, then re-raise the last failure.

        Every connector's shutdown is attempted even if an earlier one
        raises; only the most recent exception is propagated (earlier ones
        are logged).
        """
        exception: Exception | None = None
        for c in self._connectors:
            try:
                c.shutdown()
            except Exception as e:
                logger.exception(
                    "Exception during connector %s shutdown.", c.__class__.__name__
                )
                exception = e
        if exception:
            raise exception
    # ==============================
    # Worker-side methods
    # ==============================
    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Kick off KV loading on every sub-connector."""
        for c in self._connectors:
            c.start_load_kv(forward_context, **kwargs)
    def wait_for_layer_load(self, layer_name: str) -> None:
        """Block until every sub-connector has finished loading the layer."""
        for c in self._connectors:
            c.wait_for_layer_load(layer_name)
    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: AttentionMetadata,
        **kwargs,
    ) -> None:
        """Save a layer's KV to every sub-connector (saves fan out to all)."""
        for c in self._connectors:
            c.save_kv_layer(layer_name, kv_layer, attn_metadata, **kwargs)
    def wait_for_save(self):
        """Block until every sub-connector has finished saving."""
        for c in self._connectors:
            c.wait_for_save()
    def get_finished(
        self, finished_req_ids: set[str]
    ) -> tuple[set[str] | None, set[str] | None]:
        """Aggregate (finished_sending, finished_recving) across connectors.

        A request only counts as finished sending once all connectors that
        were async-saving it have reported done (tracked via
        _extra_async_saves). Empty sets are returned as None.
        """
        finished_sending: set[str] = set()
        finished_recving: set[str] = set()
        for c in self._connectors:
            sending, recving = c.get_finished(finished_req_ids)
            if not recving and not sending:
                continue
            # Aggregate finished recving request ids.
            finished_recving.update(recving or ())
            # Aggregate finished sending request ids - only include
            # once we've drained the "extra" count (for cases where
            # more than one connector is async-saving the same request).
            for req_id in sending or ():
                extra_pending = self._extra_async_saves.get(req_id)
                if extra_pending is None:
                    finished_sending.add(req_id)
                    continue
                assert extra_pending > 0
                if extra_pending == 1:
                    del self._extra_async_saves[req_id]
                else:
                    self._extra_async_saves[req_id] = extra_pending - 1
        return finished_sending or None, finished_recving or None
    def get_block_ids_with_load_errors(self) -> set[int]:
        """Union of block ids any sub-connector failed to load."""
        agg_block_ids: set[int] = set()
        for c in self._connectors:
            agg_block_ids |= c.get_block_ids_with_load_errors()
        return agg_block_ids
    def set_host_xfer_buffer_ops(self, copy_operation: CopyBlocksOp):
        """Set xPU-specific copy ops for all sub-connectors."""
        for c in self._connectors:
            c.set_host_xfer_buffer_ops(copy_operation)
    def handle_preemptions(self, preempted_req_ids: set[str]):
        """Handle preempted requests for all sub-connectors."""
        for c in self._connectors:
            c.handle_preemptions(preempted_req_ids)
    def get_finished_count(self) -> int | None:
        # TODO(https://github.com/vllm-project/vllm/issues/33400)
        # Currently no connectors return non-None
        return None
    # TODO: Add a generic implementation of 'get_kv_connector_kv_cache_events'
    # method for the MultiConnector. It should be able to get events from
    # multiple connectors, handling the case where only a subset of the
    # requested connectors implements the 'get_kv_connector_kv_cache_events'
    # WIP: https://github.com/vllm-project/vllm/pull/31811
    # ==============================
    # Scheduler-side methods
    # ==============================
    def get_num_new_matched_tokens(
        self,
        request: "Request",
        num_computed_tokens: int,
    ) -> tuple[int | None, bool]:
        """Return matched-token info from the first connector with a match.

        Returns (None, False) if any connector is still looking up matches.
        The first connector (in config order) reporting tokens > 0 is
        recorded in _requests_to_connector as the loader for this request.
        """
        to_return = (0, False)
        for i, c in enumerate(self._connectors):
            toks, load_async = c.get_num_new_matched_tokens(
                request, num_computed_tokens
            )
            # If there is a connector still looking up the matches,
            # we return None to indicate that we are not done yet.
            if toks is None:
                return (None, False)
            # The first connector that has new matched tokens will be assigned
            # to this request.
            if to_return[0] == 0 and toks > 0:
                self._requests_to_connector[request.request_id] = i
                to_return = (toks, load_async)
        return to_return
    def update_state_after_alloc(
        self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int
    ):
        """Forward the allocated blocks only to the chosen loading connector;
        all other connectors see empty blocks and zero external tokens."""
        chosen_connector = self._requests_to_connector.get(request.request_id, -1)
        empty_blocks = blocks.new_empty()
        for i, c in enumerate(self._connectors):
            if i == chosen_connector:
                # Forward call to the chosen connector (if any).
                c.update_state_after_alloc(request, blocks, num_external_tokens)
            else:
                # Call with empty blocks for other connectors.
                c.update_state_after_alloc(request, empty_blocks, 0)
    def build_connector_meta(
        self, scheduler_output: SchedulerOutput
    ) -> MultiKVConnectorMetadata:
        """Collect per-connector metadata (in connector order) and piggyback
        any pending extra-async-save counts for the worker side."""
        metadata = MultiKVConnectorMetadata(
            metadata=tuple(
                c.build_connector_meta(scheduler_output) for c in self._connectors
            )
        )
        if self._extra_async_saves:
            metadata.extra_async_saves = self._extra_async_saves
            self._extra_async_saves = {}
        return metadata
    def update_connector_output(self, connector_output: KVConnectorOutput):
        """Propagate worker-side connector output to all sub-connectors."""
        for c in self._connectors:
            c.update_connector_output(connector_output)
    def get_handshake_metadata(self) -> KVConnectorHandshakeMetadata | None:
        """
        Get the KVConnector handshake metadata from sub-connectors.
        Returns the first non-None metadata from sub-connectors.
        """
        for c in self._connectors:
            metadata = c.get_handshake_metadata()
            if metadata is not None:
                return metadata
        return None
    def set_xfer_handshake_metadata(
        self, metadata: dict[int, KVConnectorHandshakeMetadata]
    ) -> None:
        """
        Set the KV connector handshake metadata for all sub-connectors.
        This is needed to start the NIXL listener thread for NixlConnector.
        """
        for c in self._connectors:
            c.set_xfer_handshake_metadata(metadata)
    def request_finished(
        self,
        request: "Request",
        blocks: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """Notify all sub-connectors a request finished.

        Returns (any async save pending, kv transfer params). At most one
        connector may produce transfer params; more than one pending async
        save is recorded in _extra_async_saves for get_finished() to drain.
        """
        async_saves = 0
        kv_txfer_params = None
        for c in self._connectors:
            async_save, txfer_params = c.request_finished(request, blocks)
            if async_save:
                async_saves += 1
            if txfer_params is not None:
                if kv_txfer_params is not None:
                    # TODO we can probably change this to merge the dicts here,
                    # checking for key clashes.
                    raise RuntimeError(
                        "Only one connector can produce KV transfer params"
                    )
                kv_txfer_params = txfer_params
        if async_saves > 1:
            self._extra_async_saves[request.request_id] = async_saves - 1
        # Clean up other state for this request.
        self._requests_to_connector.pop(request.request_id, None)
        return async_saves > 0, kv_txfer_params
    def take_events(self) -> Iterable["KVCacheEvent"]:
        """Yield KV cache events from all sub-connectors, in connector order."""
        for c in self._connectors:
            yield from c.take_events()
    @classmethod
    def get_required_kvcache_layout(cls, vllm_config: "VllmConfig") -> str | None:
        """
        Get the required KV cache layout for this connector.
        Args:
            vllm_config (VllmConfig): the vllm config.
        Returns:
            str: the required KV cache layout. e.g. HND, or NHD.
            None if the connector does not require a specific layout.
        """
        assert vllm_config.kv_transfer_config is not None
        layouts: set[str] = set()
        for connector_cls, temp_config in cls._get_connector_classes_and_configs(
            vllm_config
        ):
            required_kvcache_layout = connector_cls.get_required_kvcache_layout(
                temp_config
            )
            if required_kvcache_layout is not None:
                layouts.add(required_kvcache_layout)
        if len(layouts) > 1:
            # NOTE(review): the concatenated message renders as
            # "...layouts).All connectors..." — missing space/newline between
            # the two literals; consider fixing the string.
            raise ValueError(
                f"KV cache layout mismatch: "
                f"found {len(layouts)} different layouts "
                f"({', '.join(layouts)})."
                f"All connectors must use the same layout."
            )
        return next(iter(layouts), None)
    @classmethod
    def build_kv_connector_stats(
        cls, data: dict[str, Any] | None = None
    ) -> KVConnectorStats | None:
        """Build a MultiKVConnectorStats, reconstructing nested stats objects.

        `data` maps connector name to either an already-built
        KVConnectorStats (same-process) or a serialized {'data': ...} dict
        (cross-process); connectors that cannot rebuild stats are skipped.
        """
        if data is None:
            return MultiKVConnectorStats()
        # data is a dict mapping connector name to their stats data.
        # The stats data can be either:
        # 1. Already-instantiated KVConnectorStats objects (same process)
        # 2. Serialized dicts (cross-process after serialization)
        # We need to reconstruct proper KVConnectorStats objects from dicts
        reconstructed_data = {}
        for connector_name, stats_value in data.items():
            # If already a KVConnectorStats object, use it directly
            if isinstance(stats_value, KVConnectorStats):
                reconstructed_data[connector_name] = stats_value
                continue
            # Otherwise, reconstruct from serialized dict
            # Get the connector class to reconstruct its stats
            connector_cls = KVConnectorFactory.get_connector_class_by_name(
                connector_name
            )
            # stats_value is the serialized dataclass which contains {'data': {...}}
            # We need to extract the inner 'data' field to avoid double-nesting
            assert isinstance(stats_value, dict) and "data" in stats_value, (
                f"Expected a dict with a 'data' field, got {stats_value}"
            )
            inner_data = stats_value["data"]
            # Use the connector's build_kv_connector_stats to reconstruct
            if reconstructed_stats := connector_cls.build_kv_connector_stats(
                data=inner_data
            ):
                reconstructed_data[connector_name] = reconstructed_stats
        return MultiKVConnectorStats(data=reconstructed_data)
    def get_kv_connector_stats(self) -> MultiKVConnectorStats | None:
        """Collect stats from sub-connectors, keyed by connector class name.

        Returns None when no sub-connector reported stats.
        """
        # Group connector stats by connector type.
        stats_by_connector: MultiKVConnectorStats | None = None
        for c in self._connectors:
            stats = c.get_kv_connector_stats()
            if stats is None:
                continue
            if stats_by_connector is None:
                # Lazy init to allow optional return value.
                stats_by_connector = MultiKVConnectorStats()
            stats_by_connector[c.__class__.__name__] = stats
        return stats_by_connector
    @classmethod
    def build_prom_metrics(
        cls,
        vllm_config: "VllmConfig",
        metric_types: dict[type["PromMetric"], type["PromMetricT"]],
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
    ) -> KVConnectorPromMetrics:
        """Build a MultiKVConnectorPromMetrics wrapping each sub-connector's
        Prometheus metrics (connectors returning None are skipped)."""
        prom_metrics: dict[str, KVConnectorPromMetrics] = {}
        for connector_cls, temp_config in cls._get_connector_classes_and_configs(
            vllm_config
        ):
            connector_prom = connector_cls.build_prom_metrics(
                temp_config, metric_types, labelnames, per_engine_labelvalues
            )
            if connector_prom is not None:
                prom_metrics[connector_cls.__name__] = connector_prom
        return MultiKVConnectorPromMetrics(
            vllm_config,
            metric_types,
            labelnames,
            per_engine_labelvalues,
            prom_metrics,
        )
    def reset_cache(self) -> bool:
        """Reset all sub-connector caches.

        A sub-connector counts as successful unless it explicitly returns
        False (None/True are treated as success); returns True iff all
        succeeded. All connectors are attempted regardless of failures.
        """
        results = [c.reset_cache() is not False for c in self._connectors]
        return all(results)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/multi_connector.py",
"license": "Apache License 2.0",
"lines": 451,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_batched_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
import pytest
import torch
from tests.kernels.moe.utils import (
batched_moe,
make_quantized_test_activations,
make_test_weights,
naive_batched_moe,
)
from tests.kernels.quant_utils import native_batched_masked_quant_matmul
from tests.kernels.utils import torch_experts
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.model_executor.layers.fused_moe import fused_topk
from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
invoke_moe_batched_triton_kernel,
)
from vllm.platforms import current_platform
from vllm.triton_utils import tl
from vllm.utils.torch_utils import set_random_seed
# (M, N, K) problem sizes exercised by the fused batched-experts test.
MNK_FACTORS = [
    (1, 128, 128),
    (1, 512, 512),
    (1, 1024, 2048),
    (32, 128, 128),
    (32, 512, 512),
    (32, 1024, 2048),
    (45, 128, 2048),
    (45, 1024, 128),
    (64, 512, 512),
    (64, 1024, 2048),
    (222, 128, 2048),
    (222, 1024, 2048),
]
NUM_EXPERTS = [8, 64]
TOP_KS = [1, 2, 6]
DTYPES = [torch.bfloat16]
# Only test fp8 on platforms using the OCP e4m3fn format (not the fnuz
# variant used on some ROCm hardware).
if not current_platform.is_fp8_fnuz():
    DTYPES.append(torch.float8_e4m3fn)
# Default config installed via set_current_vllm_config() inside the tests.
vllm_config = VllmConfig()
@dataclass
class BatchedMMConfig:
    """Shape/dtype configuration for one batched expert-matmul test case."""

    # Activation dtype before (optional) quantization.
    in_dtype: torch.dtype
    # Quantized dtype, or None to run unquantized.
    quant_dtype: torch.dtype | None
    # Output dtype of the matmul result.
    out_dtype: torch.dtype
    num_experts: int
    # Capacity (padded token rows) per expert.
    max_tokens_per_expert: int
    K: int
    N: int
@dataclass
class BatchedMMTensors:
    """Randomly initialized operands for a batched expert matmul."""

    A: torch.Tensor  # [E, max_tokens, K]
    B: torch.Tensor  # [E, K, N] - column major
    C: torch.Tensor  # [E, max_tokens, N]
    num_expert_tokens: torch.Tensor  # [E]

    @staticmethod
    def make_tensors(config: BatchedMMConfig):
        """Allocate A/B/C and per-expert token counts per ``config``."""
        num_experts = config.num_experts
        max_tokens = config.max_tokens_per_expert
        # Scale activations down to keep values in a numerically tame range.
        a = (
            torch.randn(
                (num_experts, max_tokens, config.K),
                device="cuda",
                dtype=config.in_dtype,
            )
            / 10
        )
        b = torch.randn(
            (num_experts, config.N, config.K),
            device="cuda",
            dtype=config.in_dtype,
        )
        c = torch.zeros(
            (num_experts, max_tokens, config.N),
            device="cuda",
            dtype=config.out_dtype,
        )
        # Random valid-token count per expert, below the padded capacity.
        counts = torch.randint(
            low=0,
            high=max_tokens,
            size=(num_experts,),
            device="cuda",
            dtype=torch.int32,
        )
        return BatchedMMTensors(a, b, c, counts)
@pytest.mark.parametrize("num_experts", [8, 32])
@pytest.mark.parametrize("max_tokens_per_expert", [32, 224, 512])
@pytest.mark.parametrize("K", [128, 1024])
@pytest.mark.parametrize("N", [128, 1024])
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("block_shape", [None, [128, 128]])
@pytest.mark.parametrize("per_act_token_quant", [False, True])
def test_batched_mm(
    num_experts: int,
    max_tokens_per_expert: int,
    K: int,
    N: int,
    dtype: torch.dtype,
    block_shape: list[int] | None,
    per_act_token_quant: bool,
):
    """Check invoke_moe_batched_triton_kernel against a native reference.

    Note: float8_e4m3fn is not supported on CUDA architecture < 89,
    and those tests will be skipped on unsupported hardware."""
    set_random_seed(7)
    use_fp8_w8a8 = dtype == torch.float8_e4m3fn
    if (dtype == torch.float8_e4m3fn) and not current_platform.has_device_capability(
        89
    ):
        pytest.skip(
            "Triton limitation: fp8e4nv data type is not supported on CUDA arch < 89"
        )
    # Quantization-related parametrizations only make sense for fp8, and
    # per-token + block-wise quantization are mutually exclusive.
    if (per_act_token_quant or block_shape is not None) and not use_fp8_w8a8:
        pytest.skip("Don't test blocking for non-quantized types.")
    if per_act_token_quant and block_shape is not None:
        pytest.skip("Skip illegal quantization test.")
    # 1-byte dtypes are quantized formats: compute in bf16, quantize to dtype.
    if dtype.itemsize == 1:
        act_dtype = torch.bfloat16
        quant_dtype = dtype
    else:
        act_dtype = dtype
        quant_dtype = None
    # Random number of valid token rows per expert (rest is padding).
    num_expert_tokens = torch.randint(
        low=0,
        high=max_tokens_per_expert,
        size=(num_experts,),
        device="cuda",
        dtype=torch.int32,
    )
    A, A_q, A_scale = make_quantized_test_activations(
        num_experts,
        max_tokens_per_expert,
        K,
        in_dtype=act_dtype,
        quant_dtype=quant_dtype,
        block_shape=block_shape,
        per_act_token_quant=per_act_token_quant,
    )
    # Only the first weight tensor of the pair is used here; N // 2 is
    # presumably because the helper sizes weights for the gate/up pair —
    # TODO confirm against make_test_weights.
    (B, B_q, B_scale, _), _ = make_test_weights(
        num_experts,
        N // 2,
        K,
        in_dtype=act_dtype,
        quant_dtype=quant_dtype,
        block_shape=block_shape,
        per_out_ch_quant=per_act_token_quant,
    )
    out_shape = (num_experts, max_tokens_per_expert, N)
    test_output = torch.zeros(out_shape, dtype=act_dtype, device="cuda")
    ref_output = torch.zeros(out_shape, dtype=act_dtype, device="cuda")
    q_ref_output = torch.zeros(out_shape, dtype=act_dtype, device="cuda")
    # Map the torch accumulator dtype to the Triton compute dtype.
    compute_tl_dtype = {
        torch.float16: tl.float16,
        torch.bfloat16: tl.bfloat16,
        torch.float32: tl.float32,
    }[test_output.dtype]
    assert A_q.dtype == B_q.dtype
    # Kernel under test.
    invoke_moe_batched_triton_kernel(
        A_q,
        B_q,
        test_output,
        num_expert_tokens,
        compute_tl_dtype,
        # Quantization data
        A_scale,
        B_scale,
        None,
        # Quantization schemes
        use_fp8_w8a8,
        False,
        False,
        config={
            "BLOCK_SIZE_M": 16,
            "BLOCK_SIZE_N": 16,
            "BLOCK_SIZE_K": 16 if dtype.itemsize > 1 else 32,
        },
        per_act_token_quant=per_act_token_quant,
        block_shape=block_shape,
    )
    # Unquantized reference on the original activations/weights.
    ref_output = native_batched_masked_quant_matmul(
        A,
        B,
        ref_output,
        num_expert_tokens,
    )
    # Quantized reference on the same quantized operands the kernel saw.
    q_ref_output = native_batched_masked_quant_matmul(
        A_q,
        B_q,
        q_ref_output,
        num_expert_tokens,
        A_scale,
        B_scale,
        block_shape,
        per_act_token_quant,
    )
    rtol, atol = {
        torch.float16: (6e-2, 6e-2),
        torch.bfloat16: (6e-2, 6e-2),
        torch.float32: (1e-2, 1e-2),
    }[test_output.dtype]
    # Quantized reference must track the unquantized one, and the Triton
    # kernel must match the quantized reference.
    torch.testing.assert_close(ref_output, q_ref_output, atol=atol, rtol=rtol)
    torch.testing.assert_close(test_output, q_ref_output, atol=atol, rtol=rtol)
@pytest.mark.parametrize(("m", "n", "k"), MNK_FACTORS)
@pytest.mark.parametrize("e", NUM_EXPERTS)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("per_act_token_quant", [False, True])
@pytest.mark.parametrize("block_shape", [None, [128, 128]])
@pytest.mark.parametrize("input_scales", [False])
def test_fused_moe_batched_experts(
    m: int,
    n: int,
    k: int,
    e: int,
    topk: int,
    dtype: torch.dtype,
    per_act_token_quant: bool,
    block_shape: list[int] | None,
    input_scales: bool,
    # NOTE(review): presumably a pytest fixture that prepares MoE workspace
    # state (defined outside this chunk) — confirm.
    workspace_init,
):
    """Cross-check torch, naive-batched and Triton-batched MoE experts.

    Note: float8_e4m3fn is not supported on CUDA architecture < 89,
    and those tests will be skipped on unsupported hardware."""
    set_random_seed(7)
    use_fp8_w8a8 = dtype == torch.float8_e4m3fn
    if (dtype == torch.float8_e4m3fn) and not current_platform.has_device_capability(
        89
    ):
        pytest.skip(
            "Triton limitation: fp8e4nv data type is not supported on CUDA arch < 89"
        )
    if topk > e:
        pytest.skip("topk > e")
    # Quantization-related parametrizations only apply to fp8; per-token and
    # block-wise quantization are mutually exclusive.
    if not use_fp8_w8a8 and (per_act_token_quant or block_shape is not None):
        pytest.skip("Skip quantization test for non-quantized type")
    if per_act_token_quant and block_shape is not None:
        pytest.skip("Skip illegal quantization test.")
    a = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10
    score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16)
    # 1-byte dtypes are quantized formats: compute in bf16, quantize to dtype.
    if dtype.itemsize == 1:
        act_dtype = torch.bfloat16
        quant_dtype = dtype
    else:
        act_dtype = dtype
        quant_dtype = None
    (w1_16, w1, w1_s, _), (w2_16, w2, w2_s, _) = make_test_weights(
        e,
        n,
        k,
        block_shape=block_shape,
        in_dtype=act_dtype,
        quant_dtype=quant_dtype,
        per_out_ch_quant=per_act_token_quant,
    )
    # Optional static (scalar) activation scales; only meaningful when
    # quantizing (and currently disabled by the parametrization above).
    if input_scales and quant_dtype is not None:
        a1_scale = torch.tensor(1, device="cuda", dtype=torch.float32)
        a2_scale = torch.tensor(1, device="cuda", dtype=torch.float32)
    else:
        a1_scale = None
        a2_scale = None
    with set_current_vllm_config(vllm_config):
        topk_weight, topk_ids, _ = fused_topk(a, score, topk, False)
        # Reference implementation (pure torch).
        baseline_output = torch_experts(
            a,
            w1,
            w2,
            topk_weight,
            topk_ids,
            w1_scale=w1_s,
            w2_scale=w2_s,
            a1_scale=a1_scale,
            a2_scale=a2_scale,
            quant_dtype=quant_dtype,
            per_act_token_quant=per_act_token_quant,
            block_shape=block_shape,
        )
        # Naive batched implementation.
        batched_output = naive_batched_moe(
            a,
            w1,
            w2,
            topk_weight,
            topk_ids,
            w1_scale=w1_s,
            w2_scale=w2_s,
            a1_scale=a1_scale,
            a2_scale=a2_scale,
            quant_dtype=quant_dtype,
            per_act_token_quant=per_act_token_quant,
            block_shape=block_shape,
        )
        # Triton batched kernel under test.
        triton_output = batched_moe(
            a,
            w1,
            w2,
            topk_weight,
            topk_ids,
            w1_scale=w1_s,
            w2_scale=w2_s,
            a1_scale=a1_scale,
            a2_scale=a2_scale,
            quant_dtype=quant_dtype,
            per_act_token_quant=per_act_token_quant,
            block_shape=block_shape,
        )
    torch.testing.assert_close(batched_output, baseline_output, atol=3e-2, rtol=2e-2)
    torch.testing.assert_close(triton_output, batched_output, atol=2e-2, rtol=2e-2)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_batched_moe.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/fused_batched_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Fused batched MoE kernel."""
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEParallelConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.fused_moe import try_get_optimal_moe_config
from vllm.model_executor.layers.fused_moe.topk_weight_and_reduce import (
TopKWeightAndReduceDelegate,
TopKWeightAndReduceNaiveBatched,
)
from vllm.model_executor.layers.fused_moe.utils import (
_resize_cache,
moe_kernel_quantize_input,
normalize_batched_scales_shape,
normalize_scales_shape,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
group_broadcast,
kFp8Dynamic128Sym,
kFp8DynamicTensorSym,
kFp8DynamicTokenSym,
kFp8Static128BlockSym,
kFp8StaticChannelSym,
kFp8StaticTensorSym,
)
from vllm.platforms import current_platform
from vllm.triton_utils import tl, triton
# Inner K-loop of the batched expert matmul: computes one [BLOCK_M, BLOCK_N]
# tile of A @ B for a single expert, applying w8a8/w8a16 dequantization
# (tensor-wise, per-act-token, or block-wise) as configured. Returns the tile
# cast to `compute_type`. (No docstring: comments only, since this is a
# @triton.jit function.)
@triton.jit
def moe_mmk(
    a_ptrs,
    b_ptrs,
    K,
    expert_id,
    a_scale_ptr,
    b_scale_ptr,
    # The stride variables represent how much to increase the ptr by when
    # moving by 1 element in a particular dimension. E.g. `stride_am` is
    # how much to increase `a_ptr` by to get the element one row down
    # (A has M rows).
    stride_ak: tl.int64,
    stride_bk: tl.int64,
    stride_ase: tl.int64,
    stride_asm: tl.int64,
    stride_ask: tl.int64,
    stride_bse: tl.int64,
    stride_bsk: tl.int64,
    stride_bsn: tl.int64,
    # Offsets and masks
    offs_m,
    offs_n,
    offs_bn,
    mask_m,
    # Block size for block-wise quantization
    group_n: tl.constexpr,
    group_k: tl.constexpr,
    # Meta-parameters
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    compute_type: tl.constexpr,
    use_w8a8: tl.constexpr,
    use_w8a16: tl.constexpr,
    per_act_token_quant: tl.constexpr,
):
    offs_k = tl.arange(0, BLOCK_K)
    # w8a16: weight scales are per (expert, output column) and loop-invariant.
    if use_w8a16:
        b_scale_ptrs = (
            b_scale_ptr + expert_id * stride_bse + offs_n[None, :] * stride_bsn
        )
        b_scale = tl.load(b_scale_ptrs)
    # w8a8: set up scale pointers/values for the selected quantization scheme.
    if use_w8a8:
        # block-wise
        if group_k > 0 and group_n > 0:
            a_scale_ptrs = a_scale_ptr + offs_m * stride_asm
            offs_bsn = offs_bn // group_n
            b_scale_ptrs = b_scale_ptr + offs_bsn * stride_bsn
        # per act token
        elif per_act_token_quant:
            # Load per-token scale for activations
            a_scale_ptrs = a_scale_ptr + offs_m * stride_asm
            a_scale = tl.load(a_scale_ptrs, mask=mask_m, other=0.0)[:, None]
            b_scale_ptrs = b_scale_ptr + offs_bn[None, :] * stride_bsn
            b_scale = tl.load(b_scale_ptrs)
        # tensor-wise
        else:
            a_scale = tl.load(a_scale_ptr)
            b_scale = tl.load(b_scale_ptr)
    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher accuracy.
    # `accumulator` will be converted back to fp16 after the loop.
    accumulator = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_K)):
        # Load the next block of A and B, generate a mask by checking the
        # K dimension.
        a = tl.load(
            a_ptrs,
            mask=mask_m[:, None] & (offs_k[None, :] < K - k * BLOCK_K),
            other=0.0,
        )
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_K, other=0.0)
        # We accumulate along the K dimension.
        if use_w8a16:
            accumulator = tl.dot(a, b.to(compute_type), acc=accumulator)
        elif use_w8a8:
            if group_k > 0 and group_n > 0:
                # Block-wise: reload the scales for the current K group and
                # dequantize each partial product immediately.
                k_start = k * BLOCK_K
                offs_ks = k_start // group_k
                a_scale = tl.load(
                    a_scale_ptrs + offs_ks * stride_ask, mask=mask_m, other=0.0
                )
                b_scale = tl.load(b_scale_ptrs + offs_ks * stride_bsk)
                accumulator += tl.dot(a, b) * a_scale[:, None] * b_scale[None, :]
            else:
                # acc used to enable fp8_fast_accum
                accumulator = tl.dot(a, b, acc=accumulator)
        else:
            accumulator += tl.dot(a, b)
        # Advance the ptrs to the next K block.
        a_ptrs += BLOCK_K * stride_ak
        b_ptrs += BLOCK_K * stride_bk
    # Apply the deferred (loop-invariant) dequantization scales, if any, and
    # cast down to the requested compute type.
    if use_w8a16:
        accumulator = (accumulator * b_scale).to(compute_type)
    elif use_w8a8:
        if group_k > 0 and group_n > 0:
            accumulator = accumulator.to(compute_type)
        else:
            accumulator = (accumulator * a_scale * b_scale).to(compute_type)
    else:
        accumulator = accumulator.to(compute_type)
    return accumulator
@triton.jit
def expert_triton_kernel(
    a_ptr,  # [max_tokens, K]
    b_ptr,  # [K, N]
    c_ptr,  # [max_tokens, N]
    expert_id,
    compute_type: tl.constexpr,
    # Dimensions
    M,
    N,
    K,
    # Quantization data
    a_scale_ptr,
    b_scale_ptr,
    b_zp_ptr,
    # strides
    stride_am: tl.int64,
    stride_ak: tl.int64,
    stride_bk: tl.int64,
    stride_bn: tl.int64,
    stride_cm: tl.int64,
    stride_cn: tl.int64,
    stride_ase: tl.int64,
    stride_asm: tl.int64,
    stride_ask: tl.int64,
    stride_bse: tl.int64,
    stride_bsk: tl.int64,
    stride_bsn: tl.int64,
    # offsets
    offs_bn,
    # Blockwise quantization data
    group_n,
    group_k,
    # Quantization schemes
    use_fp8_w8a8: tl.constexpr,
    use_int8_w8a16: tl.constexpr,
    per_act_token_quant: tl.constexpr,
    # Kernel config
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    """
    Compute a single [BLOCK_M, BLOCK_N] tile of C = A @ B for one expert.

    The K-loop matmul (including fp8/int8 scale handling) is delegated to
    `moe_mmk`; this wrapper only builds the A/B pointer grids and stores the
    accumulated tile.  Note that `b_zp_ptr` is accepted for interface parity
    but is never used in this body.
    """
    offs_m = tl.arange(0, BLOCK_M)
    # Wrap column offsets so loads stay in-bounds when N < BLOCK_N; out-of-range
    # columns are masked out at the store below.
    offs_n = tl.arange(0, BLOCK_N) % N
    offs_k = tl.arange(0, BLOCK_K)
    # Only the first M rows of the tile correspond to real tokens.
    mask_m = offs_m < M
    # Make grids of a + b pointers
    a_ptrs = a_ptr + offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak
    b_ptrs = b_ptr + offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn
    accumulator = moe_mmk(
        a_ptrs,
        b_ptrs,
        K,
        expert_id,
        a_scale_ptr,
        b_scale_ptr,
        # The stride variables represent how much to increase the ptr by when
        # moving by 1 element in a particular dimension. E.g. `stride_am` is
        # how much to increase `a_ptr` by to get the element one row down
        # (A has M rows).
        stride_ak,
        stride_bk,
        stride_ase,
        stride_asm,
        stride_ask,
        stride_bse,
        stride_bsk,
        stride_bsn,
        # Offsets and masks
        offs_m,
        offs_n,
        offs_bn,
        mask_m,
        # Block size for block-wise quantization
        group_n,
        group_k,
        # Meta-parameters
        BLOCK_M,
        BLOCK_N,
        BLOCK_K,
        compute_type,
        use_fp8_w8a8,
        use_int8_w8a16,
        per_act_token_quant,
    )
    # store in C, masking rows >= M and columns >= N
    offs_cn = tl.arange(0, BLOCK_N)
    c_ptrs = c_ptr + offs_m[:, None] * stride_cm + offs_cn[None, :] * stride_cn
    c_mask = mask_m[:, None] & (offs_cn[None, :] < N)
    tl.store(c_ptrs, accumulator, mask=c_mask)
@triton.jit
def batched_triton_kernel(
    a_ptr,  # [E, max_num_tokens, K]
    b_ptr,  # [E, K, N]
    c_ptr,  # [E, max_num_tokens, N]
    expert_num_tokens,  # [E]
    compute_type: tl.constexpr,
    # Dimensions
    max_num_tokens,
    K,
    N,
    # Quantization data
    a_scale_ptr,
    b_scale_ptr,
    b_zp_ptr,
    # The stride variables represent how much to increase the ptr by when
    # moving by 1 element in a particular dimension. E.g. `stride_am` is
    # how much to increase `a_ptr` by to get the element one row down
    # (A has M rows).
    stride_ae: tl.int64,
    stride_am: tl.int64,
    stride_ak: tl.int64,
    stride_be: tl.int64,
    stride_bk: tl.int64,
    stride_bn: tl.int64,
    stride_ce: tl.int64,
    stride_cm: tl.int64,
    stride_cn: tl.int64,
    stride_ase: tl.int64,
    stride_asm: tl.int64,
    stride_ask: tl.int64,
    stride_bse: tl.int64,
    stride_bsk: tl.int64,
    stride_bsn: tl.int64,
    # Blockwise quantization data
    group_n: tl.constexpr,
    group_k: tl.constexpr,
    # Quantization schemes
    use_fp8_w8a8: tl.constexpr,
    use_int8_w8a16: tl.constexpr,
    per_act_token_quant: tl.constexpr,
    # Kernel config
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
):
    """
    Batched grouped-gemm entry point.

    Grid axis 0 selects the expert; axis 1 enumerates (M-tile, N-tile) pairs
    for that expert.  Programs whose expert holds no tokens, or whose M-tile
    lies entirely past the expert's token count, exit immediately.  Otherwise
    base pointers are advanced to this (expert, tile) and the tile is computed
    and stored by `expert_triton_kernel`.
    """
    expert_id = tl.program_id(axis=0)
    e_num_tokens = tl.load(expert_num_tokens + expert_id)
    if e_num_tokens == 0:
        # Early exit: this expert received no tokens.
        return
    # axis 1 is M_blocks * N_blocks
    pid_mn = tl.program_id(axis=1)
    # num_pid_m = tl.cdiv(max_num_tokens, BLOCK_M)
    num_pid_n = tl.cdiv(N, BLOCK_N)
    pid_m = pid_mn // num_pid_n
    pid_n = pid_mn % num_pid_n
    cta_m_start = pid_m * BLOCK_M
    cta_n_start = pid_n * BLOCK_N
    if cta_m_start >= e_num_tokens:
        # Early exit: this M-tile starts past this expert's token count.
        return
    # Clamp tile sizes at the ragged edges.
    cta_m_size = min(BLOCK_M, e_num_tokens - cta_m_start)
    cta_n_size = min(BLOCK_N, N - cta_n_start)
    # Advance the base pointers to this (expert, tile).
    a_ptr = a_ptr + expert_id * stride_ae + cta_m_start * stride_am
    b_ptr = b_ptr + expert_id * stride_be + cta_n_start * stride_bn
    c_ptr = (
        c_ptr
        + expert_id * stride_ce
        + cta_m_start * stride_cm
        + cta_n_start * stride_cn
    )
    offs_bn = (pid_n * BLOCK_N + tl.arange(0, BLOCK_N).to(tl.int64)) % N
    if use_fp8_w8a8:
        # Select this expert's activation/weight scales.
        a_scale_ptr = a_scale_ptr + expert_id * stride_ase
        b_scale_ptr = b_scale_ptr + expert_id * stride_bse
        # block-wise and per-act-token scales also advance along M
        if group_k > 0 and group_n > 0 or per_act_token_quant:
            a_scale_ptr = a_scale_ptr + cta_m_start * stride_asm
    expert_triton_kernel(
        a_ptr,
        b_ptr,
        c_ptr,
        expert_id,
        compute_type,
        cta_m_size,  # M
        cta_n_size,  # N
        K,  # K
        a_scale_ptr,
        b_scale_ptr,
        b_zp_ptr,
        # Strides
        stride_am,
        stride_ak,
        stride_bk,
        stride_bn,
        stride_cm,
        stride_cn,
        stride_ase,
        stride_asm,
        stride_ask,
        stride_bse,
        stride_bsk,
        stride_bsn,
        # offsets
        offs_bn,
        # Blockwise quantization data
        group_n,
        group_k,
        # Quantization schemes
        use_fp8_w8a8,
        use_int8_w8a16,
        per_act_token_quant,
        # Kernel config
        BLOCK_M,
        BLOCK_N,
        BLOCK_K,
    )
def invoke_moe_batched_triton_kernel(
    A: torch.Tensor,  # [E, max_tokens, K]
    B: torch.Tensor,  # [E, N, K]
    C: torch.Tensor,  # [E, max_tokens, N]
    expert_num_tokens: torch.Tensor,  # [E]
    compute_type: tl.dtype,
    # Quantization data
    A_scale: torch.Tensor | None,
    B_scale: torch.Tensor | None,
    B_zp: torch.Tensor,
    # Quantization schemes
    use_fp8_w8a8: bool,
    use_int8_w8a16: bool,
    use_int4_w4a16: bool,
    config: dict[str, int],
    per_act_token_quant: bool,
    block_shape: list[int] | None = None,
):
    """
    Host-side launcher for `batched_triton_kernel`.

    Normalizes the optional scale tensors into batched (3-D) layouts, derives
    their strides (zero when a scale is absent, so the kernel can compute
    addresses unconditionally), and launches one program per
    (expert, M-tile x N-tile) pair.  int4_w4a16 is not supported here.
    """
    assert not use_int4_w4a16
    max_num_tokens = A.size(1)
    K = A.size(2)
    N = C.size(2)
    BLOCK_M = config["BLOCK_SIZE_M"]
    BLOCK_N = config["BLOCK_SIZE_N"]
    BLOCK_K = config["BLOCK_SIZE_K"]
    # Axis 0: one program per expert.  Axis 1: all M x N tiles of one expert.
    grid = (
        expert_num_tokens.size(0),
        triton.cdiv(max_num_tokens, BLOCK_M) * triton.cdiv(B.size(1), BLOCK_N),
    )
    A_scale = normalize_batched_scales_shape(A_scale, expert_num_tokens.shape[0])
    # Promote per-expert (1-D) B scales to the batched 3-D layout.
    if B_scale is not None and B_scale.ndim == 1:
        assert B_scale.numel() == expert_num_tokens.shape[0]
        B_scale = B_scale.view(-1, 1, 1)
    assert A_scale is None or A_scale.ndim == 3, (
        f"{0 if A_scale is None else A_scale.shape}"
    )
    assert B_scale is None or B_scale.ndim == 1 or B_scale.ndim == 3, (
        f"{0 if B_scale is None else B_scale.shape}"
    )
    if B_scale is not None:
        # NOTE(review): the ndim == 1 branch is unreachable after the
        # view(-1, 1, 1) promotion above; kept as defensive code.
        if B_scale.ndim == 1:
            stride_bse = 1
            stride_bsk = 0
            stride_bsn = 0
        else:
            stride_bse = B_scale.stride(0)
            stride_bsk = B_scale.stride(2)
            stride_bsn = B_scale.stride(1)
    else:
        # No scales: zero strides make the kernel's scale address math a no-op.
        stride_bse = 0
        stride_bsk = 0
        stride_bsn = 0
    if A_scale is not None:
        stride_ase = A_scale.stride(0)
        stride_asm = A_scale.stride(1)
        stride_ask = A_scale.stride(2)
    else:
        stride_ase = 0
        stride_asm = 0
        stride_ask = 0
    batched_triton_kernel[grid](
        A,
        B,
        C,
        expert_num_tokens,
        compute_type,
        # Dimensions
        max_num_tokens,
        K,
        N,
        # Quantization data
        A_scale,
        B_scale,
        B_zp,
        # Strides
        A.stride(0),
        A.stride(1),
        A.stride(2),
        B.stride(0),
        # B is [E, N, K]: the K stride is stride(2), the N stride is stride(1).
        B.stride(2),
        B.stride(1),
        C.stride(0),
        C.stride(1),
        C.stride(2),
        stride_ase,
        stride_asm,
        stride_ask,
        stride_bse,
        stride_bsk,
        stride_bsn,
        # Blockwise quantization data
        0 if block_shape is None else block_shape[0],
        0 if block_shape is None else block_shape[1],
        # Quantization schemes
        use_fp8_w8a8,
        use_int8_w8a16,
        per_act_token_quant,
        # Kernel config
        BLOCK_M=BLOCK_M,
        BLOCK_N=BLOCK_N,
        BLOCK_K=BLOCK_K,
    )
class BatchedPrepareAndFinalize(mk.FusedMoEPrepareAndFinalize):
    """
    A reference prepare/finalize class that reorganizes the tokens into
    expert batched format, i.e. E x max_num_tokens x K. This is the format
    that the batched dispatch/combine kernels use.
    """
    def __init__(
        self,
        max_num_tokens: int,
        num_local_experts: int,
        num_dispatchers: int,
        rank: int,
    ):
        super().__init__()
        # Per-expert batch capacity: second dim of the batched layout.
        self.max_num_tokens = max_num_tokens
        self.num_local_experts = num_local_experts
        self.rank = rank
        self.num_dispatchers_ = num_dispatchers
    @property
    def activation_format(self) -> mk.FusedMoEActivationFormat:
        # This class produces/consumes the E x max_num_tokens x K layout.
        return mk.FusedMoEActivationFormat.BatchedExperts
    def max_num_tokens_per_rank(self) -> int | None:
        return self.max_num_tokens
    def topk_indices_dtype(self) -> torch.dtype | None:
        # No dtype restriction on topk indices.
        return None
    def num_dispatchers(self) -> int:
        return self.num_dispatchers_
    def output_is_reduced(self) -> bool:
        return False
    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool = False,
    ) -> mk.PrepareResultType:
        """
        Scatter tokens into per-local-expert batches of shape
        (num_local_experts, max_num_tokens, hidden_dim), quantizing each
        expert's slice when `quant_config` requests a quantized dtype.

        Returns the batched activations, optional batched scales, per-expert
        token counts, and (always None here) dispatched topk ids/weights.
        """
        if defer_input_quant:
            raise NotImplementedError(
                f"{self.__class__.__name__} does not support defer_input_quant=True. "
                "Please select an MoE kernel that accepts quantized inputs."
            )
        assert a1.dim() == 2
        assert topk_ids.dim() == 2
        assert topk_ids.size(0) == a1.size(0)
        if apply_router_weight_on_input:
            topk = topk_ids.size(1)
            # TODO: this only works for topK=1, will need to update for topK>1
            assert topk == 1, (
                "apply_router_weight_on_input is only implemented for topk=1"
            )
            # In-place: fold the router weight into the activations.
            a1.mul_(topk_weights.to(a1.dtype))
        num_tokens, hidden_dim = a1.size()
        topk = topk_ids.size(1)
        tokens_per_expert = torch.zeros(num_experts, dtype=torch.int, device=a1.device)
        num_local_experts = self.num_local_experts
        # Batched buffer dtype: the quantized dtype if quantizing, else a1's.
        if quant_config.quant_dtype is None:
            b_type = a1.dtype
        else:
            b_type = quant_config.quant_dtype
        b_a1 = torch.zeros(
            (num_local_experts, self.max_num_tokens, hidden_dim),
            dtype=b_type,
            device=a1.device,
        )
        if quant_config.is_quantized:
            scale_shape = quant_config.batched_scale_shape(
                num_local_experts, self.max_num_tokens, hidden_dim
            )
            b_a1_scale = torch.empty(scale_shape, dtype=torch.float32, device=a1.device)
        else:
            assert quant_config.a1_scale is None
            b_a1_scale = None
        # This rank owns the contiguous expert range [first_expert, last_expert).
        first_expert = num_local_experts * self.rank
        last_expert = first_expert + num_local_experts
        a1_scale = normalize_scales_shape(quant_config.a1_scale)
        for expert_id in range(first_expert, last_expert):
            # Boolean mask over tokens routed to this expert in any topk slot.
            topks = torch.any(topk_ids == expert_id, dim=1).flatten()
            rows = torch.count_nonzero(topks.flatten())
            if rows == 0:
                continue
            idx = expert_id - first_expert
            tokens_per_expert[idx] = rows
            # Gather this expert's tokens.
            rhs = a1[: topks.numel()][topks]
            if quant_config.quant_dtype is not None:
                if a1_scale is not None:
                    if quant_config.is_per_act_token:
                        # Select the scales of the gathered tokens only.
                        rhs_a1_scale = a1_scale[: topks.numel()][topks]
                    else:
                        rhs_a1_scale = a1_scale
                else:
                    rhs_a1_scale = None
                b_a1[idx, :rows, :], b_s = moe_kernel_quantize_input(
                    rhs,
                    rhs_a1_scale,
                    quant_config.quant_dtype,
                    quant_config.per_act_token_quant,
                    quant_config.block_shape,
                )
                assert b_s is not None
                if quant_config.is_per_act_token:
                    b_a1_scale[idx, :rows] = b_s[:rows]
                else:
                    b_a1_scale[idx, : b_s.shape[0]] = b_s
            else:
                # Unquantized: copy tokens directly into the batch.
                b_a1[idx, :rows, :] = rhs
        assert b_a1_scale is None or b_a1_scale.ndim == 3
        expert_tokens_meta = mk.ExpertTokensMetadata(
            expert_num_tokens=tokens_per_expert, expert_num_tokens_cpu=None
        )
        # No dispatched topk ids/weights in this reference implementation.
        return b_a1, b_a1_scale, expert_tokens_meta, None, None
    def finalize(
        self,
        output: torch.Tensor,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        weight_and_reduce_impl: mk.TopKWeightAndReduce,
    ) -> None:
        """
        Gather batched expert outputs back into `output`, applying topk
        weights and reducing via `weight_and_reduce_impl`.  The delegate
        sentinel is replaced by the naive batched implementation.
        """
        if isinstance(weight_and_reduce_impl, TopKWeightAndReduceDelegate):
            weight_and_reduce_impl = TopKWeightAndReduceNaiveBatched(self.rank)
        weight_and_reduce_impl.apply(
            output=output,
            fused_expert_output=fused_expert_output,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            apply_router_weight_on_input=apply_router_weight_on_input,
        )
class NaiveBatchedExperts(mk.FusedMoEPermuteExpertsUnpermute):
    """
    A reference MoE expert class that operates on expert batched format,
    i.e. E x max_num_tokens x K. This is the format that the batched
    dispatch/combine kernels use.
    """
    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int,
        num_dispatchers: int,
    ):
        super().__init__(
            moe_config=moe_config,
            quant_config=quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=num_dispatchers,
        )
        # Only unquantized and fp8-style w8a8 paths are handled in apply().
        assert not self.quant_config.use_int8_w8a8, "NYI"
        assert not self.quant_config.use_int8_w8a16, "NYI"
        assert not self.quant_config.use_int4_w4a16, "NYI"
        assert self.quant_config.ocp_mx_scheme is None, "NYI"
    @staticmethod
    def activation_format() -> mk.FusedMoEActivationFormat:
        return mk.FusedMoEActivationFormat.BatchedExperts
    @staticmethod
    def _supports_current_device() -> bool:
        raise NotImplementedError(
            "NaiveBatchedExperts is not yet used by an Oracle. "
            "This method should not be called."
        )
    @staticmethod
    def _supports_no_act_and_mul() -> bool:
        raise NotImplementedError(
            "NaiveBatchedExperts is not yet used by an Oracle. "
            "This method should not be called."
        )
    @staticmethod
    def _supports_quant_scheme(
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
    ) -> bool:
        raise NotImplementedError(
            "NaiveBatchedExperts is not yet used by an Oracle. "
            "This method should not be called."
        )
    @staticmethod
    def _supports_activation(activation: MoEActivation) -> bool:
        raise NotImplementedError(
            "NaiveBatchedExperts is not yet used by an Oracle. "
            "This method should not be called."
        )
    @staticmethod
    def _supports_parallel_config(moe_parallel_config: FusedMoEParallelConfig) -> bool:
        raise NotImplementedError(
            "NaiveBatchedExperts is not yet used by an Oracle. "
            "This method should not be called."
        )
    def supports_chunking(self) -> bool:
        return False
    def supports_expert_map(self) -> bool:
        return False
    def finalize_weight_and_reduce_impl(self) -> mk.TopKWeightAndReduce:
        # Let PrepareAndFinalize::finalize() decide the impl.
        return TopKWeightAndReduceDelegate()
    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """Workspace/output shapes for the naive batched loop in apply()."""
        assert self.num_dispatchers is not None
        assert self.max_num_tokens is not None
        num_dp = self.num_dispatchers
        num_experts = local_num_experts
        workspace13 = (num_experts, self.max_num_tokens * num_dp, K)
        workspace2 = (self.max_num_tokens * num_dp, N)
        # The output shares workspace13's shape.
        output = workspace13
        return (workspace13, workspace2, output)
    def dequant(self, t: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
        """Dequantize `t` to float32 using `scale` (broadcast for block-wise)."""
        assert self.quant_config.is_quantized
        f32 = torch.float32
        if self.quant_config.is_per_act_token or self.quant_config.is_per_tensor:
            return t.to(f32) * scale
        else:
            # Block-wise scales must be broadcast up to t's full shape.
            return t.to(f32) * group_broadcast(scale, t.shape)
    def apply(
        self,
        output: torch.Tensor,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        a1q_scale: torch.Tensor | None,
        a2_scale: torch.Tensor | None,
        workspace13: torch.Tensor,
        workspace2: torch.Tensor,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        apply_router_weight_on_input: bool,
    ):
        """
        Naive per-expert loop: dequantize inputs/weights when quantized, run
        the two matmuls with the activation in between, and write results
        into `output`.  Reference implementation, not performance-oriented.
        """
        assert hidden_states.dim() == 3
        assert expert_tokens_meta is not None
        expert_num_tokens = expert_tokens_meta.expert_num_tokens
        num_local_experts = w1.size(0)
        # NOTE(review): this assert is trivially true (it compares w1.size(0)
        # with itself); it likely intended to validate a different tensor's
        # expert dimension — confirm original intent.
        assert num_local_experts == w1.size(0), f"{num_local_experts} == {w1.size(0)}"
        # w1's output dim is 2*N; the activation maps 2N -> N (presumably a
        # gate/up split — see self.activation).
        N = w1.size(1) // 2
        for expert in range(num_local_experts):
            # Indexing expert_num_tokens doesn't work w/cudagraphs or inductor
            if (
                torch.compiler.is_compiling()
                or torch.cuda.is_current_stream_capturing()
            ):
                # Capture-safe fallback: process the full batch capacity.
                num = hidden_states.shape[1]
            else:
                num = int(expert_num_tokens[expert].item())
            if num == 0:
                continue
            tmp = _resize_cache(workspace2, (num, N))
            if self.quant_config.is_quantized:
                assert a1q_scale is not None and self.w1_scale is not None
                input = self.dequant(hidden_states[expert, :, :], a1q_scale[expert])
                w1_dq = self.dequant(w1[expert], self.w1_scale[expert])
                input = input[:num] @ w1_dq.transpose(0, 1)
            else:
                input = hidden_states[expert, :num, :] @ w1[expert].transpose(0, 1)
            self.activation(activation, tmp, input.to(tmp.dtype))
            if self.quant_config.is_quantized:
                assert self.w2_scale is not None
                w2_dq = self.dequant(w2[expert], self.w2_scale[expert])
            else:
                w2_dq = w2[expert]
            output[expert, :num, :] = tmp @ w2_dq.transpose(0, 1).to(tmp.dtype)
def batched_moe_kernel_quantize_input(
    A: torch.Tensor,
    A_scale: torch.Tensor | None,
    num_tokens: int,
    E: int,
    N: int,
    expert_num_tokens: torch.Tensor,
    qtype: torch.dtype | None,
    per_act_token_quant: bool,
    block_shape: list[int] | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    """
    Quantize batched activations A to `qtype`, returning the quantized tensor
    and batched (E, tokens, k) float32 scales.

    Three paths:
    - torch.compile / cudagraph capture: quantize the flattened tensor in one
      shot, ignoring `expert_num_tokens` (extra work, but capture-safe);
    - qtype is None: pass A through, only normalizing the scale shape;
    - otherwise: quantize each expert's first `expert_num_tokens[e]` rows.

    NOTE(review): the `N` parameter is unused, and the loop below rebinds the
    `num_tokens` parameter name with the per-expert count.
    """
    if torch.compiler.is_compiling() or torch.cuda.is_current_stream_capturing():
        # Note: this does a bunch of extra work because expert_num_tokens is
        # ignored but it does support torch.compile + cudagraphs.
        hidden_dim = A.size(-1)
        assert A_scale is None or A_scale.ndim <= 2, (
            f"{A_scale.shape if A_scale is not None else None}"
        )
        A_q, A_q_scale = moe_kernel_quantize_input(
            A.view(-1, hidden_dim), A_scale, qtype, per_act_token_quant, block_shape
        )
        A_q = A_q.view(E, -1, hidden_dim)
        A_q_scale = normalize_batched_scales_shape(A_q_scale, E)
        return A_q, A_q_scale
    elif qtype is None:
        # Unquantized: nothing to do beyond scale-shape normalization.
        return A, normalize_batched_scales_shape(A_scale, E)
    else:
        A_q = torch.empty_like(A, dtype=qtype)
        # Scale layout depends on quantization granularity:
        # per-token -> one scale per row; block-wise -> one per K tile;
        # otherwise one per expert (per-tensor).
        if per_act_token_quant:
            assert block_shape is None
            scale_shape = (E, num_tokens, 1)
        elif block_shape is not None:
            _, block_k = block_shape
            k_tiles = (A.shape[-1] + block_k - 1) // block_k
            scale_shape = (E, num_tokens, k_tiles)
        else:
            scale_shape = (E, 1, 1)
        A_q_scale = torch.zeros(scale_shape, dtype=torch.float32, device=A.device)
        num_experts = expert_num_tokens.numel()
        A_scale = normalize_batched_scales_shape(A_scale, num_experts)
        for e in range(E):
            num_tokens = int(expert_num_tokens[e].item())
            if num_tokens > 0:
                if A_scale is not None:
                    scales = A_scale[e, : min(num_tokens, A_scale.shape[1])]
                else:
                    scales = None
                A_q[e, :num_tokens], tmp_scale = moe_kernel_quantize_input(
                    A[e, :num_tokens],
                    scales,
                    qtype,
                    per_act_token_quant,
                    block_shape,
                )
                assert tmp_scale is not None
                A_q_scale[e, : tmp_scale.shape[0]] = tmp_scale
        return A_q, A_q_scale
class BatchedTritonExperts(mk.FusedMoEPermuteExpertsUnpermute):
    """
    A Triton based MoE expert class that operates on expert batched format,
    i.e. E x max_num_tokens x K. This is the format that the batched
    dispatch/combine kernels use.
    """
    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int,
        num_dispatchers: int,
    ):
        super().__init__(
            moe_config=moe_config,
            quant_config=quant_config,
            max_num_tokens=max_num_tokens,
            num_dispatchers=num_dispatchers,
        )
        # Only unquantized and fp8 w8a8 schemes are supported by this class.
        assert not self.quant_config.use_int8_w8a8, "NYI"
        assert not self.quant_config.use_int8_w8a16, "NYI"
        assert not self.quant_config.use_int4_w4a16, "NYI"
        assert self.quant_config.ocp_mx_scheme is None, "NYI"
    @staticmethod
    def activation_format() -> mk.FusedMoEActivationFormat:
        return mk.FusedMoEActivationFormat.BatchedExperts
    @staticmethod
    def _supports_current_device() -> bool:
        return current_platform.is_cuda_alike()
    @staticmethod
    def _supports_no_act_and_mul() -> bool:
        return False
    @staticmethod
    def _supports_quant_scheme(
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
    ) -> bool:
        """
        Accept unquantized (None, None), or one of the listed fp8
        weight/activation pairs when the device supports fp8.
        """
        p = current_platform
        if p.is_rocm():
            from vllm.platforms.rocm import on_gfx9
            is_rocm_on_gfx9 = on_gfx9()
        else:
            is_rocm_on_gfx9 = False
        # fp8 needs ROCm gfx9 or CUDA compute capability >= 8.9.
        device_supports_fp8 = is_rocm_on_gfx9 or (
            p.is_cuda() and p.has_device_capability((8, 9))
        )
        SUPPORTED_W_A_FP8 = [
            (kFp8Static128BlockSym, kFp8Dynamic128Sym),
            (kFp8StaticChannelSym, kFp8DynamicTokenSym),
            (kFp8StaticTensorSym, kFp8DynamicTokenSym),
            (kFp8StaticTensorSym, kFp8StaticTensorSym),
            (kFp8StaticTensorSym, kFp8DynamicTensorSym),
        ]
        return (weight_key, activation_key) == (None, None) or (
            device_supports_fp8 and (weight_key, activation_key) in SUPPORTED_W_A_FP8
        )
    @staticmethod
    def _supports_activation(activation: MoEActivation) -> bool:
        return activation in [
            MoEActivation.SILU,
            MoEActivation.GELU,
            MoEActivation.SWIGLUOAI,
            MoEActivation.SILU_NO_MUL,
            MoEActivation.GELU_NO_MUL,
            MoEActivation.RELU2_NO_MUL,
        ]
    @staticmethod
    def _supports_parallel_config(moe_parallel_config: FusedMoEParallelConfig) -> bool:
        return True
    def supports_chunking(self) -> bool:
        return False
    def supports_expert_map(self) -> bool:
        return False
    def finalize_weight_and_reduce_impl(self) -> mk.TopKWeightAndReduce:
        # Let PrepareAndFinalize::finalize() decide the impl.
        return TopKWeightAndReduceDelegate()
    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """
        Shapes of the two intermediate workspaces and the output, all in
        batched (num_experts, tokens, dim) layout.
        """
        assert self.num_dispatchers is not None
        assert self.max_num_tokens is not None
        num_dp = self.num_dispatchers
        num_experts = local_num_experts
        max_num_tokens = self.max_num_tokens
        activation_out_dim = self.adjust_N_for_activation(N, activation)
        # workspace13 is shared: it must hold both the MM1 result (width N)
        # and the final output (width K).
        workspace13 = (num_experts, max_num_tokens * num_dp, max(K, N))
        workspace2 = (num_experts, max_num_tokens * num_dp, activation_out_dim)
        output = (num_experts, max_num_tokens * num_dp, K)
        return (workspace13, workspace2, output)
    def apply(
        self,
        output: torch.Tensor,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        a1q_scale: torch.Tensor | None,
        a2_scale: torch.Tensor | None,
        workspace13: torch.Tensor,
        workspace2: torch.Tensor,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        apply_router_weight_on_input: bool,
    ):
        """
        Batched MoE pipeline: MM1 (hidden_states @ w1) -> activation ->
        optional re-quantization -> MM2 (@ w2), each matmul via
        `invoke_moe_batched_triton_kernel`.
        """
        # Check constraints.
        if self.quant_config.use_int4_w4a16:
            assert hidden_states.size(-1) // 2 == w1.size(2), "Hidden size mismatch"
        else:
            assert hidden_states.size(-1) == w1.size(2), (
                f"Hidden size mismatch {hidden_states.size(-1)} != {w1.size(2)}"
            )
        assert hidden_states.is_contiguous(), "Hidden_states must be contiguous"
        assert w1.stride(-1) == 1, "Stride of last dimension must be 1"
        assert w2.stride(-1) == 1, "Stride of last dimension must be 1"
        assert hidden_states.dtype in [
            torch.float32,
            torch.float16,
            torch.bfloat16,
            torch.float8_e4m3fn,
        ]
        assert expert_tokens_meta is not None
        expert_num_tokens = expert_tokens_meta.expert_num_tokens
        E, max_num_tokens, N, K, top_k_num = self.moe_problem_size(
            hidden_states, w1, w2, topk_ids
        )
        assert w1.size(0) == E
        assert w2.size(0) == E
        config_dtype = self.quant_config.config_name(hidden_states.dtype)
        config = try_get_optimal_moe_config(
            w1.size(),
            w2.size(),
            top_k_num,
            config_dtype,
            max_num_tokens,
            block_shape=self.block_shape,
        )
        # Pick the Triton compute dtype; fp8 inputs compute in bf16.
        if hidden_states.dtype == torch.bfloat16:
            compute_type = tl.bfloat16
        elif hidden_states.dtype == torch.float16:
            compute_type = tl.float16
        elif hidden_states.dtype == torch.float32:
            compute_type = tl.float32
        elif hidden_states.dtype == torch.float8_e4m3fn:
            compute_type = tl.bfloat16
        else:
            raise ValueError(f"Unsupported compute_type: {hidden_states.dtype}")
        # We can reuse the memory between these because by the time we need
        # cache3, we're done with cache1
        intermediate_cache1 = _resize_cache(workspace13, (E, max_num_tokens, N))
        activation_out_dim = self.adjust_N_for_activation(N, activation)
        intermediate_cache2 = _resize_cache(
            workspace2, (E, max_num_tokens, activation_out_dim)
        )
        # TODO(bnell): should this be done for any quantized type?
        if self.quant_config.use_fp8_w8a8:
            intermediate_cache1.fill_(0)
        a1q_scale = normalize_batched_scales_shape(a1q_scale, E)
        # MM1
        invoke_moe_batched_triton_kernel(
            A=hidden_states,
            B=w1,
            C=intermediate_cache1,
            expert_num_tokens=expert_num_tokens,
            compute_type=compute_type,
            A_scale=a1q_scale,
            B_scale=self.w1_scale,
            B_zp=self.w1_zp,
            use_fp8_w8a8=self.quant_config.use_fp8_w8a8,
            use_int8_w8a16=self.quant_config.use_int8_w8a16,
            use_int4_w4a16=self.quant_config.use_int4_w4a16,
            config=config,
            per_act_token_quant=self.per_act_token_quant,
            block_shape=self.block_shape,
        )
        intermediate_cache2.fill_(0)
        # TODO (bnell): use triton utility from batched deep gemm.
        self.activation(
            activation,
            intermediate_cache2.view(-1, activation_out_dim),
            intermediate_cache1.view(-1, N),
        )
        # Re-quantize the activation output for the second matmul.
        qintermediate_cache2, a2q_scale = batched_moe_kernel_quantize_input(
            intermediate_cache2,
            a2_scale,
            max_num_tokens,
            E,
            N,
            expert_num_tokens,
            self.quant_dtype,
            self.per_act_token_quant,
            self.block_shape,
        )
        # MM2
        invoke_moe_batched_triton_kernel(
            A=qintermediate_cache2,
            B=w2,
            C=output,
            expert_num_tokens=expert_num_tokens,
            compute_type=compute_type,
            A_scale=a2q_scale,
            B_scale=self.w2_scale,
            B_zp=self.w2_zp,
            use_fp8_w8a8=self.quant_config.use_fp8_w8a8,
            use_int8_w8a16=self.quant_config.use_int8_w8a16,
            use_int4_w4a16=self.quant_config.use_int4_w4a16,
            config=config,
            per_act_token_quant=self.per_act_token_quant,
            block_shape=self.block_shape,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/fused_batched_moe.py",
"license": "Apache License 2.0",
"lines": 999,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/modular_kernel.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
from collections.abc import Callable
from dataclasses import dataclass
from enum import Enum
from math import prod
from typing import final
import torch
import vllm.envs as envs
from vllm.forward_context import get_forward_context, is_forward_context_available
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.activation import (
MoEActivation,
apply_moe_activation,
)
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEParallelConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.utils import (
_resize_cache,
count_expert_num_tokens,
disable_inplace,
)
from vllm.model_executor.layers.quantization.utils.quant_utils import (
QuantKey,
)
from vllm.platforms import current_platform
from vllm.utils.math_utils import cdiv
from vllm.v1.worker.ubatching import (
dbo_enabled,
dbo_maybe_run_recv_hook,
dbo_register_recv_hook,
dbo_yield,
)
from vllm.v1.worker.workspace import current_workspace_manager
logger = init_logger(__name__)
#
# This file defines a set of base classes used to make MoE kernels more modular.
# The goal is to be able to utilize different communication mechanisms with
# any fused MoE kernel without needing to have combinatoric implementations.
#
# The fused moe kernels are broken down into the following components:
#
# [Router] → [Quantize-Dispatch] → [Permute-Experts-Unpermute] → [Combine]
#
# Each component will be independent of (but may inform) the others except for
# [Quantize-Dispatch] and [Combine] (see below). The components can then be
# mixed and matched so that DP+EP can be supported easily for multiple
# MoE kernel implementations.
#
# The following main classes are defined:
# * FusedMoEPrepareAndFinalize - an abstract base class for preparation of MoE
# inputs (e.g. quantization, distribution) and finalization of MoE outputs.
# The prepare method must take care of any needed quantization and the
# finalize method, informed by the FusedMoEPermuteExpertsUnpermute method,
# may apply weights and/or do the final reduction of the output.
# * FusedMoEPermuteExpertsUnpermute - an abstract base class for the main fused
# MoE operation, i.e matmul + act_mul + optionally quant + matmul.
# Some FusedMoEPermuteExpertsUnpermute implementations may choose to do
# the weight application and/or reduction. The class communicates this
# to [Finalize] via a TopKWeightAndReduce object.
# * FusedMoEModularKernel - an interface class that combines a
# FusedMoEPrepareAndFinalize and a FusedMoEPermuteExpertsUnpermute to
# provide the standard fused MoE kernel interface.
# * TopKWeightAndReduce - A TopKWeightAndReduce implementation chosen
# by the FusedMoEPermuteExpertsUnpermute implementation that is passed
# on to [Finalize].
#
# [Quantize-Prepare] and [Finalize] functionality are bundled into a single
# class `FusedMoEPrepareAndFinalize` since they could use collective
# communication mechanisms that need to be consistent.
#
class FusedMoEActivationFormat(Enum):
    """
    The standard activation format (num_tokens, hidden dim).
    """
    # NOTE: the trailing comma makes each member's value a 1-tuple, e.g.
    # ("standard",).  Members are compared by identity, so this is harmless.
    # The class docstring above describes `Standard`; the bare string below
    # describes `BatchedExperts`.
    Standard = ("standard",)
    """
    The batched experts format (num experts, max tokens per expert, hidden dim)
    """
    BatchedExperts = ("batched_experts",)
@dataclass
class ExpertTokensMetadata:
"""
Metadata regarding expert-token routing.
"""
expert_num_tokens: torch.Tensor
expert_num_tokens_cpu: torch.Tensor | None
@staticmethod
def make_from_list(
expert_num_tokens_list: list[int], device: str
) -> "ExpertTokensMetadata":
expert_num_tokens_cpu = torch.tensor(
expert_num_tokens_list, device="cpu", dtype=torch.int32
)
return ExpertTokensMetadata(
expert_num_tokens=expert_num_tokens_cpu.to(device, non_blocking=True),
expert_num_tokens_cpu=expert_num_tokens_cpu,
)
class TopKWeightAndReduce(ABC):
    """
    An abstract base class for weight application and reduction implementations.
    """
    @abstractmethod
    def apply(
        self,
        output: torch.Tensor | None,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
    ) -> torch.Tensor:
        """
        Apply topk_weights to fused_expert_output and/or reduce.
        If an output tensor is not passed, it will be created in the
        function and returned.
        """
        raise NotImplementedError
#
# PrepareResultType is a tuple of:
# - quantized + dispatched a.
# - quantized + dispatched a1_scales.
# - Optional ExpertTokensMetadata containing gpu/cpu tensors
#   as big as the number of local experts with the information about the
#   number of tokens assigned to each local expert.
# - Optional dispatched expert topk IDs
# - Optional dispatched expert topk weight
#
# See `prepare` method below.
#
PrepareResultType = tuple[
    torch.Tensor,
    torch.Tensor | None,
    ExpertTokensMetadata | None,
    torch.Tensor | None,
    torch.Tensor | None,
]
# A deferred receiver: invoking it waits for async dispatch results and
# returns the same tuple as `prepare` (see `prepare_async`).
ReceiverType = Callable[[], PrepareResultType]
# TODO: pass FusedMoEParallelConfig in as ctor parameter?
class FusedMoEPrepareAndFinalize(ABC):
"""
An abstract base class for the [Quantize-Prepare] and [Finalize] steps
described above.
"""
    def post_init_setup(self, fused_experts: "FusedMoEPermuteExpertsUnpermute"):
        """
        Initialize FusedMoEPrepareAndFinalize settings that depend on the
        FusedMoEPermuteExpertsUnpermute experts object.
        The FusedMoEPrepareAndFinalize implementations that have such
        dependencies may choose to override this function.
        """
        # Default: no expert-dependent setup required.
        return
    @abstractmethod
    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool,
    ) -> PrepareResultType:
        """
        Perform any quantization (and/or) dispatching needed for this kernel.
        Implementations must take care of any needed quantization of `a1`
        unless `defer_input_quant` requests that it be deferred.
        - a1: The (unquantized) input to the MoE layer.
        - topk_ids: The topk ids.
        - topk_weights: The topk weights.
        - num_experts: The total number of experts in the global expert space.
        - expert_map: A tensor mapping expert indices from the global expert
          space to the local expert space of the expert parallel shard.
        - apply_router_weight_on_input: When True, apply the weights to the
          activations, before quantization + dispatching.
        - quant_config: Quantization info provided by the fused experts.
        - defer_input_quant: Runtime parameter indicating whether or not to
          defer input quantization to the FusedMoEPermuteExpertsUnpermute
          in cases where the compute kernel expects unquantized inputs
        Returns a tuple of:
        - quantized + dispatched a.
        - Optional quantized + dispatched a1_scales.
        - Optional ExpertTokensMetadata containing gpu/cpu tensors
          as big as the number of local experts with the information about the
          number of tokens assigned to each local expert.
        - Optional dispatched expert topk IDs
        - Optional dispatched expert topk weight
        """
        raise NotImplementedError
    def supports_async(self) -> bool:
        """
        Indicates whether or not this class implements prepare_async and
        finalize_async.
        """
        # Default: synchronous-only; async-capable subclasses override this.
        return False
def prepare_async(
self,
a1: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
num_experts: int,
expert_map: torch.Tensor | None,
apply_router_weight_on_input: bool,
quant_config: FusedMoEQuantConfig,
defer_input_quant: bool,
) -> tuple[Callable, ReceiverType] | ReceiverType:
"""
Perform any quantization (and/or) dispatching needed for this kernel
but do not wait for results from other workers.
- a1: The (unquantized) input to the MoE layer.
- a1_scale: Optional scales for a1
- a2_scale: Optional scales for the second MoE gemm. Required to make
sure the quantization is consistent for both gemms.
- topk_ids: The topk ids.
- topk_weights: The topk weights.
- num_experts: The total number of experts in the global expert space.
- expert_map: A tensor mapping expert indices from the global expert
space to the local expert space of the expert parallel shard.
- apply_router_weight_on_input: When True, apply the weights to the
activations, before quantization + dispatching.
- defer_input_quant: Runtime parameter indicating whether or not to
defer input quantization to the FusedMoEPermuteExpertsUnpermute
in cases where the compute kernel expects unquantized inputs
Returns a callback or a hook callback pair that when invoked waits for
results from other workers and has the same return signature as
`prepare`, if a hook is returned this is more lightweight check that
the recv is complete without doing extra work (used by DBO, will be
refactored in the very near future)
e.g.
ret = obj.prepare_async(...)
if isinstance(ret, tuple):
hook, receiver = ret
hook()
if hook is not None:
a, a_scales, expert_meta, topk_ids, topk_weights = receiver()
is equivalent to:
a, a_scales, expert_meta, topk_ids, topk_weights = obj.prepare(...)
"""
raise NotImplementedError
@abstractmethod
def finalize(
self,
output: torch.Tensor,
fused_expert_output: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
apply_router_weight_on_input: bool,
weight_and_reduce_impl: TopKWeightAndReduce,
) -> None:
"""
Perform any combine plus apply weights and perform a reduction on the
fused experts output.
- output: The output tensor, written in place. Must be (M, K) shape.
- fused_expert_output: The unweighted, unreduced output of the fused
experts, it will have (M, topk, K) shape.
- topk_weights: The weights to be applied to the fused_experts_output.
- topk_ids: The topk_ids.
- apply_router_weight_on_input: When False, apply the weights to
fused_expert_output.
- weight_and_reduce_impl: An optional TopKWeightAndReduce
implementation.
"""
raise NotImplementedError
def finalize_async(
self,
output: torch.Tensor,
fused_expert_output: torch.Tensor,
topk_weights: torch.Tensor,
topk_ids: torch.Tensor,
apply_router_weight_on_input: bool,
weight_and_reduce_impl: TopKWeightAndReduce,
) -> tuple[Callable, Callable] | Callable:
"""
Perform any combine plus apply weights and perform a reduction on the
fused experts output but do not wait for results from other workers.
- output: The output tensor, written in place. Must be (M, K) shape.
- fused_expert_output: The unweighted, unreduced output of the fused
experts, it will have (M, topk, K) shape.
- topk_weights: The weights to be applied to the fused_experts_output.
- topk_ids: The topk_ids.
- apply_router_weight_on_input: When False, apply the weights to
fused_expert_output.
- weight_and_reduce_impl: An optional TopKWeightAndReduce
implementation.
Returns a callback or a hook callback pair that when invoked waits for
results from other workers and has the same return signature as
`finalize`, if a hook is returned this is more lightweight check that
the recv is complete without doing extra work (used by DBO, will be
refactored in the very near future)
ret = obj.finalize_async(output, ...)
... output not valid yet ...
if isinstance(ret, tuple):
hook, receiver = ret
hook()
receiver()
... output valid here ...
is equivalent to:
obj.finalize(output, ...)
"""
raise NotImplementedError
@property
@abstractmethod
def activation_format(self) -> FusedMoEActivationFormat:
"""
A property indicating the output format of the activations for the
'prepare' method.
"""
raise NotImplementedError
@abstractmethod
def topk_indices_dtype(self) -> torch.dtype | None:
"""
The PrepareFinalize All2All implementations generally constrain the
dtype of the topk_ids they support. This function returns the
required topk indices dtype so it can be respected.
Return None if there are no such restrictions.
"""
raise NotImplementedError
@abstractmethod
def max_num_tokens_per_rank(self) -> int | None:
"""
Some PrepareFinalize All2All implementations are batched. Meaning,
they can process only as set of tokens at a time. This
function returns the batch size i.e the maximum number of tokens
the implementation can process at a time.
Return None if there are no such restrictions.
"""
raise NotImplementedError
@abstractmethod
def num_dispatchers(self) -> int:
raise NotImplementedError
@abstractmethod
def output_is_reduced(self) -> bool:
"""
Indicates whether or not the output of finalize is reduced across all
ranks.
"""
raise NotImplementedError
# TODO: add supported activations method (return string)
class FusedMoEPermuteExpertsUnpermute(ABC):
    """
    An abstract base class for the [Permute-Experts-Unpermute] step described
    above.
    """

    def __init__(
        self,
        moe_config: FusedMoEConfig,
        quant_config: FusedMoEQuantConfig,
        max_num_tokens: int | None = None,
        num_dispatchers: int | None = None,
    ):
        """
        moe_config: MoE layer configuration.
        quant_config: Quantization parameters for this experts instance.
        max_num_tokens: Per-expert batch size. Must be provided (together
            with num_dispatchers) for the BatchedExperts activation format
            and must be None for the Standard format.
        num_dispatchers: Number of dispatching ranks. Same constraints as
            max_num_tokens.
        """
        # Enforce the format-dependent constraints described above.
        if self.activation_format() == FusedMoEActivationFormat.Standard and (
            max_num_tokens is not None or num_dispatchers is not None
        ):
            raise ValueError(
                "max_num_tokens and num_dispatchers should only be set for "
                "BatchedExperts activation format."
            )
        elif self.activation_format() == FusedMoEActivationFormat.BatchedExperts and (
            max_num_tokens is None or num_dispatchers is None
        ):
            raise ValueError(
                "max_num_tokens and num_dispatchers must be set for "
                "BatchedExperts activation format."
            )
        self.moe_config = moe_config
        self.quant_config = quant_config
        self.max_num_tokens = max_num_tokens
        self.num_dispatchers = num_dispatchers

    @property
    def expects_unquantized_inputs(self) -> bool:
        """
        Whether or not the PrepareFinalize should defer input quantization
        in the prepare step. If True, then the Experts kernel will
        execute the input quantization itself.
        Sample subclasses that override are AITER and FlashInfer CUTLASS.
        """
        return False

    @staticmethod
    @abstractmethod
    def activation_format() -> FusedMoEActivationFormat:
        """
        A property which is a tuple of the input and output activation formats
        for the 'apply' method.
        """
        raise NotImplementedError

    def moe_problem_size(
        self,
        a1: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_ids: torch.Tensor,
    ) -> tuple[int, int, int, int, int]:
        """
        Extract the MoE problem size from the given tensor arguments:
        - a: The hidden states, input to the MoE layer.
        - w1: The first set of expert weights.
        - w2: The second set of expert weights.
        - topk_ids: The topk ids.

        Returns (E, M, N, K, topk).

        Note: extracting the problem shape from the weight and activation
        tensors is not obvious. It needs to be done this way specifically
        due to subtle issues with particular kernels, e.g. the int4 kernels
        divide the trailing dimension by two, so it's not "correct" to
        extract N or K from the trailing dimension of w1 or w2. Similarly,
        some kernels transpose the weights, so this needs to be kept in mind.

        Note: This implementation covers most cases. However, if experts
        require a specialized implementation, like MarlinExperts, they are free
        to override this function.
        """
        assert w1.dim() == 3 and w2.dim() == 3
        E, N, _ = w1.size()
        K = a1.size(-1)
        if a1.dim() == 2:
            # Make sure we are using the correct a1 (pre-permute).
            assert topk_ids.size(0) == a1.size(0), f"{topk_ids.size(0)} != {a1.size(0)}"
            M = a1.size(0)
        else:
            # Batched format: (num_experts, max_num_tokens, K).
            assert a1.dim() == 3
            assert a1.size(0) == E, f"{a1.size(0)} == {E}"
            M = a1.size(1)  # This is max_num_tokens
        assert topk_ids.dim() == 2
        topk = topk_ids.size(1)
        return E, M, N, K, topk

    #
    # Various helpers for registering support for various features.
    # Used by the oracle to select a particular kernel for a deployment.
    #

    @staticmethod
    def is_supported_config(
        cls: type["FusedMoEPermuteExpertsUnpermute"],
        moe_config: FusedMoEConfig,
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
        activation_format: FusedMoEActivationFormat,
    ) -> tuple[bool, str | None]:
        """
        Check whether the kernel class `cls` supports the given deployment.
        Returns (True, None) when supported, otherwise (False, reason).
        """

        def _make_reason(reason: str) -> str:
            return f"kernel does not support {reason}"

        if not cls._supports_current_device():
            return False, _make_reason(f"current device {current_platform.device_name}")
        elif not (moe_config.is_act_and_mul or cls._supports_no_act_and_mul()):
            return False, _make_reason("no act_and_mul MLP layer")
        elif not cls._supports_activation(moe_config.activation):
            return False, _make_reason(f"{moe_config.activation} activation")
        elif not cls._supports_quant_scheme(weight_key, activation_key):
            return False, _make_reason(
                f"quantization scheme {weight_key}x{activation_key}"
            )
        elif not cls._supports_parallel_config(moe_config.moe_parallel_config):
            return False, _make_reason(
                f"parallel config {moe_config.moe_parallel_config}"
            )
        elif activation_format != cls.activation_format():
            return False, _make_reason(f"{activation_format.value} activation format")
        return True, None

    @staticmethod
    @abstractmethod
    def _supports_current_device() -> bool:
        """
        Whether the kernel supports the current device type
        (compute cability and current platform).
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _supports_no_act_and_mul() -> bool:
        """
        Whether the kernel supports act_and_mul=False, i.e.
        non-gated MoE models like Nemotron-Nano.
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _supports_quant_scheme(
        weight_key: QuantKey | None,
        activation_key: QuantKey | None,
    ) -> bool:
        # Whether the kernel supports the given weight/activation
        # quantization scheme pair.
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _supports_activation(activation: MoEActivation) -> bool:
        """
        Whether the kernel supports a particular act function.
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def _supports_parallel_config(moe_parallel_config: FusedMoEParallelConfig) -> bool:
        """
        Whether the kernel supports deployment in expert parallel.
        """
        raise NotImplementedError

    #
    # Various helpers for accessing quantization parameters from the
    # quant_config.
    #

    @property
    def quant_dtype(self) -> torch.dtype | str | None:
        return self.quant_config.quant_dtype

    @property
    def weight_quant_dtype(self) -> torch.dtype | str | None:
        return self.quant_config.weight_quant_dtype

    @property
    def block_shape(self) -> list[int] | None:
        return self.quant_config.block_shape

    @property
    def per_act_token_quant(self) -> bool:
        return self.quant_config.per_act_token_quant

    @property
    def per_out_ch_quant(self) -> bool:
        return self.quant_config.per_out_ch_quant

    @property
    def a1_scale(self) -> torch.Tensor | None:
        return self.quant_config.a1_scale

    @property
    def a2_scale(self) -> torch.Tensor | None:
        return self.quant_config.a2_scale

    @property
    def a1_gscale(self) -> torch.Tensor | None:
        return self.quant_config.a1_gscale

    @property
    def a2_gscale(self) -> torch.Tensor | None:
        return self.quant_config.a2_gscale

    @property
    def w1_scale(self) -> torch.Tensor | None:
        return self.quant_config.w1_scale

    @property
    def w2_scale(self) -> torch.Tensor | None:
        return self.quant_config.w2_scale

    @property
    def w1_zp(self) -> torch.Tensor | None:
        return self.quant_config.w1_zp

    @property
    def w2_zp(self) -> torch.Tensor | None:
        return self.quant_config.w2_zp

    @property
    def w1_bias(self) -> torch.Tensor | None:
        return self.quant_config.w1_bias

    @property
    def w2_bias(self) -> torch.Tensor | None:
        return self.quant_config.w2_bias

    @property
    def g1_alphas(self) -> torch.Tensor | None:
        return self.quant_config.g1_alphas

    @property
    def g2_alphas(self) -> torch.Tensor | None:
        return self.quant_config.g2_alphas

    # TODO (bnell): make this return a CHUNK_SIZE or None instead?
    @abstractmethod
    def supports_chunking(self) -> bool:
        """
        A flag indicating whether or not this class supports activation
        chunking.
        """
        raise NotImplementedError

    @abstractmethod
    def supports_expert_map(self) -> bool:
        """
        A flag indicating whether or not this class supports expert maps
        """
        raise NotImplementedError

    def supports_packed_ue8m0_act_scales(self) -> bool:
        """
        A flag indicating whether or not this class can process packed ue8m0
        activation scales.
        """
        return False

    def workspace_dtype(self, act_dtype: torch.dtype) -> torch.dtype:
        """
        Workspace type: The dtype to use for the workspace tensors.
        """
        return act_dtype

    @abstractmethod
    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        """
        Compute the shapes for the temporary and final outputs of the two gemms
        and activation in the fused expert function. Since the gemms are
        independent, the workspace for the first gemm can be shared with the
        workspace for the last gemm.

        Inputs:
        - M: number of tokens.
        - N: Row (or column) dimension of expert weights.
        - K: hidden dimension
        - topk: The number of top-k experts to select.
        - global_num_experts: global number of experts.
        - local_num_experts: local number of experts due to DP/EP.
        - expert_tokens_meta: number of tokens per expert metadata for batched
          format.
        - activation: the activation function applied between the gemms.

        Returns a tuple of:
        - workspace13 shape tuple: must be large enough to hold the
          result of either expert gemm.
        - workspace2 shape tuple: must be large enough to hold the
          result of the activation function.
        - output shape tuple: must be exact size of the final gemm output.
        - Note: workspace shapes can be 0 if the workspace is not needed.
          But in order for activation chunking to work, the first dimension
          of each tuple must be the number of tokens when the shape is
          not 0.
        """
        raise NotImplementedError

    @staticmethod
    def adjust_N_for_activation(N: int, activation: MoEActivation) -> int:
        """
        Calculate the output dimension for the activation function.
        For *_no_mul activations (e.g. relu2_no_mul),
        there's no gate/up split, so output size equals input size (N).
        For regular gated activations (e.g., silu, gelu, swigluoai),
        output size is N // 2 due to gate × activation(up) multiplication.

        Args:
            N: The intermediate size (width of w1/w3 weights).
            activation: The activation function enum.
        Returns:
            The output dimension after activation.
        """
        return N if not activation.is_gated else N // 2

    def activation(
        self, activation: MoEActivation, output: torch.Tensor, input: torch.Tensor
    ) -> None:
        # Apply the activation function to `input`, writing into `output`.
        apply_moe_activation(activation, output, input)

    def enable_chunking(self):
        # Chunking requires both kernel support and the env-var opt-in.
        return (
            envs.VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING and self.supports_chunking()
        )

    def finalize_weight_and_reduce_impl(self) -> TopKWeightAndReduce:
        # Subclasses provide the TopKWeightAndReduce used by finalize.
        raise NotImplementedError

    @abstractmethod
    def apply(
        self,
        output: torch.Tensor,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        a1q_scale: torch.Tensor | None,
        a2_scale: torch.Tensor | None,
        workspace13: torch.Tensor,
        workspace2: torch.Tensor,
        expert_tokens_meta: ExpertTokensMetadata | None,
        apply_router_weight_on_input: bool,
    ) -> None:
        """
        This function computes the intermediate result of a Mixture of Experts
        (MoE) layer using two sets of weights, w1 and w2.

        Parameters:
        - output: (torch.Tensor): The unweighted, unreduced output tensor.
        - hidden_states: (torch.Tensor): The (quantized) input tensor to the MoE
          layer.
        - w1 (torch.Tensor): The first set of expert weights.
        - w2 (torch.Tensor): The second set of expert weights.
        - topk_weights: A map of row to expert weights. Some implementations
          choose to do weight application.
        - topk_ids (torch.Tensor): A map of row to expert id.
        - activation (str): The activation function to apply after the first
          MoE layer.
        - global_num_experts (int): The total number of experts in the global
          expert space.
        - expert_map (Optional[torch.Tensor]): A tensor mapping expert indices
          from the global expert space to the local expert space of the expert
          parallel shard.
        - a1q_scale (Optional[torch.Tensor]): Optional quantized scale to be
          used for a1. Result of quantization from prepare/finalize and not
          from the FusedMoEQuantConfig.
        - a2_scale (Optional[torch.Tensor]): Optional scale for the second
          gemm input.
        - workspace13 (torch.Tensor): A scratch tensor used for gemm outputs
          must be large enough to hold output of either MoE gemm.
        - workspace2 (torch.Tensor): A scratch tensor used for the activation
          function.
        - expert_tokens_meta (Optional[ExpertTokensMetadata]) - An optional
          ExpertTokensMetadata object containing gpu/cpu tensors
          as big as the number of local experts with the information about the
          number of tokens assigned to each local expert.
        - apply_router_weight_on_input: True if router weights are already
          applied on the input. This is relevant if the implementation
          chooses to do weight application.
        """
        raise NotImplementedError
def _slice_scales(
scales: torch.Tensor | None, start: int, end: int
) -> torch.Tensor | None:
if scales is not None:
if scales.numel() == 1:
return scales
else:
return scales[start:end]
return None
@final
class FusedMoEModularKernel(torch.nn.Module):
    """
    This class combines a FusedMoEPrepareAndFinalize instance and
    a FusedMoEPermuteExpertsUnpermute to provide an interface that
    is compatible with the `fused_experts` function in fused_moe.py.
    It takes care of managing any required scratch space.
    Note: Instances of this class should only be used for a single model
    layer due to any layer specific state that may be used by the component
    objects.
    """

    def __init__(
        self,
        prepare_finalize: FusedMoEPrepareAndFinalize,
        fused_experts: FusedMoEPermuteExpertsUnpermute,
        shared_experts: torch.nn.Module | None = None,
        moe_parallel_config: FusedMoEParallelConfig | None = None,
        inplace: bool = False,
    ):
        """
        - prepare_finalize: Handles the quantize/dispatch and combine steps.
        - fused_experts: The permute/experts/unpermute compute kernel. Its
          activation format must match that of prepare_finalize.
        - shared_experts: Optional shared experts module; when set, its
          compute is overlapped with the combine step where async finalize
          is supported.
        - moe_parallel_config: Optional parallelism description, used to
          detect DP+EP deployments (affects worst-case buffer allocation
          during profiling runs).
        - inplace: When True, the final output is written into the
          hidden_states tensor passed to forward().
        """
        super().__init__()
        self.prepare_finalize = prepare_finalize
        self.fused_experts = fused_experts
        self.shared_experts = shared_experts
        self.inplace = inplace
        # prefer an explicit FusedMoEParallelConfig when available (from
        # FusedMoE layers / tests).
        # if not provided, assume this kernel is
        # running in a non-DP+EP context
        self.moe_parallel_config: FusedMoEParallelConfig | None = moe_parallel_config
        self.is_dp_ep = (
            moe_parallel_config is not None
            and moe_parallel_config.dp_size > 1
            and moe_parallel_config.use_ep
        )
        self._post_init_setup()
        # The two components must agree on the activation layout.
        assert (
            prepare_finalize.activation_format == fused_experts.activation_format()
        ), (
            f"{prepare_finalize.__class__.__name__}."
            f"{prepare_finalize.activation_format} == "
            f"{fused_experts.__class__.__name__}."
            f"{fused_experts.activation_format()}"
        )

    def _post_init_setup(self):
        """
        Resolve any leftover setup dependencies between self.prepare_finalize
        and self.fused_experts here.
        """
        self.prepare_finalize.post_init_setup(self.fused_experts)

    def supports_expert_map(self) -> bool:
        """
        A flag indicating whether or not this class supports expert maps.
        """
        return self.fused_experts.supports_expert_map()

    def output_is_reduced(self) -> bool:
        """
        Indicates whether or not the output of fused MoE kernel
        is reduced across all ranks.
        """
        return self.prepare_finalize.output_is_reduced()

    def _chunk_info(self, M: int) -> tuple[int, int]:
        """
        Compute number of chunks and chunk size for given M.
        If chunking is not supported, set the CHUNK_SIZE to M so we
        get num_chunks == 1. Take max(M, 1) to avoid divide by zero.
        If there are no tokens to process, the number of chunks will be zero.
        """
        CHUNK_SIZE = max(
            1,
            (
                M
                if not self.fused_experts.enable_chunking()
                else min(M, envs.VLLM_FUSED_MOE_CHUNK_SIZE)
            ),
        )
        num_chunks = cdiv(M, CHUNK_SIZE)
        # If there are no tokens, then there should be no loop iterations.
        assert M > 0 or num_chunks == 0
        return num_chunks, CHUNK_SIZE

    def _allocate_buffers(
        self,
        out_dtype: torch.dtype,
        device: torch.device,
        M_chunk: int,
        M_full: int,
        N: int,
        K: int,
        top_k: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Allocate temporary and output buffers for the fused experts op.
        Inputs:
        - out_dtype: output type of workspace and output tensors.
        - device: the device of the workspace and output tensors.
        See `workspace_shapes` for a description of the remainder of arguments.
        Returns a tuple of (workspace13, workspace2, output) tensors.
        """
        assert M_full > 0 and M_chunk > 0
        num_chunks, _ = self._chunk_info(M_full)
        workspace_dtype = self.fused_experts.workspace_dtype(out_dtype)
        # Force worst-case allocation in profiling run for
        # "mk.FusedMoEModularKernel.Standard" formats where this is only bounded
        # by `VLLM_FUSED_MOE_CHUNK_SIZE` and may not be seen during profiling with
        # DP+EP due to the random token routing.
        is_profile_run = (
            is_forward_context_available()
            and get_forward_context().attn_metadata is None
        )
        if is_profile_run and self.fused_experts.enable_chunking() and self.is_dp_ep:
            max_workspace_13, max_workspace_2, max_fused_out_shape = (
                self.fused_experts.workspace_shapes(
                    envs.VLLM_FUSED_MOE_CHUNK_SIZE,
                    N,
                    K,
                    top_k,
                    global_num_experts,
                    local_num_experts,
                    # expert_tokens_meta help in allocating optimal/minimal
                    # amount of workspace. Mark it None, so we allocate for
                    # the worst-case scenario.
                    expert_tokens_meta=None,
                    activation=activation,
                )
            )
            # Reserve the worst-case buffers so the memory profiler sees them.
            current_workspace_manager().get_simultaneous(
                (max_workspace_13, workspace_dtype),
                (max_workspace_2, workspace_dtype),
                (max_fused_out_shape, out_dtype),
            )
        # Get intermediate workspace shapes based off the chunked M size.
        workspace13_shape, workspace2_shape, _ = self.fused_experts.workspace_shapes(
            M_chunk,
            N,
            K,
            top_k,
            global_num_experts,
            local_num_experts,
            expert_tokens_meta,
            activation,
        )
        # Get final output shape based on the full M size.
        _, _, fused_out_shape = self.fused_experts.workspace_shapes(
            M_full,
            N,
            K,
            top_k,
            global_num_experts,
            local_num_experts,
            expert_tokens_meta,
            activation,
        )
        # We can reuse the memory between cache1 and cache3 because by the
        # time we need cache3, we're done with cache1.
        # Construct the entire output that can then be processed in chunks.
        # Reuse workspace13 for the output in the non-chunked case.
        # This will not always be the case for standard
        # format experts and with experts that have empty workspaces.
        if num_chunks == 1:
            max_shape_size = max(prod(workspace13_shape), prod(fused_out_shape))
            common_workspace, workspace2 = current_workspace_manager().get_simultaneous(
                ((max_shape_size,), workspace_dtype),
                (workspace2_shape, workspace_dtype),
            )
            workspace13 = _resize_cache(common_workspace, workspace13_shape)
            fused_out = _resize_cache(common_workspace, fused_out_shape)
        else:
            workspace13, workspace2, fused_out = (
                current_workspace_manager().get_simultaneous(
                    (workspace13_shape, workspace_dtype),
                    (workspace2_shape, workspace_dtype),
                    (fused_out_shape, out_dtype),
                )
            )
        return workspace13, workspace2, fused_out

    @staticmethod
    def _slice_output_tensor(
        fused_out: torch.Tensor,
        chunk_idx: int,
        num_chunks: int,
        CHUNK_SIZE: int,
        M: int,
    ) -> torch.Tensor:
        """
        Return the slice of `fused_out` corresponding to chunk `chunk_idx`.
        The output's first dimension may be a multiple of M (e.g. M * topk),
        so the chunk boundaries are scaled by that factor.
        """
        if num_chunks == 1:
            return fused_out
        assert fused_out.size(0) % M == 0, f"fused_out shape {fused_out.shape} vs M {M}"
        factor = fused_out.size(0) // M
        out_chunk_size = CHUNK_SIZE * factor
        s = chunk_idx * out_chunk_size
        e = min(s + out_chunk_size, fused_out.size(0))
        return fused_out[s:e]

    @staticmethod
    def _slice_expert_tokens_metadata(
        num_chunks: int,
        full_expert_tokens_meta: ExpertTokensMetadata | None,
        chunk_topk_ids: torch.Tensor,
        local_num_experts: int,
        expert_map: torch.Tensor | None,
    ) -> ExpertTokensMetadata | None:
        """
        Build per-chunk ExpertTokensMetadata by recounting the tokens per
        expert for `chunk_topk_ids`. Returns the full metadata unchanged
        when chunking is disabled or no metadata was provided.
        """
        if num_chunks == 1 or full_expert_tokens_meta is None:
            return full_expert_tokens_meta
        # The existing expert_num_tokens is for the entire a1q
        # input. Chunking forces recomputation of the number
        # of tokens assigned to each expert.
        c_expert_num_tokens = count_expert_num_tokens(
            chunk_topk_ids, local_num_experts, expert_map
        )
        c_expert_num_tokens_cpu = None
        need_expert_num_tokens_cpu = (
            full_expert_tokens_meta.expert_num_tokens_cpu is not None
        )
        if need_expert_num_tokens_cpu:
            # This is blocking as some implementations need the count
            # on the CPU to determine appropriate input/out fused-moe
            # buffers
            c_expert_num_tokens_cpu = c_expert_num_tokens.to("cpu", non_blocking=False)
        return ExpertTokensMetadata(
            expert_num_tokens=c_expert_num_tokens,
            expert_num_tokens_cpu=c_expert_num_tokens_cpu,
        )

    def _prepare(
        self,
        hidden_states: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        global_num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
    ) -> tuple[
        torch.Tensor,
        torch.Tensor | None,
        ExpertTokensMetadata | None,
        torch.Tensor,
        torch.Tensor,
    ]:
        """
        The _prepare method is a wrapper around self.prepare_finalize.prepare
        that handles DBO and async.

        Returns (a1q, a1q_scale, expert_tokens_meta, topk_ids, topk_weights),
        where topk_ids/topk_weights may have been replaced by the dispatched
        versions gathered from other EP ranks.
        """
        if not self.prepare_finalize.supports_async():
            # We shouldn't be running an a2a kernel that doesn't
            # support async prepare/finalize
            # TODO(lucas): enable in follow-up
            assert not dbo_enabled()
            (
                a1q,
                a1q_scale,
                expert_tokens_meta,
                _expert_topk_ids,
                _expert_topk_weights,
            ) = self.prepare_finalize.prepare(
                hidden_states,
                topk_weights,
                topk_ids,
                global_num_experts,
                expert_map,
                apply_router_weight_on_input,
                self.fused_experts.quant_config,
                defer_input_quant=self.fused_experts.expects_unquantized_inputs,
            )
        else:
            # Overlap shared expert compute with all2all dispatch.
            dbo_maybe_run_recv_hook()
            prepare_ret = self.prepare_finalize.prepare_async(
                hidden_states,
                topk_weights,
                topk_ids,
                global_num_experts,
                expert_map,
                apply_router_weight_on_input,
                self.fused_experts.quant_config,
                defer_input_quant=self.fused_experts.expects_unquantized_inputs,
            )
            # TODO(lucas): refactor this in the alternative schedules followup
            # currently unpack if we have hook + receiver pair or just
            # receiver (see finalize_async docstring)
            hook, receiver = (
                prepare_ret if isinstance(prepare_ret, tuple) else (None, prepare_ret)
            )
            if hook is not None:
                if dbo_enabled():
                    # If DBO is being used, register the hook with the ubatch
                    # context and call it in dbo_maybe_run_recv_hook instead of
                    # passing it to the receiver.
                    dbo_register_recv_hook(hook)
                    dbo_yield()
                else:
                    hook()
            (
                a1q,
                a1q_scale,
                expert_tokens_meta,
                _expert_topk_ids,
                _expert_topk_weights,
            ) = receiver()
        # Maybe prepare gathered topk_ids and topk_weights from other EP ranks.
        topk_ids = topk_ids if _expert_topk_ids is None else _expert_topk_ids
        topk_weights = (
            topk_weights if _expert_topk_weights is None else _expert_topk_weights
        )
        return a1q, a1q_scale, expert_tokens_meta, topk_ids, topk_weights

    def _fused_experts(
        self,
        in_dtype: torch.dtype,
        a1q: torch.Tensor,
        a1q_scale: torch.Tensor | None,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation,
        global_num_experts: int,
        local_num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        expert_tokens_meta: ExpertTokensMetadata | None,
    ) -> torch.Tensor:
        """
        Run self.fused_experts.apply over the (possibly chunked) dispatched
        input `a1q` and return the unweighted, unreduced expert output.
        Chunking bounds each apply() call to at most CHUNK_SIZE tokens.
        """
        _, M_full, N, K, top_k = self.fused_experts.moe_problem_size(
            a1q, w1, w2, topk_ids
        )
        num_chunks, CHUNK_SIZE = self._chunk_info(M_full)

        def input_chunk_range(chunk_idx: int) -> tuple[int, int]:
            # Returns the [start, end) token range of the given chunk.
            if num_chunks == 1:
                # Use a1q.size(0) here since batched format does not
                # keep M in the first dimension.
                return 0, a1q.size(0)
            else:
                s = chunk_idx * CHUNK_SIZE
                e = min(s + CHUNK_SIZE, M_full)
                return s, e

        # This happens when none of the tokens from the all2all reach this
        # EP rank. Also, note that this is only relevant for CUDAGraph
        # incompatible all2all kernels like the DeepEP high-throughput
        # kernels. CUDAGraph compatible all2all kernels like the DeepEP
        # low-latency kernels are always batched and can never run into
        # the tensor.numel() == 0 case.
        if M_full == 0:
            assert num_chunks == 0
            workspace13 = None
            workspace2 = None
            fused_out = torch.empty_like(a1q, dtype=in_dtype)
        else:
            assert num_chunks > 0
            workspace13, workspace2, fused_out = self._allocate_buffers(
                in_dtype,
                a1q.device,
                CHUNK_SIZE,
                M_full,
                N,
                K,
                top_k,
                global_num_experts,
                local_num_experts,
                expert_tokens_meta,
                activation,
            )
        for chunk_idx in range(num_chunks):
            s, e = input_chunk_range(chunk_idx)
            c_expert_tokens_meta = self._slice_expert_tokens_metadata(
                num_chunks,
                expert_tokens_meta,
                topk_ids[s:e],
                local_num_experts,
                expert_map,
            )
            c_fused_out = self._slice_output_tensor(
                fused_out, chunk_idx, num_chunks, CHUNK_SIZE, M_full
            )
            self.fused_experts.apply(
                output=c_fused_out,
                hidden_states=a1q[s:e],
                w1=w1,
                w2=w2,
                topk_weights=topk_weights[s:e],
                topk_ids=topk_ids[s:e],
                activation=activation,
                global_num_experts=global_num_experts,
                expert_map=expert_map,
                a1q_scale=_slice_scales(a1q_scale, s, e),
                a2_scale=_slice_scales(self.fused_experts.a2_scale, s, e),
                workspace13=workspace13,
                workspace2=workspace2,
                expert_tokens_meta=c_expert_tokens_meta,
                apply_router_weight_on_input=apply_router_weight_on_input,
            )
        return fused_out

    def _finalize(
        self,
        output: torch.Tensor,
        fused_out: torch.Tensor,
        hidden_states: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        shared_experts_input: torch.Tensor | None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        The _finalize method is a wrapper around self.prepare_finalize.finalize
        that handles DBO, async and shared expert overlap.
        Args:
            shared_experts_input: Optional separate input for shared experts.
                When latent MoE is used, hidden_states is the latent-projected
                tensor (smaller dimension) used by routed experts, while
                shared_experts_input is the original hidden_states (full
                dimension) needed by the shared expert MLP.
        Returns `output`, or (shared_output, output) when shared experts
        are configured.
        """
        shared_output: torch.Tensor | None = None
        # For latent MoE: shared experts need the original hidden_states
        # (full hidden_size), not the latent-projected version used by
        # routed experts.
        se_hidden_states = (
            shared_experts_input if shared_experts_input is not None else hidden_states
        )
        if not self.prepare_finalize.supports_async():
            assert not dbo_enabled()
            self.prepare_finalize.finalize(
                output,
                fused_out,
                topk_weights,
                topk_ids,
                apply_router_weight_on_input,
                self.fused_experts.finalize_weight_and_reduce_impl(),
            )
            if self.shared_experts is not None:
                shared_output = self.shared_experts(se_hidden_states)
        else:
            finalize_ret = self.prepare_finalize.finalize_async(
                output,
                fused_out,
                topk_weights,
                topk_ids,
                apply_router_weight_on_input,
                self.fused_experts.finalize_weight_and_reduce_impl(),
            )
            # Overlap shared expert compute with the all2all combine.
            if self.shared_experts is not None:
                shared_output = self.shared_experts(se_hidden_states)
            # TODO(lucas): refactor this in the alternative schedules followup
            # currently unpack if we have hook + receiver pair or just
            # receiver (see finalize_async docstring)
            hook, receiver = (
                finalize_ret
                if isinstance(finalize_ret, tuple)
                else (None, finalize_ret)
            )
            if hook is not None:
                if dbo_enabled():
                    # If DBO is being used, register the hook with the ubatch
                    # context and call it in dbo_maybe_run_recv_hook instead of
                    # passing it to the receiver.
                    dbo_register_recv_hook(hook)
                    dbo_yield()
                else:
                    hook()
            receiver()
        if self.shared_experts is None:
            return output
        else:
            assert shared_output is not None
            return shared_output, output

    def forward(
        self,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        activation: MoEActivation = MoEActivation.SILU,
        global_num_experts: int = -1,
        expert_map: torch.Tensor | None = None,
        apply_router_weight_on_input: bool = False,
        shared_experts_input: torch.Tensor | None = None,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        """
        This function computes a Mixture of Experts (MoE) layer using two sets
        of weights, w1 and w2, and top-k gating mechanism.

        Parameters:
        - hidden_states: (torch.Tensor): The input tensor to the MoE layer.
        - w1 (torch.Tensor): The first set of expert weights.
        - w2 (torch.Tensor): The second set of expert weights.
        - topk_weights (torch.Tensor): The topk weights applied at the end of
          the layer.
        - topk_ids (torch.Tensor): A map of row to expert id.
        - activation (MoEActivation): The activation function to apply after the first
          MoE layer.
        - global_num_experts (int): The total number of experts in the global
          expert space.
        - expert_map (Optional[torch.Tensor]): A tensor mapping expert indices
          from the global expert space to the local expert space of the expert
          parallel shard.
        - apply_router_weight_on_input (bool): When true, the topk weights are
          applied directly on the inputs. This is only applicable when topk is
          1.
        - shared_experts_input (Optional[torch.Tensor]): Optional separate
          input for shared experts. For latent MoE, this is the original
          hidden_states before latent projection.

        Returns:
        - torch.Tensor: The output tensor after applying the MoE layer.
        """
        if self.inplace:
            assert self.shared_experts is None
            assert not disable_inplace()
            output = hidden_states
        else:
            output = torch.zeros_like(hidden_states)
        local_num_experts = w1.size(0)
        if global_num_experts == -1:
            global_num_experts = local_num_experts
        # Step 1: quantize + dispatch.
        a1q, a1q_scale, expert_tokens_meta, topk_ids, topk_weights = self._prepare(
            hidden_states,
            topk_weights,
            topk_ids,
            global_num_experts,
            expert_map,
            apply_router_weight_on_input,
        )
        # Step 2: run the expert compute kernel (possibly chunked).
        fused_out = self._fused_experts(
            in_dtype=hidden_states.dtype,
            a1q=a1q,
            a1q_scale=a1q_scale,
            w1=w1,
            w2=w2,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            activation=activation,
            global_num_experts=global_num_experts,
            local_num_experts=local_num_experts,
            expert_map=expert_map,
            apply_router_weight_on_input=apply_router_weight_on_input,
            expert_tokens_meta=expert_tokens_meta,
        )
        # Step 3: combine + weight application + reduction.
        return self._finalize(
            output,
            fused_out,
            hidden_states,
            topk_weights,
            topk_ids,
            apply_router_weight_on_input,
            shared_experts_input=shared_experts_input,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/modular_kernel.py",
"license": "Apache License 2.0",
"lines": 1244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/prepare_finalize.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.distributed import get_ep_group
from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig
from vllm.model_executor.layers.fused_moe.topk_weight_and_reduce import (
TopKWeightAndReduceContiguous,
TopKWeightAndReduceDelegate,
)
from vllm.model_executor.layers.fused_moe.utils import moe_kernel_quantize_input
from vllm.utils.flashinfer import nvfp4_block_scale_interleave
class MoEPrepareAndFinalizeNaiveEP(mk.FusedMoEPrepareAndFinalize):
    """Expert-parallel prepare/finalize that routes activations through the
    EP group's generic all2all ``dispatch``/``combine`` collectives.
    """

    def __init__(
        self,
        is_sequence_parallel: bool = False,
        num_dispatchers: int = 1,
    ) -> None:
        super().__init__()
        # Whether dispatch/combine operate over the sequence-parallel group.
        self.is_sequence_parallel = is_sequence_parallel
        self._num_dispatchers = num_dispatchers

    @property
    def activation_format(self) -> mk.FusedMoEActivationFormat:
        return mk.FusedMoEActivationFormat.Standard

    def max_num_tokens_per_rank(self) -> int | None:
        # No per-rank token limit for the naive path.
        return None

    def topk_indices_dtype(self) -> torch.dtype | None:
        return None

    def num_dispatchers(self) -> int:
        return self._num_dispatchers

    def output_is_reduced(self) -> bool:
        return False

    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool = False,
    ) -> mk.PrepareResultType:
        """Quantize (unless deferred) and dispatch activations to EP ranks.

        Returns (a1q, a1q_scale, expert_tokens_meta, topk_ids, topk_weights);
        expert_tokens_meta is always None for this implementation.
        """
        if apply_router_weight_on_input:
            topk = topk_ids.size(1)
            assert topk == 1, (
                "apply_router_weight_on_input is only implemented for topk=1"
            )
            # Note: do not use inplace for shared experts overlap
            a1 = a1 * topk_weights.to(a1.dtype)

        # Defer input quantization to the MoE kernel.
        use_nvfp4 = quant_config.use_nvfp4_w4a4
        if defer_input_quant:
            a1q = a1
            a1q_scale = None
        else:
            a1q, a1q_scale = moe_kernel_quantize_input(
                a1,
                quant_config.a1_gscale if use_nvfp4 else quant_config.a1_scale,
                quant_config.quant_dtype,
                quant_config.per_act_token_quant,
                quant_config.block_shape,
                # NOTE: swizzling pads the scales to multiple of 128
                # which makes the scales tensor different shape than
                # the hidden states, breaking the A2A kernel. So, we
                # delay the swizzling until after the A2A.
                is_fp4_scale_swizzled=False,
            )

        # Skip gathering scales if we have static quantization
        # (the scale is a scalar, replicated on all ranks) or
        # if quantization is deferred.
        skip_gather_scales = a1q_scale is None or a1q_scale.ndim == 0
        scales = None if skip_gather_scales else [a1q_scale]
        res = get_ep_group().dispatch(
            a1q,
            topk_weights,
            topk_ids,
            is_sequence_parallel=self.is_sequence_parallel,
            extra_tensors=scales,
        )
        # The result shape depends on whether extra tensors were sent along.
        if skip_gather_scales:
            a1q, topk_weights, topk_ids = res
        else:
            a1q, topk_weights, topk_ids, scales = res
            assert scales is not None and len(scales) == 1
            a1q_scale = scales[0]
            # Swizzle nvfp4 scales post-A2A (see the NOTE above).
            if quant_config.quant_dtype == "nvfp4":
                assert a1q_scale is not None
                if a1q_scale.element_size() == 1:
                    a1q_scale = a1q_scale.view(torch.uint8)
                a1q_scale = nvfp4_block_scale_interleave(a1q_scale)

        return a1q, a1q_scale, None, topk_ids, topk_weights

    def finalize(
        self,
        output: torch.Tensor,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        weight_and_reduce_impl: mk.TopKWeightAndReduce,
    ) -> None:
        """Weight-and-reduce locally, then combine across EP ranks into
        ``output`` (written in place)."""
        if isinstance(weight_and_reduce_impl, TopKWeightAndReduceDelegate):
            weight_and_reduce_impl = TopKWeightAndReduceContiguous()
        # output=None: let the impl allocate; the combined result is copied
        # into `output` below.
        out = weight_and_reduce_impl.apply(
            output=None,
            fused_expert_output=fused_expert_output,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            apply_router_weight_on_input=apply_router_weight_on_input,
        )
        output.copy_(
            get_ep_group().combine(out, is_sequence_parallel=self.is_sequence_parallel)
        )
class MoEPrepareAndFinalizeNoEP(mk.FusedMoEPrepareAndFinalize):
    """MoE prepare and finalize without expert parallelism."""

    @property
    def activation_format(self) -> mk.FusedMoEActivationFormat:
        return mk.FusedMoEActivationFormat.Standard

    def max_num_tokens_per_rank(self) -> int | None:
        # No per-rank token cap in the non-EP path.
        return None

    def topk_indices_dtype(self) -> torch.dtype | None:
        return None

    def num_dispatchers(self) -> int:
        # Single rank, single dispatcher.
        return 1

    def output_is_reduced(self) -> bool:
        return False

    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool = False,
    ) -> mk.PrepareResultType:
        """Optionally fold router weights into the input, then quantize it
        (unless quantization is deferred to the expert kernel)."""
        if apply_router_weight_on_input:
            # TODO: this only works for topK=1, will need to update for topK>1
            assert topk_ids.size(1) == 1, (
                "apply_router_weight_on_input is only implemented for topk=1"
            )
            # Note: do not use inplace for shared experts overlap
            a1 = a1 * topk_weights.to(a1.dtype)

        # Defer input quant to moe kernel for backends (e.g. AITER, FI)
        # which use a single kernel call for quant + experts.
        if defer_input_quant:
            return a1, None, None, None, None

        if quant_config.use_nvfp4_w4a4:
            scale = quant_config.a1_gscale
        else:
            scale = quant_config.a1_scale
        quantized, quantized_scale = moe_kernel_quantize_input(
            a1,
            scale,
            quant_config.quant_dtype,
            quant_config.per_act_token_quant,
            quant_config.block_shape,
        )
        return quantized, quantized_scale, None, None, None

    def finalize(
        self,
        output: torch.Tensor,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        weight_and_reduce_impl: mk.TopKWeightAndReduce,
    ) -> None:
        """Apply topk weights and reduce the expert outputs into ``output``."""
        impl = weight_and_reduce_impl
        if isinstance(impl, TopKWeightAndReduceDelegate):
            impl = TopKWeightAndReduceContiguous()
        impl.apply(
            output=output,
            fused_expert_output=fused_expert_output,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            apply_router_weight_on_input=apply_router_weight_on_input,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/prepare_finalize.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.deep_gemm_moe import (
DeepGemmExperts,
_valid_deep_gemm,
_valid_deep_gemm_shape,
)
from vllm.model_executor.layers.fused_moe.fallback import FallbackExperts
from vllm.model_executor.layers.fused_moe.fused_moe import TritonExperts
from vllm.utils.deep_gemm import (
is_deep_gemm_e8m0_used,
)
class TritonOrDeepGemmExperts(FallbackExperts):
    """DeepGemm with fallback to Triton for low latency shapes."""

    def __init__(self, moe_config: FusedMoEConfig, quant_config: FusedMoEQuantConfig):
        super().__init__(
            experts=DeepGemmExperts(moe_config, quant_config),
            fallback_experts=TritonExperts(moe_config, quant_config),
        )

    @staticmethod
    def get_clses() -> tuple[
        type[mk.FusedMoEPermuteExpertsUnpermute],
        type[mk.FusedMoEPermuteExpertsUnpermute],
    ]:
        # (primary, fallback) implementation classes.
        return (DeepGemmExperts, TritonExperts)

    def workspace_shapes(
        self,
        M: int,
        N: int,
        K: int,
        topk: int,
        global_num_experts: int,
        local_num_experts: int,
        expert_tokens_meta: mk.ExpertTokensMetadata | None,
        activation: MoEActivation,
    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
        # Note: the deep gemm workspaces are strictly larger than the triton
        # workspaces so we can be pessimistic here and allocate for DeepGemm
        # even if we fall back to triton later, e.g. if expert maps are set.
        prefer_deep_gemm = is_deep_gemm_e8m0_used() or _valid_deep_gemm_shape(M, N, K)
        impl = self.experts if prefer_deep_gemm else self.fallback_experts
        return impl.workspace_shapes(
            M,
            N,
            K,
            topk,
            global_num_experts,
            local_num_experts,
            expert_tokens_meta,
            activation,
        )

    def _select_experts_impl(
        self,
        hidden_states: torch.Tensor,
        w1: torch.Tensor,
        w2: torch.Tensor,
    ) -> mk.FusedMoEPermuteExpertsUnpermute:
        # Runtime selection: DeepGemm when supported, Triton otherwise.
        if is_deep_gemm_e8m0_used() or _valid_deep_gemm(hidden_states, w1, w2):
            return self.experts
        return self.fallback_experts
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/triton_deep_gemm_moe.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_rocm_aiter_topk.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# This is a test for the AITER ops.
# It tests if the AITER ops are
# 1. correctly registered as custom ops
# 2. correctly defined the relationship between
# implementation and fake function
# 3. can be used with torch.compile
# This file will be skipped if AITER is not installed
# and the platform is not ROCm.
import importlib.util
import os
import pytest
import torch
from vllm.platforms import current_platform
if not current_platform.is_rocm():
pytest.skip("This test can only run on ROCm.", allow_module_level=True)
# This environment variable must be set so ops will be registered.
os.environ["VLLM_ROCM_USE_AITER"] = "1"
# this import statement is needed to ensure the ops are registered
import vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe # noqa: F401
# need to import once to ensure the ops are registered
# Check if aiter package is installed
aiter_available = importlib.util.find_spec("aiter") is not None
if not aiter_available:
pytest.skip("These tests require AITER to run.", allow_module_level=True)
def test_rocm_aiter_biased_grouped_topk_custom_op_registration():
    """Test that the custom op is correctly registered."""
    # The op must exist under torch.ops.vllm and be invocable.
    op = getattr(torch.ops.vllm, "rocm_aiter_biased_grouped_topk", None)
    assert op is not None
    assert callable(op)
def test_rocm_aiter_grouped_topk_custom_op_registration():
    """Test that the custom op is correctly registered."""
    # The op must exist under torch.ops.vllm and be invocable.
    op = getattr(torch.ops.vllm, "rocm_aiter_grouped_topk", None)
    assert op is not None
    assert callable(op)
def test_rocm_aiter_biased_grouped_topk_torch_compile_compatibility():
    """Test that the biased grouped-topk op can be used with torch.compile.

    Runs the op eagerly and compiled, then checks the (sorted) results match.
    """
    # Create test tensors
    token = 64
    expert = 256
    num_expert_group = 8
    topk = 8
    topk_group = 4
    renormalize = True
    scale_factor = 1.0

    gating_output = torch.randn((token, expert), dtype=torch.bfloat16, device="cuda")
    e_score_correction_bias = torch.randn(
        (expert,), dtype=torch.bfloat16, device="cuda"
    )

    device = gating_output.device
    topk_ids = torch.empty((token, topk), dtype=torch.int32, device=device)
    topk_weights = torch.empty((token, topk), dtype=torch.float32, device=device)

    # Define a function that uses the op
    def biased_grouped_topk_fn(
        gating_output, e_score_correction_bias, topk_weights, topk_ids
    ):
        return torch.ops.vllm.rocm_aiter_biased_grouped_topk(
            gating_output,
            e_score_correction_bias,
            topk_weights,
            topk_ids,
            num_expert_group,
            topk_group,
            renormalize,
            scale_factor,
        )

    # Verify the op's fake implementation
    torch.library.opcheck(
        torch.ops.vllm.rocm_aiter_biased_grouped_topk,
        (gating_output, e_score_correction_bias, topk_weights, topk_ids),
        kwargs={
            "num_expert_group": num_expert_group,
            "topk_group": topk_group,
            "need_renorm": renormalize,
            "routed_scaling_factor": scale_factor,
        },
        # Fix: was ("test_faketensor") — a bare parenthesized string, not a
        # tuple. Use an explicit 1-tuple so the intent (a sequence of test
        # names) is unambiguous.
        test_utils=("test_faketensor",),
    )

    # Compile the function with appropriate settings
    compiled_fn = torch.compile(
        biased_grouped_topk_fn,
        fullgraph=True,
        backend="inductor",
        mode="reduce-overhead",
        dynamic=False,
    )

    topk_weights_original = torch.empty(
        (token, topk), dtype=torch.float32, device=device
    )
    topk_ids_original = torch.empty((token, topk), dtype=torch.int32, device=device)

    topk_weights_compiled = torch.empty(
        (token, topk), dtype=torch.float32, device=device
    )
    topk_ids_compiled = torch.empty((token, topk), dtype=torch.int32, device=device)

    # Run both compiled (V1 graph mode) and uncompiled versions (V1 eager mode)
    biased_grouped_topk_fn(
        gating_output, e_score_correction_bias, topk_weights_original, topk_ids_original
    )
    compiled_fn(
        gating_output, e_score_correction_bias, topk_weights_compiled, topk_ids_compiled
    )

    # Sort the results for comparison since the order might not be deterministic
    topk_ids_original, indices_original = torch.sort(topk_ids_original)
    topk_weights_original = torch.gather(topk_weights_original, 1, indices_original)

    topk_ids_compiled, indices_compiled = torch.sort(topk_ids_compiled)
    topk_weights_compiled = torch.gather(topk_weights_compiled, 1, indices_compiled)

    # Verify results match
    assert torch.allclose(
        topk_weights_original, topk_weights_compiled, rtol=1e-2, atol=1e-2
    )
    # Expert ids are integers: require exact equality rather than allclose.
    assert torch.equal(topk_ids_original, topk_ids_compiled)
def test_rocm_aiter_grouped_topk_torch_compile_compatibility():
    """Test that the grouped-topk op can be used with torch.compile.

    Runs the op eagerly and compiled, then checks the (sorted) results match.
    """
    # Create test tensors
    token = 64
    expert = 256
    num_expert_group = 8
    topk = 8
    topk_group = 4
    renormalize = True
    scoring_func = "softmax"
    scale_factor = 1.0

    gating_output = torch.randn((token, expert), dtype=torch.bfloat16, device="cuda")

    device = gating_output.device
    topk_ids = torch.empty((token, topk), dtype=torch.int32, device=device)
    topk_weights = torch.empty((token, topk), dtype=torch.float32, device=device)

    # Define a function that uses the op
    def grouped_topk_fn(gating_output, topk_weights, topk_ids, scoring_func):
        return torch.ops.vllm.rocm_aiter_grouped_topk(
            gating_output,
            topk_weights,
            topk_ids,
            num_expert_group,
            topk_group,
            renormalize,
            scoring_func,
            scale_factor,
        )

    # Verify the op's fake implementation
    torch.library.opcheck(
        torch.ops.vllm.rocm_aiter_grouped_topk,
        (gating_output, topk_weights, topk_ids),
        kwargs={
            "num_expert_group": num_expert_group,
            "topk_group": topk_group,
            "need_renorm": renormalize,
            "scoring_func": scoring_func,
            "routed_scaling_factor": scale_factor,
        },
        # Fix: was ("test_faketensor") — a bare parenthesized string, not a
        # tuple. Use an explicit 1-tuple so the intent (a sequence of test
        # names) is unambiguous.
        test_utils=("test_faketensor",),
    )

    # Compile the function with appropriate settings
    compiled_fn = torch.compile(
        grouped_topk_fn,
        fullgraph=True,
        backend="inductor",
        mode="reduce-overhead",
        dynamic=False,
    )

    topk_weights_original = torch.empty(
        (token, topk), dtype=torch.float32, device=device
    )
    topk_ids_original = torch.empty((token, topk), dtype=torch.int32, device=device)

    topk_weights_compiled = torch.empty(
        (token, topk), dtype=torch.float32, device=device
    )
    topk_ids_compiled = torch.empty((token, topk), dtype=torch.int32, device=device)

    # Run both compiled (V1 graph mode) and uncompiled versions (V1 eager mode)
    grouped_topk_fn(
        gating_output, topk_weights_original, topk_ids_original, scoring_func
    )
    compiled_fn(gating_output, topk_weights_compiled, topk_ids_compiled, scoring_func)

    # Sort the results for comparison since the order might not be deterministic
    topk_ids_original, indices_original = torch.sort(topk_ids_original)
    topk_weights_original = torch.gather(topk_weights_original, 1, indices_original)

    topk_ids_compiled, indices_compiled = torch.sort(topk_ids_compiled)
    topk_weights_compiled = torch.gather(topk_weights_compiled, 1, indices_compiled)

    # Verify results match
    assert torch.allclose(
        topk_weights_original, topk_weights_compiled, rtol=1e-2, atol=1e-2
    )
    # Expert ids are integers: require exact equality rather than allclose.
    assert torch.equal(topk_ids_original, topk_ids_compiled)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_rocm_aiter_topk.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/distributed/device_communicators/all2all.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import torch
import vllm.envs as envs
from vllm.distributed import get_dp_group, get_ep_group
from vllm.forward_context import get_forward_context
from vllm.logger import init_logger
from vllm.utils.flashinfer import has_flashinfer_all2all
from vllm.utils.import_utils import has_deep_ep, has_mori
from .base_device_communicator import All2AllManagerBase, Cache
if has_flashinfer_all2all():
from flashinfer.comm import Mapping # type: ignore[import-not-found]
from flashinfer.comm.mnnvl import MnnvlConfig # type: ignore[import-not-found]
from flashinfer.comm.trtllm_alltoall import (
MnnvlMoe, # type: ignore[import-not-found]
)
logger = init_logger(__name__)
class NaiveAll2AllManager(All2AllManagerBase):
    """
    A naive implementation of all2all communication.
    It uses all-reduce under the hood, which is not
    efficient at all. The main purpose is for testing and
    debugging.
    """

    def __init__(self, cpu_group, tcp_store_group=None):
        super().__init__(cpu_group, tcp_store_group)

    def naive_multicast(
        self,
        x: torch.Tensor,
        cu_tokens_across_sp_cpu: torch.Tensor,
        is_sequence_parallel: bool,
    ) -> torch.Tensor:
        """Replicate every rank's rows of `x` to all ranks.

        Each rank copies its own slice into a full-size buffer, then each
        rank's slice is broadcast from that rank in turn.
        """
        assert len(x.shape) == 2
        buffer = torch.empty(
            (cu_tokens_across_sp_cpu[-1], x.size(1)), device=x.device, dtype=x.dtype
        )

        rank = self.rank if is_sequence_parallel else self.dp_rank
        world_size = self.world_size if is_sequence_parallel else self.dp_world_size

        # Copy this rank's tokens into its slot of the buffer.
        start = 0 if rank == 0 else cu_tokens_across_sp_cpu[rank - 1]
        end = cu_tokens_across_sp_cpu[rank]
        buffer[start:end, :].copy_(x)
        # Broadcast each rank's slot from its owner; every rank must
        # execute these collectives in the same order.
        for idx in range(world_size):
            start = 0 if idx == 0 else cu_tokens_across_sp_cpu[idx - 1]
            end = cu_tokens_across_sp_cpu[idx]
            get_ep_group().broadcast(buffer[start:end, :], idx)

        return buffer

    def dispatch_router_logits(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Multicast hidden_states and router_logits to all ranks."""
        if extra_tensors is not None:
            raise NotImplementedError(
                "extra_tensors is not supported for NaiveAll2AllManager"
            )
        sp_size = self.tp_group.world_size if is_sequence_parallel else 1
        dp_metadata = get_forward_context().dp_metadata
        assert dp_metadata is not None
        cu_tokens_across_sp_cpu = dp_metadata.cu_tokens_across_sp(sp_size)

        hidden_states = self.naive_multicast(
            hidden_states, cu_tokens_across_sp_cpu, is_sequence_parallel
        )
        router_logits = self.naive_multicast(
            router_logits, cu_tokens_across_sp_cpu, is_sequence_parallel
        )
        return hidden_states, router_logits

    def dispatch(
        self,
        hidden_states: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Multicast hidden_states and routing tensors to all ranks."""
        if extra_tensors is not None:
            raise NotImplementedError(
                "extra_tensors is not supported for NaiveAll2AllManager"
            )
        sp_size = self.tp_group.world_size if is_sequence_parallel else 1
        dp_metadata = get_forward_context().dp_metadata
        assert dp_metadata is not None
        cu_tokens_across_sp_cpu = dp_metadata.cu_tokens_across_sp(sp_size)

        hidden_states = self.naive_multicast(
            hidden_states, cu_tokens_across_sp_cpu, is_sequence_parallel
        )
        topk_weights = self.naive_multicast(
            topk_weights, cu_tokens_across_sp_cpu, is_sequence_parallel
        )
        topk_ids = self.naive_multicast(
            topk_ids, cu_tokens_across_sp_cpu, is_sequence_parallel
        )
        return hidden_states, topk_weights, topk_ids

    def combine(
        self, hidden_states: torch.Tensor, is_sequence_parallel: bool = False
    ) -> torch.Tensor:
        """All-reduce across ranks, then slice out this rank's rows."""
        ep_rank = self.rank if is_sequence_parallel else self.dp_rank

        dp_metadata = get_forward_context().dp_metadata
        assert dp_metadata is not None
        sp_size = self.tp_group.world_size if is_sequence_parallel else 1
        cu_tokens_across_sp_cpu = dp_metadata.cu_tokens_across_sp(sp_size)

        start = 0 if ep_rank == 0 else cu_tokens_across_sp_cpu[ep_rank - 1]
        end = cu_tokens_across_sp_cpu[ep_rank]

        all_hidden_states = get_ep_group().all_reduce(hidden_states)
        hidden_states = all_hidden_states[start:end, :]
        return hidden_states

    def destroy(self):
        # No persistent resources to release.
        pass
class AgRsAll2AllManager(All2AllManagerBase):
    """
    An implementation of all2all communication based on
    all-gather (dispatch) and reduce-scatter (combine).
    """

    def __init__(self, cpu_group, tcp_store_group=None):
        super().__init__(cpu_group, tcp_store_group)

    def _all_gather_tensors(
        self,
        tensors: list[torch.Tensor],
        extra_tensors: list[torch.Tensor] | None,
        is_sequence_parallel: bool,
    ) -> list[torch.Tensor]:
        """All-gather `tensors` (plus optional extras) across dp/ep ranks."""
        dp_metadata = get_forward_context().dp_metadata
        assert dp_metadata is not None
        sizes = dp_metadata.get_chunk_sizes_across_dp_rank()
        assert sizes is not None

        dist_group = get_ep_group() if is_sequence_parallel else get_dp_group()
        # Sanity-check: this rank's recorded chunk size matches its tokens.
        assert sizes[dist_group.rank_in_group] == tensors[0].shape[0]

        if extra_tensors is not None:
            tensors = tensors + extra_tensors
        return dist_group.all_gatherv(tensors, dim=0, sizes=sizes)

    def dispatch_router_logits(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> (
        tuple[torch.Tensor, torch.Tensor]
        | tuple[torch.Tensor, torch.Tensor, list[torch.Tensor]]
    ):
        """
        Gather hidden_states and router_logits from all dp ranks.
        """
        gathered = self._all_gather_tensors(
            [hidden_states, router_logits], extra_tensors, is_sequence_parallel
        )
        if extra_tensors is not None:
            return (gathered[0], gathered[1], gathered[2:])
        return gathered[0], gathered[1]

    def dispatch(
        self,
        hidden_states: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> (
        tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        | tuple[torch.Tensor, torch.Tensor, torch.Tensor, list[torch.Tensor]]
    ):
        """
        Gather hidden_states, topk_weights and topk_ids from all dp ranks.
        """
        gathered = self._all_gather_tensors(
            [hidden_states, topk_weights, topk_ids],
            extra_tensors,
            is_sequence_parallel,
        )
        if extra_tensors is None:
            return gathered[0], gathered[1], gathered[2]
        return gathered[0], gathered[1], gathered[2], gathered[3:]

    def combine(
        self, hidden_states: torch.Tensor, is_sequence_parallel: bool = False
    ) -> torch.Tensor:
        """
        Reduce-scatter hidden_states across all dp ranks.
        """
        dp_metadata = get_forward_context().dp_metadata
        assert dp_metadata is not None
        sizes = dp_metadata.get_chunk_sizes_across_dp_rank()
        assert sizes is not None

        dist_group = get_ep_group() if is_sequence_parallel else get_dp_group()
        return dist_group.reduce_scatterv(hidden_states, dim=0, sizes=sizes)

    def destroy(self):
        pass
class DeepEPAll2AllManagerBase(All2AllManagerBase):
    """
    All2All communication based on DeepEP High-Throughput kernels.
    """

    def __init__(self, cpu_group, tcp_store_group=None):
        assert has_deep_ep(), (
            "DeepEP kernels not found. Please follow https://github.com/vllm-project/vllm/blob/main/tools/ep_kernels/README.md"
            " to install DeepEP kernels."
        )  # noqa
        super().__init__(cpu_group, tcp_store_group)
        self.handle_cache = Cache()

        # This is the DeepEP default. Stick to it till we can establish
        # reasonable defaults based on profiling.
        self.num_sms = 20

    def get_handle(self, kwargs):
        # Subclasses construct and cache the deep_ep.Buffer.
        raise NotImplementedError

    def dispatch_router_logits(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        raise NotImplementedError

    def dispatch(
        self,
        hidden_states: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        is_sequence_parallel: bool = False,
        extra_tensors: list[torch.Tensor] | None = None,
    ) -> (
        tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        | tuple[torch.Tensor, torch.Tensor, torch.Tensor, list[torch.Tensor]]
    ):
        raise NotImplementedError

    def combine(
        self, hidden_states: torch.Tensor, is_sequence_parallel: bool = False
    ) -> torch.Tensor:
        raise NotImplementedError

    def destroy(self):
        # Tear down every cached DeepEP buffer under the cache lock, then
        # drop the cache entries themselves.
        with self.handle_cache._lock:
            for handle in self.handle_cache._cache.values():
                handle.destroy()
            self.handle_cache._cache.clear()
class DeepEPHTAll2AllManager(DeepEPAll2AllManagerBase):
    """
    All2All communication based on DeepEP High-Throughput kernels.
    """

    def __init__(self, cpu_group, tcp_store_group=None):
        super().__init__(cpu_group, tcp_store_group)

    def _make_all2all_kwargs(self) -> dict[Any, Any]:
        """Build the kwargs for constructing a deep_ep.Buffer.

        Returns a dict suitable for ``deep_ep.Buffer(**kwargs)``.
        """
        # Defaults for internode and intranode are taken from DeepEP tests.
        num_nvl_bytes = envs.VLLM_DEEPEP_BUFFER_SIZE_MB * 1024 * 1024
        # Fix: removed dead `= None` pre-initialization and the redundant
        # asserts that followed — both branches always assign both values.
        if self.internode and not envs.VLLM_DEEPEP_HIGH_THROUGHPUT_FORCE_INTRA_NODE:
            num_rdma_bytes = envs.VLLM_DEEPEP_BUFFER_SIZE_MB * 1024 * 1024
            num_qps_per_rank = self.num_sms // 2
        else:
            # Intra-node (or forced intra-node): no RDMA buffer needed.
            num_rdma_bytes = 0
            num_qps_per_rank = 1

        return dict(
            group=self.cpu_group,
            num_nvl_bytes=num_nvl_bytes,
            num_rdma_bytes=num_rdma_bytes,
            low_latency_mode=False,
            num_qps_per_rank=num_qps_per_rank,
            explicitly_destroy=True,
        )

    def get_handle(self, kwargs):
        """Return (creating and caching if needed) the deep_ep.Buffer."""
        assert len(kwargs) == 0, (
            "DeepEPHTAll2AllManager expects no arguments. All the required "
            "args are computed in the Manager itself."
        )
        import deep_ep  # type: ignore[import-not-found]

        buffer_kwargs = self._make_all2all_kwargs()
        logger.debug("DeepEP all2all args %s", buffer_kwargs)
        handle: deep_ep.Buffer = self.handle_cache.get_or_create(
            buffer_kwargs, deep_ep.Buffer
        )
        return handle

    def set_num_sms(self, num_sms: int):
        import deep_ep  # type: ignore[import-not-found]

        # Right now the buffers are sized for only what the kernels were
        # created with. So we can only reduce the number of SMS used
        # but not increase it.
        if num_sms > self.num_sms:
            num_sms = self.num_sms
        deep_ep.Buffer.set_num_sms(num_sms)
class DeepEPLLAll2AllManager(DeepEPAll2AllManagerBase):
    """
    All2All communication based on DeepEP Low-Latency kernels.
    """

    def __init__(self, cpu_group, tcp_store_group=None):
        super().__init__(cpu_group, tcp_store_group)

    def _make_all2all_kwargs(
        self,
        max_num_tokens_per_dp_rank: int,
        token_hidden_size: int,
        num_ep_ranks: int,
        num_global_experts: int,
        num_local_experts: int,
    ) -> dict[Any, Any]:
        """
        Build the kwargs for constructing a low-latency deep_ep.Buffer.

        max_num_tokens_per_dp_rank : the maximum number of tokens a DP rank
          can dispatch all the ranks must hold the same value.
        token_hidden_size: the hidden dimension of each token.
        num_ep_ranks: the number of EP group ranks.
        num_global_experts: Number of experts in the model.
        num_local_experts: Number of experts in an EP rank.
        """
        import deep_ep  # type: ignore[import-not-found]

        # Defaults for internode and intranode are taken from DeepEP tests.
        num_nvl_bytes = envs.VLLM_DEEPEP_BUFFER_SIZE_MB * 1024 * 1024
        # One queue pair per local expert.
        num_qps_per_rank = num_local_experts
        # Let DeepEP compute the RDMA buffer size for these dimensions.
        num_rdma_bytes = deep_ep.Buffer.get_low_latency_rdma_size_hint(
            num_max_dispatch_tokens_per_rank=max_num_tokens_per_dp_rank,
            hidden=token_hidden_size,
            num_ranks=num_ep_ranks,
            num_experts=num_global_experts,
        )
        assert num_rdma_bytes is not None
        return dict(
            group=self.cpu_group,
            num_nvl_bytes=num_nvl_bytes,
            num_rdma_bytes=num_rdma_bytes,
            low_latency_mode=True,
            num_qps_per_rank=num_qps_per_rank,
            allow_nvlink_for_low_latency_mode=True,
            allow_mnnvl=envs.VLLM_DEEPEP_LOW_LATENCY_USE_MNNVL,
            explicitly_destroy=True,
        )

    def get_handle(self, kwargs):
        """
        The kwargs for DeepEPLLAll2AllManager is dictated by
        _make_all2all_kwargs.
        """
        import deep_ep  # type: ignore[import-not-found]

        buffer_kwargs = self._make_all2all_kwargs(**kwargs)
        logger.debug("DeepEP all2all args %s", buffer_kwargs)
        handle: deep_ep.Buffer = self.handle_cache.get_or_create(
            buffer_kwargs, deep_ep.Buffer
        )
        return handle

    # DeepEP LL uses RDMA so no SMs are used for communication
    def max_sms_used(self) -> int | None:
        return 0
class FlashInferAllToAllManager(All2AllManagerBase):
    """
    All2All communication based on flashinfer kernels.
    """

    # This type lint could be removed after all of the work in
    # https://github.com/vllm-project/vllm/issues/26533 done.
    rank: int
    world_size: int

    def __init__(self, cpu_group, tcp_store_group=None):
        assert has_flashinfer_all2all(), (
            "flashinfer all2all module not found. Please install/check flashinfer"
        )  # noqa
        super().__init__(cpu_group, tcp_store_group)
        logger.debug(
            "Initialize for flashinfer All2All rank=%d, world size=%d",
            self.rank,
            self.world_size,
        )
        self.initialized = False
        self.alltoall_info = None
        # Robustness: define the workspace attributes up front so cleanup()
        # never touches undefined attributes.
        self.mapping = None
        self.workspace_tensor = None
        self.prepare_workspace_tensor = None

    def initialize(
        self,
        world_size: int,
        rank: int,
        gpus_per_node: int,
    ):
        """Initialize workspace"""
        if self.initialized:
            return

        self.cleanup()
        logger.debug("making map: rank=%d, world size=%d", rank, world_size)
        self.mapping = Mapping(
            world_size,
            rank,
            gpus_per_node,
            tp_size=world_size,
        )
        from vllm.distributed.device_communicators.mnnvl_compat import (
            CustomCommunicator,
        )

        dp_config = MnnvlConfig(
            comm_backend=CustomCommunicator(get_dp_group().cpu_group),
            fabric_page_size=1 << 29,  # 512MB
            allocation_granularity=0,  # Auto-detect
        )

        self.workspace_tensor = MnnvlMoe.get_moe_workspaces(self.mapping, dp_config)
        self.prepare_workspace_tensor = MnnvlMoe.get_moe_prepare_workspace(
            self.mapping, dp_config
        )

        self.world_size = world_size
        self.rank = rank
        self.gpus_per_node = gpus_per_node
        self.initialized = True

        logger.info(
            "FlashInfer All2All initialized for rank %s, size %s", rank, world_size
        )

    def ensure_alltoall_workspace_initialized(self):
        """Ensure workspace is initialized"""
        if not has_flashinfer_all2all():
            return False

        if self.world_size <= 1:
            return False

        if not self.initialized:
            self.initialize(
                world_size=self.world_size,
                rank=self.rank,
                # Fix: device_count must be *called* — the original passed
                # the bound function object as gpus_per_node.
                gpus_per_node=torch.cuda.device_count(),
            )
        return self.initialized

    def get_handle(self, kwargs):
        # The manager itself acts as the handle.
        return self

    def cleanup(self):
        """Clean up workspace"""
        if (
            self.initialized
            and self.workspace_tensor is not None
            and self.prepare_workspace_tensor is not None
        ):
            try:
                del self.workspace_tensor
                del self.prepare_workspace_tensor
            except Exception as e:
                logger.warning("Failed to cleanup FlashInfer workspace: %s", e)
            finally:
                self.workspace_tensor = None
                self.prepare_workspace_tensor = None
                self.mapping = None
                self.initialized = False
class MoriAll2AllManager(All2AllManagerBase):
    """All2all manager backed by ROCm MoRI EP dispatch/combine kernels."""

    def __init__(self, cpu_group):
        # Fail fast when the optional MoRI extension is not installed.
        assert has_mori(), (
            "MoRI kernels not found. Please follow https://github.com/ROCm/mori/blob/main/README.md"
            " to install MoRI kernels."
        )  # noqa
        import mori

        super().__init__(cpu_group)
        # Cache of EpDispatchCombineOp handles keyed by their kwargs.
        self.handle_cache = Cache()

        # Register the CPU process group under the name "mori" so MoRI's
        # shared-memory layer can initialize from it; order matters here.
        torch._C._distributed_c10d._register_process_group("mori", cpu_group)
        mori.shmem.shmem_torch_process_group_init("mori")

    def _make_all2all_kwargs(
        self,
        rank: int,
        num_ep_ranks: int,
        input_dtype: torch.dtype,
        quant_dtype: torch.dtype,
        token_hidden_size: int,
        scale_dim: int,
        scale_type_size: int,
        max_num_tokens_per_dp_rank: int,
        num_local_experts: int,
        num_experts_per_token: int,
    ):
        """Build the kwargs dict for ``mori.ops.EpDispatchCombineConfig``.

        Kernel/block parameters are selected per GPU architecture
        (gfx942 / gfx950) and per topology (intra-node vs. inter-node).
        """
        import mori  # type: ignore[import-not-found]

        from vllm.platforms.rocm import on_gfx942, on_gfx950

        assert on_gfx942() or on_gfx950(), (
            "mori currently only support arch gfx942 and gfx950"
        )
        if not self.internode:
            # single node
            kernel_type = mori.ops.EpDispatchCombineKernelType.IntraNode
            rdma_block_num = 0
            warp_num_per_block = 16
            block_num = 80
        else:
            # multi node
            kernel_type = mori.ops.EpDispatchCombineKernelType.InterNodeV1
            if on_gfx942():
                warp_num_per_block = 16
                block_num = 32
                rdma_block_num = 16
            elif on_gfx950():
                warp_num_per_block = 8
                block_num = 64
                rdma_block_num = 32
            else:
                # Unreachable given the assert above; kept for safety.
                raise NotImplementedError(
                    "mori currently only support arch gfx942 and gfx950"
                )

        return dict(
            rank=rank,
            world_size=num_ep_ranks,
            data_type=quant_dtype,
            hidden_dim=token_hidden_size,
            scale_dim=scale_dim,
            scale_type_size=scale_type_size,
            # Element size of the *unquantized* activation dtype.
            max_token_type_size=input_dtype.itemsize,
            max_num_inp_token_per_rank=max_num_tokens_per_dp_rank,
            num_experts_per_rank=num_local_experts,
            num_experts_per_token=num_experts_per_token,
            warp_num_per_block=warp_num_per_block,
            block_num=block_num,
            kernel_type=kernel_type,
            rdma_block_num=rdma_block_num,
            gpu_per_node=min(8, num_ep_ranks),
        )

    def _make_handle(self, **kwargs):
        """Construct a new MoRI dispatch/combine op from config kwargs."""
        import mori  # type: ignore[import-not-found]

        mori_config = mori.ops.EpDispatchCombineConfig(**kwargs)
        handle = mori.ops.EpDispatchCombineOp(mori_config)
        return handle

    def get_handle(self, kwargs):
        """Return a cached (or newly created) MoRI all2all handle."""
        import mori  # type: ignore[import-not-found]

        mori_kwargs = self._make_all2all_kwargs(**kwargs)
        logger.debug("MoRI all2all args %s", mori_kwargs)
        handle: mori.ops.EpDispatchCombineOp = self.handle_cache.get_or_create(
            mori_kwargs, self._make_handle
        )
        return handle
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/device_communicators/all2all.py",
"license": "Apache License 2.0",
"lines": 514,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/test_completion_with_function_calling.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import datetime
import json
import jsonschema
import openai # use the official client for correctness check
import pytest
import pytest_asyncio
# downloading lora to test lora requests
from ...utils import RemoteOpenAIServer
# any model with a chat template should work here
MODEL_NAME = "Qwen/Qwen3-0.6B"

# Tool definitions shared by the tests below. The first tool exercises
# JSON-schema "$ref"/"$defs" resolution; the second uses a flat schema.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "The city to find the weather for, e.g. "
                        "'Vienna'",
                        "default": "Vienna",
                    },
                    "country": {
                        "type": "string",
                        "description": "The country that the city is in, e.g. "
                        "'Austria'",
                    },
                    "unit": {
                        "type": "string",
                        "description": "The unit to fetch the temperature in",
                        "enum": ["celsius", "fahrenheit"],
                    },
                    "options": {
                        "$ref": "#/$defs/WeatherOptions",
                        "description": "Optional parameters for weather query",
                    },
                },
                "required": ["country", "unit"],
                "$defs": {
                    "WeatherOptions": {
                        "title": "WeatherOptions",
                        "type": "object",
                        "additionalProperties": False,
                        "properties": {
                            "unit": {
                                "type": "string",
                                "enum": ["celsius", "fahrenheit"],
                                "default": "celsius",
                                "description": "Temperature unit",
                                "title": "Temperature Unit",
                            },
                            "include_forecast": {
                                "type": "boolean",
                                "default": False,
                                "description": "Whether to include a 24-hour forecast",
                                "title": "Include Forecast",
                            },
                            "language": {
                                "type": "string",
                                "default": "zh-CN",
                                "description": "Language of the response",
                                "title": "Language",
                                "enum": ["zh-CN", "en-US", "ja-JP"],
                            },
                        },
                    },
                },
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "get_forecast",
            "description": "Get the weather forecast for a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "The city to get the forecast for, e.g. "
                        "'Vienna'",
                        "default": "Vienna",
                    },
                    "country": {
                        "type": "string",
                        "description": "The country that the city is in, e.g. "
                        "'Austria'",
                    },
                    "days": {
                        "type": "integer",
                        "description": "Number of days to get the forecast for (1-7)",
                    },
                    "unit": {
                        "type": "string",
                        "description": "The unit to fetch the temperature in",
                        "enum": ["celsius", "fahrenheit"],
                    },
                },
                "required": ["country", "days", "unit"],
            },
        },
    },
]

# Conversation used by the tool tests; the final user turn should elicit
# calls to both tools (current weather and a 5-day forecast).
messages = [
    {"role": "user", "content": "Hi! How are you doing today?"},
    {"role": "assistant", "content": "I'm doing well! How can I help you?"},
    {
        "role": "user",
        "content": "Can you tell me what the current weather is in Berlin and the "
        "forecast for the next 5 days, in fahrenheit?",
    },
]
@pytest.fixture(scope="module")
def server():
    """Module-scoped vLLM OpenAI-compatible server with tool calling on."""
    # use half precision for speed and memory savings in CI environment
    cli_args = [
        *("--dtype", "half"),
        "--enable-auto-tool-choice",
        *("--structured-outputs-config.backend", "xgrammar"),
        *("--tool-call-parser", "hermes"),
        *("--reasoning-parser", "qwen3"),
        *("--gpu-memory-utilization", "0.4"),
    ]
    with RemoteOpenAIServer(MODEL_NAME, cli_args) as srv:
        yield srv
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client bound to the module-scoped server fixture."""
    async with server.get_async_client() as c:
        yield c
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("stream", [True, False])
@pytest.mark.parametrize(
    "tool_choice",
    [
        "auto",
        "required",
        {"type": "function", "function": {"name": "get_current_weather"}},
    ],
)
@pytest.mark.parametrize("enable_thinking", [True, False])
async def test_function_tool_use(
    client: openai.AsyncOpenAI,
    model_name: str,
    stream: bool,
    tool_choice: str | dict,
    enable_thinking: bool,
):
    """Tool calls must be produced for every tool_choice mode, with and
    without streaming; reasoning output must appear when thinking is on."""
    if not stream:
        # Non-streaming test
        chat_completion = await client.chat.completions.create(
            messages=messages,
            model=model_name,
            tools=tools,
            tool_choice=tool_choice,
            extra_body={"chat_template_kwargs": {"enable_thinking": enable_thinking}},
        )
        if enable_thinking:
            # Reasoning text comes from the qwen3 reasoning parser.
            assert chat_completion.choices[0].message.reasoning is not None
            assert chat_completion.choices[0].message.reasoning != ""
        assert chat_completion.choices[0].message.tool_calls is not None
        assert len(chat_completion.choices[0].message.tool_calls) > 0
    else:
        # Streaming test
        output_stream = await client.chat.completions.create(
            messages=messages,
            model=model_name,
            tools=tools,
            tool_choice=tool_choice,
            stream=True,
            extra_body={"chat_template_kwargs": {"enable_thinking": enable_thinking}},
        )
        output = []
        reasoning = []
        # Accumulate tool-call and reasoning deltas across stream chunks.
        async for chunk in output_stream:
            if chunk.choices:
                if enable_thinking and getattr(
                    chunk.choices[0].delta, "reasoning", None
                ):
                    reasoning.append(chunk.choices[0].delta.reasoning)
                if chunk.choices[0].delta.tool_calls:
                    output.extend(chunk.choices[0].delta.tool_calls)

        assert len(output) > 0
        if enable_thinking:
            assert len(reasoning) > 0
@pytest.fixture(scope="module")
def k2_server():
    """Server whose HF config is patched to model_type=kimi_k2.

    Used to exercise the kimi_k2 tool-call ID format.
    """
    # use half precision for speed and memory savings in CI environment
    cli_args = [
        *("--dtype", "half"),
        "--enable-auto-tool-choice",
        *("--structured-outputs-config.backend", "xgrammar"),
        *("--tool-call-parser", "hermes"),
        *("--reasoning-parser", "qwen3"),
        *("--gpu-memory-utilization", "0.4"),
    ]
    # hack to test kimi_k2 tool use tool_id format.
    # avoid error in is_deepseek_mla check by setting kv_lora_rank=null
    with RemoteOpenAIServer(
        MODEL_NAME,
        cli_args,
        override_hf_configs={"model_type": "kimi_k2", "kv_lora_rank": None},
    ) as srv:
        yield srv
@pytest_asyncio.fixture
async def k2_client(k2_server):
    """Async OpenAI client bound to the kimi_k2-patched server."""
    async with k2_server.get_async_client() as c:
        yield c
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("stream", [True, False])
@pytest.mark.parametrize("tool_choice", ["required"])
async def test_tool_id_kimi_k2(
    k2_client: openai.AsyncOpenAI, model_name: str, stream: bool, tool_choice: str
):
    """kimi_k2 models must emit tool-call IDs as 'functions.<name>:<index>'."""
    if not stream:
        # Non-streaming test
        chat_completion = await k2_client.chat.completions.create(
            messages=messages, model=model_name, tools=tools, tool_choice=tool_choice
        )
        assert chat_completion.choices[0].message.tool_calls is not None
        assert len(chat_completion.choices[0].message.tool_calls) > 0
        assert chat_completion.choices[0].message.tool_calls[0].id in [
            "functions.get_current_weather:0",
            "functions.get_forecast:1",
        ]
    else:
        # Streaming test
        output_stream = await k2_client.chat.completions.create(
            messages=messages,
            model=model_name,
            tools=tools,
            tool_choice=tool_choice,
            stream=True,
        )
        output = []
        async for chunk in output_stream:
            if chunk.choices and chunk.choices[0].delta.tool_calls:
                output.extend(chunk.choices[0].delta.tool_calls)
        # Deltas may omit the id; when present it must use the k2 format.
        for o in output:
            assert o.id is None or o.id in [
                "functions.get_current_weather:0",
                "functions.get_forecast:1",
            ]
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("arguments", ["{}", ""])
async def test_no_args_tool_call(
    client: openai.AsyncOpenAI, model_name: str, arguments: str
):
    """A tool with an empty parameter schema must round-trip whether its
    arguments are serialized as '{}' or as an empty string."""
    # Step 1: Define a tool that requires no parameters
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_time",
                "description": "Get the current date and time. No parameters needed.",
                "parameters": {
                    "type": "object",
                    "properties": {},  # No parameters
                    "required": [],  # No required fields
                },
            },
        }
    ]
    messages = [{"role": "user", "content": "What time is it now?"}]

    # Step 2: Send user message and let model decide whether to call the tool
    response = await client.chat.completions.create(
        model=model_name,
        messages=messages,
        tools=tools,
        tool_choice="auto",  # Let model choose automatically
    )
    # Step 3: Check if model wants to call a tool
    message = response.choices[0].message
    if message.tool_calls:
        # Get the first tool call
        tool_call = message.tool_calls[0]
        tool_name = tool_call.function.name

        # Step 4: Execute the tool locally (no parameters)
        if tool_name == "get_current_time":
            # Test both empty string and "{}" for no-arg tool calls
            tool_call.function.arguments = arguments
            messages.append(message)
            current_time = datetime.datetime.now()
            result = current_time.isoformat()
            messages.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": result,
                }
            )

            # Step 5: Send tool result back to model to continue conversation
            final_response = await client.chat.completions.create(
                model=model_name,
                messages=messages,
            )
            # Output final natural language response
            assert final_response.choices[0].message.content is not None
    else:
        # No tool called — just print model's direct reply
        assert message.content is not None
@pytest.mark.asyncio
async def test_named_tool_use(
    client: openai.AsyncOpenAI,
    sample_json_schema,
):
    """Forcing a specific tool must yield schema-valid arguments in both
    non-streaming and streaming modes."""
    messages = [
        {"role": "system", "content": "you are a helpful assistant"},
        {
            "role": "user",
            "content": (
                "Give an example JSON for an employee profile using the specified tool."
            ),
        },
    ]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "dummy_function_name",
                "description": "This is a dummy function",
                "parameters": sample_json_schema,
            },
        }
    ]
    tool_choice = {"type": "function", "function": {"name": "dummy_function_name"}}

    # non-streaming
    chat_completion = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_completion_tokens=1000,
        tools=tools,
        temperature=0.0,
        tool_choice=tool_choice,
    )
    message = chat_completion.choices[0].message
    # With a forced tool call there must be no plain-text content.
    assert len(message.content) == 0
    json_string = message.tool_calls[0].function.arguments
    json1 = json.loads(json_string)
    jsonschema.validate(instance=json1, schema=sample_json_schema)

    messages.append({"role": "assistant", "content": json_string})
    messages.append(
        {"role": "user", "content": "Give me another one with a different name and age"}
    )

    # streaming
    stream = await client.chat.completions.create(
        model=MODEL_NAME,
        messages=messages,
        max_completion_tokens=1000,
        tools=tools,
        tool_choice=tool_choice,
        temperature=0.0,
        stream=True,
    )
    output = []
    finish_reason_count = 0
    async for chunk in stream:
        delta = chunk.choices[0].delta
        if delta.role:
            assert delta.role == "assistant"
        # No text content may be interleaved with the tool-call stream.
        assert delta.content is None or len(delta.content) == 0
        if delta.tool_calls:
            output.append(delta.tool_calls[0].function.arguments)
        if chunk.choices[0].finish_reason is not None:
            finish_reason_count += 1
    # finish reason should only return in last block
    assert finish_reason_count == 1
    json2 = json.loads("".join(output))
    jsonschema.validate(instance=json2, schema=sample_json_schema)
    # The follow-up prompt asked for different values.
    assert json1["name"] != json2["name"]
    assert json1["age"] != json2["age"]
@pytest.mark.asyncio
async def test_inconsistent_tool_choice_and_tools(
    client: openai.AsyncOpenAI, sample_json_schema
):
    """Mismatched tools / tool_choice combinations must be rejected (400)."""
    messages = [
        {"role": "system", "content": "you are a helpful assistant"},
        {
            "role": "user",
            "content": f"Give an example JSON for an employee profile that "
            f"fits this schema: {sample_json_schema}",
        },
    ]
    dummy_tools = [
        {
            "type": "function",
            "function": {
                "name": "dummy_function_name",
                "description": "This is a dummy function",
                "parameters": sample_json_schema,
            },
        }
    ]
    named_choice = {"type": "function", "function": {"name": "dummy_function_name"}}
    bad_combos = [
        # tool_choice names a function but no tools were supplied
        (None, named_choice),
        # tool_choice names a function that is not among the tools
        (
            dummy_tools,
            {"type": "function", "function": {"name": "nondefined_function_name"}},
        ),
        # tool_choice is an empty dict
        (dummy_tools, {}),
    ]
    for tools_arg, tool_choice in bad_combos:
        request_kwargs = dict(
            model=MODEL_NAME,
            messages=messages,
            max_completion_tokens=1000,
            tool_choice=tool_choice,
        )
        if tools_arg is not None:
            request_kwargs["tools"] = tools_arg
        with pytest.raises(openai.BadRequestError):
            await client.chat.completions.create(**request_kwargs)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_completion_with_function_calling.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/offline_inference/qwen_1m.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from urllib.request import urlopen
from vllm import LLM, SamplingParams
os.environ["VLLM_ALLOW_LONG_MAX_MODEL_LEN"] = "1"
def load_prompt() -> str:
    """Download and return the 600k-character long-context test prompt."""
    # Test cases with various lengths can be found at:
    #
    # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/64k.txt
    # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/200k.txt
    # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/600k.txt
    # https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2.5-1M/test-data/1m.txt
    url = (
        "https://qianwen-res.oss-cn-beijing.aliyuncs.com/"
        "Qwen2.5-1M/test-data/600k.txt"
    )
    with urlopen(url, timeout=5) as response:
        return response.read().decode("utf-8")
def process_requests(llm: LLM, prompts: list[str]) -> None:
    """Generate completions for *prompts* and print length/text per output."""
    # Sampling configuration for the long-context generation run.
    params = SamplingParams(
        temperature=0.7,
        top_p=0.8,
        top_k=20,
        repetition_penalty=1.05,
        detokenize=True,
        max_tokens=256,
    )
    for output in llm.generate(prompts, params):
        n_prompt_tokens = len(output.prompt_token_ids)
        text = output.outputs[0].text
        print(f"Prompt length: {n_prompt_tokens}, Generated text: {text!r}")
def initialize_engine() -> LLM:
    """Create the 1M-context Qwen2.5-7B engine (4-way tensor parallel)."""
    return LLM(
        model="Qwen/Qwen2.5-7B-Instruct-1M",
        max_model_len=1048576,
        tensor_parallel_size=4,
        enforce_eager=True,
        enable_chunked_prefill=True,
        max_num_batched_tokens=131072,
    )
def main():
    """Entry point: build the engine, fetch the prompt, run generation."""
    engine = initialize_engine()
    process_requests(engine, [load_prompt()])


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/offline_inference/qwen_1m.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/mimo.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2/modeling_qwen2.py
# Copyright 2025 Xiaomi Corporation.
# Copyright 2024 The Qwen team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only MiMo model compatible with HuggingFace weights."""
from collections.abc import Iterable
from itertools import islice
import torch
import torch.nn as nn
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.distributed import get_pp_group
from vllm.logger import init_logger
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM, Qwen2Model
from vllm.sequence import IntermediateTensors
from .utils import PPMissingLayer, is_pp_missing_parameter, maybe_prefix
logger = init_logger(__name__)
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
    }
)
class MiMoModel(Qwen2Model):
    """Qwen2-based MiMo backbone.

    Differs from Qwen2Model in that the final norm is NOT applied in
    forward (it is applied later, in MiMoForCausalLM.compute_logits) and
    MTP-layer weights are skipped during loading.
    """

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the decoder layers assigned to this pipeline-parallel rank."""
        if get_pp_group().is_first_rank:
            # First PP rank embeds tokens unless embeddings are supplied.
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Later ranks resume from the received intermediate tensors.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                residual,
            )
        if not get_pp_group().is_last_rank:
            # Hand off both streams to the next pipeline stage.
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        # Fold in the residual; final norm is deferred to compute_logits.
        hidden_states = hidden_states + residual
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, fusing stacked projections and skipping
        MTP layers (handled by the separate MTP model)."""
        # (fused param name, checkpoint shard name, shard id)
        stacked_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # MTP weights belong to the MTP draft model, not this backbone.
            if "mtp_layers" in name:
                continue
            if "rotary_emb.inv_freq" in name:
                continue
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Remapping the name of FP8 kv-scale.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class MiMoForCausalLM(Qwen2ForCausalLM, nn.Module):
    """MiMo causal LM: the Qwen2ForCausalLM interface over MiMoModel.

    The final RMSNorm is applied here in compute_logits rather than in
    the model's forward (see MiMoModel.forward).
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        # Intentionally bypass Qwen2ForCausalLM.__init__ so that MiMoModel
        # is substituted as the backbone.
        nn.Module.__init__(self)
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config

        self.config = config
        self.quant_config = quant_config

        self.model = MiMoModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )

        if get_pp_group().is_last_rank:
            if config.tie_word_embeddings:
                # Share weights between input embeddings and output head.
                self.lm_head = self.model.embed_tokens
            else:
                self.lm_head = ParallelLMHead(
                    config.vocab_size,
                    config.hidden_size,
                    quant_config=quant_config,
                    prefix=maybe_prefix(prefix, "lm_head"),
                )
        else:
            # Only the last PP rank owns the LM head.
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Apply the deferred final norm, then project to vocab logits."""
        hidden_states = self.model.norm(hidden_states)
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/mimo.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/mimo_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/vllm-project/vllm/blob/v0.7.3/vllm/model_executor/models/deepseek_mtp.py
# Copyright 2025 Xiaomi Corporation.
# Copyright 2023 The vLLM team.
# Copyright 2024 DeepSeek-AI team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only MiMo-MTP model."""
from collections.abc import Iterable
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config import CacheConfig, ModelConfig, VllmConfig
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.qwen2 import Qwen2DecoderLayer
from vllm.sequence import IntermediateTensors
from .utils import maybe_prefix
class MiMoMultiTokenPredictorLayer(nn.Module):
    """One multi-token-prediction (MTP) layer.

    Fuses the previous hidden state with the next-token embedding and
    runs the result through a Qwen2 decoder block.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        prefix: str,
        model_config: ModelConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
    ) -> None:
        super().__init__()

        self.token_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.hidden_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Projects [previous_hidden ; token_embedding] back to hidden_size.
        self.input_proj = nn.Linear(
            config.hidden_size * 2, config.hidden_size, bias=False
        )
        self.mtp_block = Qwen2DecoderLayer(
            config=config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=prefix,
        )
        self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        inputs_embeds: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        """Produce hidden states for the speculative next-token prediction.

        NOTE: inputs_embeds is modified in place by the position-0 masking
        below.
        """
        assert inputs_embeds is not None
        # masking inputs at position 0, as not needed by MTP
        inputs_embeds[positions == 0] = 0
        inputs_embeds = self.token_layernorm(inputs_embeds)
        previous_hidden_states = self.hidden_layernorm(previous_hidden_states)

        hidden_states = self.input_proj(
            torch.cat([previous_hidden_states, inputs_embeds], dim=-1)
        )

        hidden_states, residual = self.mtp_block(
            positions=positions, hidden_states=hidden_states, residual=None
        )
        hidden_states = residual + hidden_states
        return self.final_layernorm(hidden_states)
class MiMoMultiTokenPredictor(nn.Module):
    """Container for the MTP layers, indexed after the main decoder layers."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        # MTP layers are numbered starting right after the last decoder layer.
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )

        self.mtp_layers = torch.nn.ModuleDict(
            {
                str(idx): MiMoMultiTokenPredictorLayer(
                    config,
                    f"{prefix}.layers.{idx}",
                    model_config=vllm_config.model_config,
                    cache_config=vllm_config.cache_config,
                    quant_config=vllm_config.quant_config,
                )
                for idx in range(
                    self.mtp_start_layer_idx,
                    self.mtp_start_layer_idx + self.num_mtp_layers,
                )
            }
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token IDs with the predictor's own embedding table."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Dispatch to the MTP layer for the given speculative step."""
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        return self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)](
            inputs_embeds,
            positions,
            previous_hidden_states,
            spec_step_idx,
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        lm_head: ParallelLMHead,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        # NOTE(review): this lookup discards its result; its only effect is
        # raising KeyError for an out-of-range spec_step_idx — confirm
        # whether it is intentional validation or leftover code.
        self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)]
        logits = self.logits_processor(lm_head, hidden_states)
        return logits
class MiMoMTP(nn.Module):
    """MiMo multi-token-prediction draft model for speculative decoding."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.model = MiMoMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # The MTP model keeps its own LM head (not tied to the target model).
        self.lm_head = ParallelLMHead(
            self.config.vocab_size,
            self.config.hidden_size,
            prefix=maybe_prefix(prefix, "lm_head"),
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token IDs via the predictor's embedding table."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run one MTP step from the target model's hidden states."""
        assert spec_step_idx == 0, "mimo_mtp only support predict one token now"
        hidden_states = self.model(
            input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        """Project MTP hidden states to vocabulary logits."""
        return self.model.compute_logits(hidden_states, self.lm_head, spec_step_idx)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load MTP checkpoint weights, renaming them to this module's layout."""
        # (fused param name, checkpoint shard name, shard id)
        stacked_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]

        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            name = self.map_model_name_to_mtp_param_name(name)

            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                if "mtp_layers" not in name:
                    break
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Only MTP layers, embeddings and the LM head belong here.
                if "mtp_layers" not in name and (
                    "embed_tokens" not in name and "lm_head" not in name
                ):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def map_model_name_to_mtp_param_name(self, name: str) -> str:
        """Translate a checkpoint weight name into this module's naming.

        Shifts the MTP layer index past the main decoder layers and inserts
        the ".mtp_block." segment for weights owned by the decoder block.
        """
        import regex as re

        # append mtp_start_layer_idx
        pattern = r"(model\.mtp_layers\.)(\d+)(\.)"
        match = re.match(pattern, name)
        if match:
            original_num = int(match.group(2))
            new_num = original_num + self.config.num_hidden_layers
            name = name.replace(match.group(), f"{match.group(1)}{new_num}.")
        # check for early turn: weights owned directly by the MTP layer
        # (not by the wrapped decoder block) keep their names as-is.
        name_without_prefix = [
            "token_layernorm",
            "hidden_layernorm",
            "input_proj",
            "final_layernorm",
        ]
        for sub_name in name_without_prefix:
            if sub_name in name:
                return name
        # add mtp_block
        pattern = r"(model\.mtp_layers\.\d+\.)"
        match = re.match(pattern, name)
        if match:
            name = name.replace(match.group(), match.group() + "mtp_block.")
        return name

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in transformer layer block for spec layer

        NOTE(review): not referenced within this class as shown — possibly
        retained from deepseek_mtp; confirm before removing.
        """
        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        spec_layer_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                break
        if not spec_layer_weight:
            # treat rest weights as weights for transformer layer block
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        return name
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/mimo_mtp.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/spec_decode/test_eagle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from unittest import mock
import pytest
import torch
from tests.utils import get_attn_backend_list_based_on_platform
from tests.v1.attention.utils import (
BatchSpec,
create_common_attn_metadata,
create_standard_kv_cache_spec,
try_get_attention_backend,
)
from vllm.config import (
AttentionConfig,
CacheConfig,
DeviceConfig,
ModelConfig,
ParallelConfig,
SchedulerConfig,
SpeculativeConfig,
VllmConfig,
)
from vllm.config.load import LoadConfig
from vllm.model_executor.models.llama import LlamaForCausalLM
from vllm.platforms import current_platform
from vllm.v1.attention.backends.registry import AttentionBackendEnum
from vllm.v1.spec_decode.draft_model import DraftModelProposer
from vllm.v1.spec_decode.eagle import EagleProposer
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
# Target (verified) model used by all proposer tests in this file.
model_dir = "meta-llama/Llama-3.1-8B-Instruct"
# EAGLE / EAGLE-3 draft heads trained against the target model above.
eagle_dir = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B"
eagle3_dir = "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B"
ar_draft_model_dir = "amd/PARD-Llama-3.2-1B"  # Compatible with parallel and AR drafting
def _create_proposer(
    method: str,
    num_speculative_tokens: int,
    attention_backend: str | None = None,
    speculative_token_tree: list[tuple[int, ...]] | None = None,
    parallel_drafting: bool = False,
) -> EagleProposer:
    """Build an Eagle or draft-model proposer backed by a minimal VllmConfig.

    ``method`` selects the draft checkpoint ("eagle", "eagle3", or
    "draft_model"); any other value raises ValueError.
    """
    model_config = ModelConfig(model=model_dir, runner="generate", max_model_len=100)

    # Map the requested method to its draft checkpoint.
    draft_dirs = {
        "eagle": eagle_dir,
        "eagle3": eagle3_dir,
        "draft_model": ar_draft_model_dir,
    }
    try:
        draft_model_dir = draft_dirs[method]
    except KeyError:
        raise ValueError(f"Unknown method: {method}") from None

    spec_token_tree_str = None
    if speculative_token_tree is not None:
        assert num_speculative_tokens == len(speculative_token_tree)
        spec_token_tree_str = str(speculative_token_tree)

    speculative_config = SpeculativeConfig(
        target_model_config=model_config,
        target_parallel_config=ParallelConfig(),
        model=draft_model_dir,
        method=method,
        num_speculative_tokens=num_speculative_tokens,
        speculative_token_tree=spec_token_tree_str,
        parallel_drafting=parallel_drafting,
    )
    if parallel_drafting:
        # Overwrite pard_token to avoid crash during init
        speculative_config.draft_model_config.hf_config.pard_token = 0

    device = current_platform.device_type
    vllm_config = VllmConfig(
        model_config=model_config,
        cache_config=CacheConfig(),
        speculative_config=speculative_config,
        device_config=DeviceConfig(device=device),
        parallel_config=ParallelConfig(),
        load_config=LoadConfig(),
        scheduler_config=SchedulerConfig(
            max_model_len=model_config.max_model_len,
            is_encoder_decoder=model_config.is_encoder_decoder,
        ),
        attention_config=AttentionConfig(backend=attention_backend),
    )

    # Eagle variants share EagleProposer; "draft_model" uses its own class.
    proposer_cls = EagleProposer if "eagle" in method else DraftModelProposer
    return proposer_cls(vllm_config=vllm_config, device=device)
def test_prepare_next_token_ids():
    """
    Test for prepare_next_token_ids_cpu and prepare_next_token_ids_padded.
    Each will produce a device tensor of next_token_ids, taking as input
    either the GPU tensor of sampled_token_ids with -1 for rejected tokens,
    or the CPU python list[list[int]] with the rejected tokens removed.
    """
    device = torch.device(current_platform.device_type)

    num_requests = 4
    num_speculative_tokens = 4
    batch_spec = BatchSpec(
        seq_lens=[num_speculative_tokens + 1] * num_requests,
        query_lens=[num_speculative_tokens + 1] * num_requests,
    )

    req_ids = [f"req_{i + 1}" for i in range(num_requests)]
    mock_input_batch = mock.MagicMock(spec=InputBatch)
    mock_input_batch.req_ids = req_ids
    mock_input_batch.num_reqs = num_requests
    mock_input_batch.vocab_size = 100

    mock_num_scheduled_tokens = {req_id: 0 for req_id in req_ids}
    mock_requests = {}
    for req_id in req_ids:
        mock_request = mock.MagicMock(spec=CachedRequestState)
        # Each request will have a backup next token id of 10, 20, 30, 40
        mock_request.get_token_id.return_value = int(req_id.split("_")[1]) * 10
        mock_request.num_computed_tokens = 0
        mock_requests[req_id] = mock_request

    # explicitly discard the last request
    discarded_req_mask = torch.tensor(
        [False, False, False, True], dtype=torch.bool, device=device
    )

    sampled_token_ids = [
        [0, 1, -1, -1, -1],  # 1 accepted, 3 rejected, "1" sampled
        [0, 1, 2, 3, 4],  # all accepted, "4" sampled
        [-1, -1, -1, -1, -1],  # sampling skipped, use backup token "30"
        [0, 1, 2, -1, -1],  # explicitly discarded, sampling should be ignored
    ]
    sampled_token_ids_tensor = torch.tensor(
        sampled_token_ids, dtype=torch.int32, device=device
    )
    # CPU variant of the input: rejected (-1) entries stripped, and
    # discarded requests represented by an empty list.
    sampled_token_ids_cpu = [[i for i in seq if i != -1] for seq in sampled_token_ids]
    for i in range(len(sampled_token_ids_cpu)):
        if discarded_req_mask[i]:
            sampled_token_ids_cpu[i] = []

    # Both code paths must agree on the same next-token ids.
    expected_next_token_ids_cpu = [1, 4, 30, 40]
    expected_next_token_ids_tensor = torch.tensor(
        expected_next_token_ids_cpu, dtype=torch.int32, device=device
    )

    proposer = _create_proposer("eagle", num_speculative_tokens)

    # CPU path: list-of-lists input with rejected tokens removed.
    next_token_ids_from_cpu = proposer.prepare_next_token_ids_cpu(
        sampled_token_ids_cpu,
        mock_requests,
        mock_input_batch,
        mock_num_scheduled_tokens,
    )

    assert torch.equal(next_token_ids_from_cpu, expected_next_token_ids_tensor)

    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=16,
        device=device,
    )

    # Accepted token counts per request; discarded/skipped requests yield 0.
    expected_valid_sampled_tokens_count = torch.tensor(
        [2, 5, 0, 0], dtype=torch.int32, device=device
    )

    # Padded GPU path: dense tensor input with -1 marking rejections.
    next_token_ids_from_padded, valid_sampled_tokens_count = (
        proposer.prepare_next_token_ids_padded(
            common_attn_metadata,
            sampled_token_ids_tensor,
            mock_requests,
            mock_input_batch,
            discarded_req_mask,
        )
    )

    assert torch.equal(next_token_ids_from_padded, expected_next_token_ids_tensor)
    assert torch.equal(valid_sampled_tokens_count, expected_valid_sampled_tokens_count)
def test_prepare_inputs():
    """
    cu_target_query_lens: [0, a, a + b, a + b + c]
    num_rejected_tokens: [n1, n2, n3]
    num_tokens_per_req: [a - n1, b - n2, c - n3]
    cu_num_tokens: [0, a - n1, a + b - n1 - n2, a + b + c - n1 - n2 - n3]
    token_indices: [0, 1, ..., a - n1 - 1,
                    a, a + 1, ..., a + b - n2 - 1,
                    a + b, a + b + 1, ..., a + b + c - n3 - 1]
    """
    device = torch.device(current_platform.device_type)

    # q1 = 4, q2 = 7, q3 = 5
    # n1 = 1, n2 = 3, n3 = 2
    batch_spec = BatchSpec(
        seq_lens=[4, 7, 5],
        query_lens=[4, 7, 5],
    )

    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=16,
        device=device,
    )

    # If there are `k` sampled tokens, then `k-1` tokens are draft tokens
    # from the previous iteration, and the last token is the bonus token sampled
    # from the base model.
    num_draft_tokens = [3, 6, 4]  # one less than query_lens

    # num rejected tokens is [1, 3, 2]
    ACCEPT_TOKEN = 0
    BONUS_TOKEN = 1
    REJECT_TOKEN = -1
    sampled_token_ids = [
        [ACCEPT_TOKEN, ACCEPT_TOKEN, REJECT_TOKEN, BONUS_TOKEN],
        [
            ACCEPT_TOKEN,
            ACCEPT_TOKEN,
            ACCEPT_TOKEN,
            REJECT_TOKEN,
            REJECT_TOKEN,
            REJECT_TOKEN,
            BONUS_TOKEN,
        ],
        [ACCEPT_TOKEN, ACCEPT_TOKEN, REJECT_TOKEN, REJECT_TOKEN, BONUS_TOKEN],
    ]
    # prepare_inputs expects rejected tokens to already be stripped out.
    sampled_token_ids = [
        [i for i in seq if i != REJECT_TOKEN] for seq in sampled_token_ids
    ]

    # Expected calculations:
    #   query_len_per_req = [4, 7, 5]
    #   num_tokens_per_req = [3, 4, 3] (after subtracting rejected tokens)
    # Expected cumulative counts: [0, 3, 7, 10]
    expected_cu_num_tokens = torch.tensor(
        [0, 3, 7, 10], dtype=torch.int32, device=device
    )

    # Expected token indices (mapped from original positions):
    # First request: indices 0, 1, 2 (keeping first 3 from positions 0-3)
    # Second request: indices 4, 5, 6, 7 (keeping first 4 from positions 4-10)
    # Third request: indices 11, 12, 13 (keeping first 3 from positions 11-15)
    expected_token_indices = torch.tensor(
        [
            0,
            1,
            2,  # First request: 3 tokens (4-1)
            4,
            5,
            6,
            7,  # Second request: 4 tokens (7-3)
            11,
            12,
            13,  # Third request: 3 tokens (5-2)
        ],
        dtype=torch.int32,
        device=device,
    )

    proposer = _create_proposer("eagle", 1)
    updated_metadata, token_indices = proposer.prepare_inputs(
        common_attn_metadata, sampled_token_ids, num_draft_tokens
    )

    assert torch.equal(updated_metadata.query_start_loc, expected_cu_num_tokens)
    assert token_indices.shape[0] == expected_cu_num_tokens[-1].item()
    assert torch.equal(token_indices, expected_token_indices)
def test_prepare_inputs_padded():
    """
    Exercise prepare_inputs_padded with 3 requests, each with query_len = 3
    and num_speculative_tokens == 2:
      - Request 1: 1 rejected token
      - Request 2: 0 rejected tokens
      - Request 3: 2 rejected tokens

    After accounting for rejections, the valid positions to sample from are
    [1, 5, 6], and the (padded) query_start_loc stays [0, 3, 6, 9].
    """
    device = torch.device(current_platform.device_type)
    num_speculative_tokens = 2

    attn_metadata = create_common_attn_metadata(
        BatchSpec(seq_lens=[3, 3, 3], query_lens=[3, 3, 3]),
        block_size=16,
        device=device,
    )

    # Dummy drafts; only cu_num_draft_tokens (expected [3, 6, 9]) matters.
    spec_metadata = SpecDecodeMetadata.make_dummy(
        draft_token_ids=[[0] * num_speculative_tokens] * 3,
        device=device,
    )

    # num_rejected_tokens = [1, 0, 2], num_draft_tokens = [2, 2, 2]
    # valid_sampled_tokens_count = num_draft_tokens + 1 - num_rejected_tokens
    valid_sampled_tokens_count = torch.tensor(
        [2, 3, 1], dtype=torch.int32, device=device
    )

    proposer = _create_proposer("eagle", num_speculative_tokens)
    output_metadata, token_indices_to_sample, num_rejected_tokens_gpu = (
        proposer.prepare_inputs_padded(
            attn_metadata, spec_metadata, valid_sampled_tokens_count
        )
    )

    # Rejection counts must be recovered exactly from the valid counts.
    assert torch.equal(
        num_rejected_tokens_gpu,
        torch.tensor([1, 0, 2], dtype=torch.int32, device=device),
    )
    assert output_metadata.max_query_len == 3
    assert torch.equal(
        output_metadata.query_start_loc,
        torch.tensor([0, 3, 6, 9], dtype=torch.int32, device=device),
    )
    assert torch.equal(
        token_indices_to_sample,
        torch.tensor([1, 5, 6], dtype=torch.int32, device=device),
    )
def test_set_inputs_first_pass_default_eagle():
    """
    Test for set_inputs_first_pass without extra input slots (default EAGLE).

    This tests the path where needs_extra_input_slots=False, which is the
    default EAGLE pathway. In this case:
    - Input IDs are rotated (shifted by one)
    - The next_token_ids are inserted at the last position of each request
    - Positions are copied as-is
    - Hidden states are copied as-is
    - The CommonAttentionMetadata is returned unchanged

    Setup:
    - 3 requests with query_lens [3, 2, 4]
    - Tokens: [a1, a2, a3, b1, b2, c1, c2, c3, c4]
    - After rotation: [a2, a3, -, b2, -, c2, c3, c4, -]
    - After inserting next_tokens [100, 200, 300]:
      [a2, a3, 100, b2, 200, c2, c3, c4, 300]
    """
    device = torch.device(current_platform.device_type)
    num_speculative_tokens = 3
    proposer = _create_proposer("eagle", num_speculative_tokens)

    # Setup batch with 3 requests
    batch_spec = BatchSpec(
        seq_lens=[10, 8, 12],  # Arbitrary context lengths
        query_lens=[3, 2, 4],
    )
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=16,
        device=device,
    )

    # Input tensors
    # Request 0: tokens [10, 11, 12] at positions [7, 8, 9]
    # Request 1: tokens [20, 21] at positions [6, 7]
    # Request 2: tokens [30, 31, 32, 33] at positions [8, 9, 10, 11]
    target_token_ids = torch.tensor(
        [10, 11, 12, 20, 21, 30, 31, 32, 33], dtype=torch.int32, device=device
    )
    target_positions = torch.tensor(
        [7, 8, 9, 6, 7, 8, 9, 10, 11], dtype=torch.int64, device=device
    )
    target_hidden_states = torch.randn(
        9, proposer.hidden_size, dtype=proposer.dtype, device=device
    )
    next_token_ids = torch.tensor([100, 200, 300], dtype=torch.int32, device=device)

    num_tokens, token_indices_to_sample, output_cad = proposer.set_inputs_first_pass(
        target_token_ids=target_token_ids,
        next_token_ids=next_token_ids,
        target_positions=target_positions,
        target_hidden_states=target_hidden_states,
        token_indices_to_sample=None,
        cad=common_attn_metadata,
        num_rejected_tokens_gpu=None,
    )

    assert num_tokens == 9  # Total tokens unchanged

    # Sampling happens at the last slot of each request: [2, 4, 8].
    expected_token_indices_to_sample = torch.tensor(
        [2, 4, 8], dtype=torch.int32, device=device
    )
    assert torch.equal(token_indices_to_sample, expected_token_indices_to_sample)
    # Identity check: the metadata object itself must be passed through.
    assert output_cad is common_attn_metadata

    # Verify input_ids are rotated and next_tokens inserted
    # Original: [10, 11, 12, 20, 21, 30, 31, 32, 33]
    # After shift by 1: [11, 12, 12, 21, 21, 31, 32, 33, 33]
    # After inserting at last indices [2, 4, 8]: [11, 12, 100, 21, 200, 31, 32, 33, 300]
    expected_input_ids = torch.tensor(
        [11, 12, 100, 21, 200, 31, 32, 33, 300], dtype=torch.int32, device=device
    )
    assert torch.equal(proposer.input_ids[:num_tokens], expected_input_ids)

    # Verify positions are copied as-is
    assert torch.equal(proposer.positions[:num_tokens], target_positions)

    # Verify hidden states are copied as-is
    assert torch.equal(proposer.hidden_states[:num_tokens], target_hidden_states)
def test_set_inputs_first_pass_draft_model():
    """
    Test for set_inputs_first_pass with a draft model (extra input slots,
    no shift).

    This tests the path where needs_extra_input_slots=True and
    shift_input_ids=False (draft model case). In this case:
    - Input IDs are NOT shifted
    - Each request gets extra_slots_per_request (1) new slots
    - The kernel handles copying tokens and inserting bonus/padding tokens
    - A new CommonAttentionMetadata is returned with updated query_start_loc

    Setup:
    - 2 requests
    - Request 0: tokens [10, 11, 12] at positions [0, 1, 2]
        - Only tokens [10, 11] are "valid" (query_end_loc=1),
          token 12 is a rejected token from previous speculation
    - Request 1: tokens [20, 21] at positions [0, 1], both valid.
        - Note: this is less than num_speculative_tokens (2) to ensure
          we handle variable lengths correctly.
    - next_token_ids: [100, 200] (bonus tokens)

    With extra_slots_per_request=1 and shift=False:
    Expected output layout:
    Request 0 (indices 0-3):
        - idx 0: token 10, pos 0
        - idx 1: token 11, pos 1
        - idx 2: token 100, pos 2 (bonus token)
        - idx 3: padding_token_id, is_rejected=True
    Request 1 (indices 4-6):
        - idx 4: token 20, pos 0
        - idx 5: token 21, pos 1
        - idx 6: token 200, pos 2 (bonus token)
    """
    device = torch.device(current_platform.device_type)
    num_speculative_tokens = 2
    block_size = 16

    # Create a proposer configured as a draft model (pass_hidden_states=False)
    # We need to mock this since _create_proposer defaults to EAGLE
    proposer = _create_proposer("draft_model", num_speculative_tokens)
    # Pre-seed the state tensors the kernel writes into.
    proposer.parallel_drafting_token_id = 0
    proposer.is_rejected_token_mask = torch.zeros(
        proposer.max_num_tokens, dtype=torch.bool, device=device
    )
    proposer.is_masked_token_mask = torch.zeros(
        proposer.max_num_tokens, dtype=torch.bool, device=device
    )

    # Mock the attn_metadata_builder to avoid needing the full model setup
    mock_kv_cache_spec = mock.MagicMock()
    mock_kv_cache_spec.block_size = block_size
    mock_builder = mock.MagicMock()
    mock_builder.kv_cache_spec = mock_kv_cache_spec
    proposer.attn_metadata_builder = mock_builder

    # Request 0: query_len=3 (but 1 rejected), Request 1: query_len=2
    batch_spec = BatchSpec(
        seq_lens=[3, 2],
        query_lens=[3, 2],
    )
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=block_size,
        device=device,
        arange_block_indices=True,  # Use predictable block indices
    )

    # Input tensors
    target_token_ids = torch.tensor(
        [10, 11, 12, 20, 21], dtype=torch.int32, device=device
    )
    target_positions = torch.tensor([0, 1, 2, 0, 1], dtype=torch.int64, device=device)
    target_hidden_states = torch.randn(
        5, proposer.hidden_size, dtype=proposer.dtype, device=device
    )
    next_token_ids = torch.tensor([100, 200], dtype=torch.int32, device=device)
    num_rejected_tokens_gpu = torch.tensor([1, 0], dtype=torch.int32, device=device)

    num_tokens, token_indices_to_sample, output_cad = proposer.set_inputs_first_pass(
        target_token_ids=target_token_ids,
        next_token_ids=next_token_ids,
        target_positions=target_positions,
        target_hidden_states=target_hidden_states,
        token_indices_to_sample=None,
        cad=common_attn_metadata,
        num_rejected_tokens_gpu=num_rejected_tokens_gpu,
    )

    assert proposer.net_num_new_slots_per_request == 1
    assert proposer.needs_extra_input_slots

    # total_output_tokens = total_input_tokens + net_num_new_slots * batch_size
    assert num_tokens == 7

    # Request 0: [10, 11, 100, padding_token (0)]
    # Request 1: [20, 21, 200]
    # Combined: [10, 11, 100, 0, 20, 21, 200]
    expected_input_ids = torch.tensor(
        [10, 11, 100, 0, 20, 21, 200], dtype=torch.int32, device=device
    )
    assert torch.equal(proposer.input_ids[:num_tokens], expected_input_ids)

    # Verify positions
    # Request 0: [0, 1, 2, 0 (don't care)]
    # Request 1: [0, 1, 2]
    # Combined: [0, 1, 2, 0, 0, 1, 2]
    expected_positions = torch.tensor(
        [0, 1, 2, 0, 0, 1, 2], dtype=torch.int64, device=device
    )
    assert torch.equal(
        proposer.positions[:num_tokens],
        expected_positions,
    )

    # Verify rejection mask
    expected_is_rejected = torch.zeros(7, dtype=torch.bool, device=device)
    expected_is_rejected[3] = True  # padding token at index 3
    assert torch.equal(
        proposer.is_rejected_token_mask[:num_tokens], expected_is_rejected
    )

    # Verify masked token mask (should all be False for non-parallel drafting)
    expected_is_masked = torch.zeros(7, dtype=torch.bool, device=device)
    assert torch.equal(proposer.is_masked_token_mask[:num_tokens], expected_is_masked)

    # Verify token_indices_to_sample (bonus tokens at indices 2 and 6)
    expected_token_indices_to_sample = torch.tensor(
        [2, 6], dtype=torch.int32, device=device
    )
    assert torch.equal(token_indices_to_sample, expected_token_indices_to_sample)

    # Verify the new CAD has updated query_start_loc
    # Original: [0, 3, 5] -> New: [0, 4, 7] (each request gains 1 slot)
    expected_query_start_loc = torch.tensor([0, 4, 7], dtype=torch.int32, device=device)
    assert torch.equal(output_cad.query_start_loc, expected_query_start_loc)
def test_set_inputs_first_pass_parallel_drafting():
    """
    Test for set_inputs_first_pass with parallel drafting (extra input slots,
    with shift).

    This tests the path where needs_extra_input_slots=True and
    shift_input_ids=True (parallel drafting case). In this case:
    - Input IDs ARE shifted (like default EAGLE)
    - Each request gets extra_slots_per_request (3) new slots
    - Parallel drafting tokens are inserted and marked as masked
    - Hidden states are mapped correctly

    Setup:
    - 2 requests with query_lens [4, 4] (1 bonus + 3 spec tokens each)
    - Request 0: tokens [10, 11, 12, 13] at positions [5, 6, 7, 8]
        - Only tokens [10, 11, 12] are "valid", token 13 is rejected
    - Request 1: tokens [20, 21, 22, 23] at positions [10, 11, 12, 13], all valid.
    - next_token_ids: [100, 200] (bonus tokens)

    With shift_input_ids=True, extra_slots_per_request=3:
    Expected output layout:
    Request 0 (6 output slots = 4 - 1 + 3):
        - idx 0-2: shifted tokens [11, 12, 100]
        - idx 3-4: parallel_drafting_tokens, is_masked=True
        - idx 5: padding_token, is_rejected=True
    Request 1 (6 output slots = 4 - 1 + 3):
        - idx 6-8: shifted tokens [21, 22, 23]
        - idx 9: bonus token 200
        - idx 10-11: parallel_drafting_tokens, is_masked=True
    """
    device = torch.device(current_platform.device_type)
    num_speculative_tokens = 3
    block_size = 16

    proposer = _create_proposer("eagle", num_speculative_tokens, parallel_drafting=True)

    # Override to simulate parallel drafting behavior
    # (-2 is a sentinel id used to spot parallel-drafting slots in input_ids)
    proposer.parallel_drafting_token_id = -2
    proposer.parallel_drafting_hidden_state_tensor = torch.zeros(
        proposer.hidden_size, dtype=proposer.dtype, device=device
    )
    proposer.is_rejected_token_mask = torch.zeros(
        proposer.max_num_tokens, dtype=torch.bool, device=device
    )
    proposer.is_masked_token_mask = torch.zeros(
        proposer.max_num_tokens, dtype=torch.bool, device=device
    )

    # Mock the attn_metadata_builder
    mock_kv_cache_spec = mock.MagicMock()
    mock_kv_cache_spec.block_size = block_size
    mock_builder = mock.MagicMock()
    mock_builder.kv_cache_spec = mock_kv_cache_spec
    proposer.attn_metadata_builder = mock_builder

    # Request 0: query_len=4 (1 rejected), Request 1: query_len=4 (all valid)
    batch_spec = BatchSpec(
        seq_lens=[9, 14],
        query_lens=[4, 4],
    )
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=block_size,
        device=device,
        arange_block_indices=True,
    )

    # Input tensors
    target_token_ids = torch.tensor(
        [10, 11, 12, 13, 20, 21, 22, 23], dtype=torch.int32, device=device
    )
    target_positions = torch.tensor(
        [5, 6, 7, 8, 10, 11, 12, 13], dtype=torch.int64, device=device
    )
    # Distinct values per row so hidden-state mapping errors are detectable.
    target_hidden_states = torch.arange(
        8 * proposer.hidden_size, dtype=proposer.dtype, device=device
    ).view(8, proposer.hidden_size)
    next_token_ids = torch.tensor([100, 200], dtype=torch.int32, device=device)
    num_rejected_tokens_gpu = torch.tensor([1, 0], dtype=torch.int32, device=device)

    num_tokens, token_indices_to_sample, output_cad = proposer.set_inputs_first_pass(
        target_token_ids=target_token_ids,
        next_token_ids=next_token_ids,
        target_positions=target_positions,
        target_hidden_states=target_hidden_states,
        token_indices_to_sample=None,
        cad=common_attn_metadata,
        num_rejected_tokens_gpu=num_rejected_tokens_gpu,
    )

    # total_output_tokens = total_input_tokens + net_num_new_slots * batch_size
    # = 8 + 2 * 2 = 12
    assert num_tokens == 12

    # Request 0: [11, 12, 100, -2, -2, 0(padding)]
    # Request 1: [21, 22, 23, 200, -2, -2]
    expected_input_ids = torch.tensor(
        [11, 12, 100, -2, -2, 0, 21, 22, 23, 200, -2, -2],
        dtype=torch.int32,
        device=device,
    )
    assert torch.equal(proposer.input_ids[:num_tokens], expected_input_ids)

    # Verify positions
    # Request 0: [5, 6, 7, 8, 9, 0 (don't care)]
    # Request 1: [10, 11, 12, 13, 14, 15]
    expected_positions = torch.tensor(
        [5, 6, 7, 8, 9, 0, 10, 11, 12, 13, 14, 15], dtype=torch.int64, device=device
    )
    assert torch.equal(
        proposer.positions[:num_tokens],
        expected_positions,
    )

    # Verify rejection mask
    expected_is_rejected = torch.zeros(12, dtype=torch.bool, device=device)
    expected_is_rejected[5] = True
    assert torch.equal(
        proposer.is_rejected_token_mask[:num_tokens], expected_is_rejected
    )

    # Verify masked token mask (parallel drafting slots should be masked)
    expected_is_masked = torch.zeros(12, dtype=torch.bool, device=device)
    expected_is_masked[3] = True
    expected_is_masked[4] = True
    expected_is_masked[10] = True
    expected_is_masked[11] = True
    assert torch.equal(proposer.is_masked_token_mask[:num_tokens], expected_is_masked)

    # Verify token_indices_to_sample (bonus + parallel drafting tokens)
    # Request 0: bonus at 2, parallel at 3, 4
    # Request 1: bonus at 9, parallel at 10, 11
    expected_token_indices_to_sample = torch.tensor(
        [2, 3, 4, 9, 10, 11], dtype=torch.int32, device=device
    )
    assert torch.equal(token_indices_to_sample, expected_token_indices_to_sample)

    # Verify the new CAD has updated query_start_loc
    # Original query_lens: [4, 4] -> Output: [6, 6]
    expected_query_start_loc = torch.tensor(
        [0, 6, 12], dtype=torch.int32, device=device
    )
    assert torch.equal(output_cad.query_start_loc, expected_query_start_loc)

    # Verify masked positions have the parallel drafting hidden state (zeros)
    parallel_drafting_hs = proposer.parallel_drafting_hidden_state_tensor
    for i in range(num_tokens):
        if expected_is_masked[i]:
            assert torch.equal(proposer.hidden_states[i], parallel_drafting_hs), (
                f"Masked position {i} should have parallel drafting hidden state"
            )
@pytest.mark.parametrize("method", ["eagle", "eagle3"])
@pytest.mark.parametrize("attn_backend", get_attn_backend_list_based_on_platform())
@pytest.mark.parametrize("pp_size", [1, 2])
@pytest.mark.parametrize("use_distinct_embed_tokens", [True, False])
@pytest.mark.parametrize("use_distinct_lm_head", [True, False])
@mock.patch("vllm.v1.spec_decode.eagle.get_pp_group")
@mock.patch("vllm.v1.spec_decode.eagle.get_layers_from_vllm_config")
@mock.patch("vllm.v1.spec_decode.eagle.get_model")
def test_load_model(
    mock_get_model,
    mock_get_layers,
    mock_get_pp_group,
    method,
    attn_backend,
    pp_size,
    use_distinct_embed_tokens,
    use_distinct_lm_head,
    monkeypatch,
):
    """Check that EagleProposer.load_model shares the target model's lm_head
    and embed_tokens with the draft model exactly when the draft has no own
    copies and pipeline parallelism is not used.
    """
    if attn_backend == "TRITON_ATTN" and not current_platform.is_rocm():
        pytest.skip(
            "TRITON_ATTN does not support "
            "multi-token eagle spec decode on current platform"
        )

    if attn_backend == "ROCM_AITER_FA" and current_platform.is_rocm():
        monkeypatch.setenv("VLLM_ROCM_USE_AITER", "1")

    # Setup draft model mock
    mock_model = mock.MagicMock()
    mock_model.model = mock.MagicMock()
    mock_model.has_own_embed_tokens = use_distinct_embed_tokens
    if use_distinct_embed_tokens:
        mock_model.model.embed_tokens = mock.MagicMock()
    mock_model.has_own_lm_head = use_distinct_lm_head
    if use_distinct_lm_head:
        mock_model.lm_head = mock.MagicMock()

    mock_get_model.return_value = mock_model

    # Setup mocks for attention layers
    target_attn_layers = {
        "target_attn_1": mock.MagicMock(),
        "target_attn_2": mock.MagicMock(),
    }
    target_indx_layers: dict[str, mock.MagicMock] = {}
    # Draft model has one extra attention layer compared to target model
    all_attn_layers = {**target_attn_layers, "draft_extra_attn": mock.MagicMock()}
    all_indx_layers: dict[str, mock.MagicMock] = {}

    # Make mock_get_layers return different values for each call
    mock_get_layers.side_effect = [
        target_attn_layers,
        target_indx_layers,
        all_attn_layers,
        all_indx_layers,
    ]

    # Setup mock for pp group to return the appropriate value for world size
    mock_pp_group = mock.MagicMock()
    mock_pp_group.world_size = pp_size
    mock_get_pp_group.return_value = mock_pp_group

    # Set up the target model mock with a custom class so that
    # isinstance() checks match the expected type.
    class _TargetModelStub(LlamaForCausalLM):
        model: mock.MagicMock
        lm_head: mock.MagicMock

    target_model = mock.create_autospec(_TargetModelStub, instance=True)
    target_model.model = mock.MagicMock()
    target_model.lm_head = mock.MagicMock()
    target_model.model.embed_tokens = mock.MagicMock()

    from vllm.model_executor.models import SupportsMultiModal

    # Sanity check: the autospec stub must not look multimodal, since that
    # would change load_model's embed_tokens sharing logic.
    assert not isinstance(target_model, SupportsMultiModal)

    # Create proposer using the helper function
    proposer = _create_proposer(
        method, num_speculative_tokens=8, attention_backend=attn_backend
    )

    # Call the method under test
    proposer.load_model(target_model)

    # Verify common interactions
    mock_get_model.assert_called_once()

    # Verify that the lm head is set correctly
    if use_distinct_lm_head:
        assert proposer.model.lm_head is not target_model.lm_head
    else:
        assert proposer.model.lm_head is target_model.lm_head

    # Verify that the embed tokens are set correctly
    # If pp_size is > 1, the embed tokens should be distinct
    if pp_size > 1 or use_distinct_embed_tokens:
        assert proposer.model.model.embed_tokens is not target_model.model.embed_tokens
    else:
        assert proposer.model.model.embed_tokens is target_model.model.embed_tokens
@pytest.mark.parametrize("method", ["eagle", "eagle3"])
@pytest.mark.parametrize("attn_backend", get_attn_backend_list_based_on_platform())
@pytest.mark.parametrize("num_speculative_tokens", [1, 3, 8])
def test_propose(method, attn_backend, num_speculative_tokens, monkeypatch):
    """End-to-end check of EagleProposer.propose with a mocked draft model.

    The mock emits deterministic logits so the drafted tokens are exactly
    predictable: sequence i proposes base_token_ids[i], base_token_ids[i]+1,
    ... for num_speculative_tokens steps.
    """
    if attn_backend == "TRITON_ATTN" and not current_platform.is_rocm():
        pytest.skip(
            "TRITON_ATTN does not support "
            "multi-token eagle spec decode on current platform"
        )

    if attn_backend == "TREE_ATTN":
        # Bug fix: the implicit string concatenation was missing a space,
        # producing "test_propose_treebecause" in the skip reason.
        pytest.skip(
            "TREE_ATTN is tested separately in test_propose_tree "
            "because it requires special input mocking."
        )

    if attn_backend == "ROCM_AITER_FA" and current_platform.is_rocm():
        monkeypatch.setenv("VLLM_ROCM_USE_AITER", "1")

    # Use GPU device
    device = torch.device(current_platform.device_type)

    # Setup test parameters
    batch_size = 2
    seq_len_1 = 5
    seq_len_2 = 3
    total_tokens = seq_len_1 + seq_len_2
    vocab_size = 100
    seq_lens = [seq_len_1, seq_len_2]

    # Create proposer first so we can use its actual hidden_size
    proposer = _create_proposer(
        "eagle", num_speculative_tokens, attention_backend=attn_backend
    )
    # Get the hidden_size from the proposer to ensure consistency
    hidden_size = proposer.hidden_size

    # Helper to create deterministic logits that will produce specific tokens
    def create_deterministic_logits(token_ids):
        logits = torch.full((batch_size, vocab_size), -100.0, device=device)
        for i, token_id in enumerate(token_ids):
            logits[i, token_id] = 100.0
        return logits

    # We mock a model that returns deterministic logits
    # Sequence 1: 42, 43, 44, ...
    # Sequence 2: 60, 61, 62, ...
    base_token_ids = [42, 60]

    # Skip loading the model and replace it with a mock directly
    model_mock = mock.MagicMock()

    # Setup for model forward calls: the first pass processes all prompt
    # tokens, each subsequent drafting step only batch_size tokens.
    forward_returns = []
    for i in range(num_speculative_tokens):
        num_rows = total_tokens if i == 0 else batch_size
        h_logits = torch.zeros(num_rows, hidden_size, device=device)
        h_states = torch.zeros(num_rows, hidden_size, device=device)
        forward_returns.append((h_logits, h_states))

    # For single token case, we only need the first item;
    # for multi-token, we need the sequence
    if num_speculative_tokens == 1:
        model_mock.return_value = forward_returns[0]
    else:
        model_mock.side_effect = forward_returns

    # Setup for compute_logits calls: increment the base token IDs per step.
    logits_returns = [
        create_deterministic_logits([base_id + i for base_id in base_token_ids])
        for i in range(num_speculative_tokens)
    ]
    if num_speculative_tokens == 1:
        model_mock.compute_logits.return_value = logits_returns[0]
    else:
        model_mock.compute_logits.side_effect = logits_returns

    # Assign the mock to the proposer
    proposer.model = model_mock

    # Assign draft attn_layer_names since load_model is not invoked
    proposer.attn_layer_names = ["layer.0"]

    # Create input tensors
    batch_spec = BatchSpec(
        seq_lens=seq_lens,
        query_lens=seq_lens,
    )
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=16,
        device=device,
    )

    target_token_ids = torch.randint(0, vocab_size, (total_tokens,), device=device)
    target_positions = torch.cat(
        [torch.arange(seq_len_1, device=device), torch.arange(seq_len_2, device=device)]
    )
    target_hidden_states = torch.randn(total_tokens, hidden_size, device=device)
    next_token_ids = torch.randint(
        0, vocab_size, (batch_size,), dtype=torch.int32, device=device
    )
    sampling_metadata = mock.MagicMock()

    # Resolve the backend builder by enum name; the tested backends use the
    # same string as their AttentionBackendEnum member name. (Replaces an
    # if/elif chain that repeated the same call four times.)
    try:
        backend_enum = AttentionBackendEnum[attn_backend]
    except KeyError:
        raise ValueError(f"Unsupported attention backend: {attn_backend}") from None
    attn_metadata_builder_cls, _ = try_get_attention_backend(backend_enum)

    attn_metadata_builder = attn_metadata_builder_cls(
        kv_cache_spec=create_standard_kv_cache_spec(proposer.vllm_config),
        layer_names=proposer.attn_layer_names,
        vllm_config=proposer.vllm_config,
        device=device,
    )

    # Mock runner for attention metadata building
    proposer.runner = mock.MagicMock()
    proposer.runner.attn_groups.append([mock.MagicMock()])
    proposer.runner.attn_groups[0][
        0
    ].get_metadata_builder.return_value = attn_metadata_builder
    proposer._get_attention_metadata_builder = mock.MagicMock(
        return_value=attn_metadata_builder
    )

    result = proposer.propose(
        target_token_ids=target_token_ids,
        target_positions=target_positions,
        target_hidden_states=target_hidden_states,
        next_token_ids=next_token_ids,
        token_indices_to_sample=None,
        common_attn_metadata=common_attn_metadata,
        sampling_metadata=sampling_metadata,
    )

    assert result.shape == (batch_size, num_speculative_tokens)

    # Create expected tokens based on our token pattern
    if num_speculative_tokens == 1:
        # Example for num_speculative_tokens=1:
        # [[42], [60]]
        expected_tokens = torch.tensor(
            [[base_token_ids[0]], [base_token_ids[1]]], device=device
        )
    else:
        # Example for num_speculative_tokens=3:
        # [[42, 43, 44], [60, 61, 62]]
        expected_tokens = torch.zeros(
            (batch_size, num_speculative_tokens), dtype=torch.int64, device=device
        )
        for i in range(batch_size):
            for j in range(num_speculative_tokens):
                expected_tokens[i, j] = base_token_ids[i] + j

    # Verify all tokens match our expectations
    assert torch.equal(result, expected_tokens)
@pytest.mark.parametrize(
    "spec_token_tree",
    [
        [(0,)],  # A single token
        [(0,), (0, 0), (0, 0, 0)],  # Chain
        [(0,), (1,), (2,)],  # Parallel
        [(0,), (1,), (2,), (0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)],  # Tree
    ],
)
def test_propose_tree(spec_token_tree):
    """End-to-end check of tree-based draft proposal.

    The drafter model and its compute_logits are mocked to emit
    deterministic logits, so the proposed draft token ids can be
    predicted exactly: consecutive integers starting from each
    request's base token id (42 and 60).
    """
    # Get GPU device.
    device = torch.device(current_platform.device_type)

    # Setup test parameters.
    batch_size = 2
    seq_len_1 = 5
    seq_len_2 = 3
    total_tokens = seq_len_1 + seq_len_2
    vocab_size = 100
    seq_lens = [seq_len_1, seq_len_2]
    num_speculative_tokens = len(spec_token_tree)

    # Create proposer first so we can use its actual hidden_size.
    proposer = _create_proposer(
        "eagle",
        num_speculative_tokens,
        speculative_token_tree=spec_token_tree,
    )
    # Get the hidden_size from the proposer to ensure consistency.
    hidden_size = proposer.hidden_size

    # Helper to create deterministic logits that will produce specific tokens
    def create_deterministic_logits(token_ids, k: int):
        logits = torch.full((batch_size, vocab_size), -100.0, device=device)
        for i, token_id in enumerate(token_ids):
            # Assign decreasing values to the k, consecutive, tokens.
            for j in range(k):
                logits[i, token_id + j] = 100.0 - j
        return logits

    # Mock a model that returns deterministic logits.
    base_token_ids = torch.tensor([42, 60], dtype=torch.int64, device=device)

    # Skip loading the model and replace it with a mock that returns
    # deterministic outputs.
    model_mock = mock.MagicMock()

    # Mock the model forward calls: the first call handles the target
    # tokens, then one call per tree level (order must match propose()).
    forward_returns = [
        (
            torch.zeros(total_tokens, hidden_size, device=device),
            torch.zeros(total_tokens, hidden_size, device=device),
        )
    ]
    for cu_num_drafts in proposer.cu_drafts_per_level:
        h_logits = torch.zeros(batch_size * cu_num_drafts, hidden_size, device=device)
        h_states = torch.zeros(batch_size * cu_num_drafts, hidden_size, device=device)
        forward_returns.append((h_logits, h_states))
    model_mock.side_effect = forward_returns

    # Mock the compute_logits calls, one return value per tree level.
    cu_num_drafts_tensor = torch.tensor(
        [0] + proposer.cu_drafts_per_level, dtype=torch.int32, device=device
    )
    logits_returns = []
    for level, num_children in enumerate(proposer.child_drafts_per_level):
        token_ids = base_token_ids + cu_num_drafts_tensor[level]
        level_num_drafts = cu_num_drafts_tensor[level + 1] - cu_num_drafts_tensor[level]
        level_logits = []
        for i in range(level_num_drafts // num_children):
            level_logits.append(
                create_deterministic_logits(token_ids + i * num_children, num_children)
            )
        logits_returns.append(torch.stack(level_logits, dim=1))
    model_mock.compute_logits.side_effect = logits_returns

    # Assign the mock to the proposer
    proposer.model = model_mock

    # Assign draft attn_layer_names since load_model is not invoked
    proposer.attn_layer_names = ["layer.0"]

    # Get the tree attention metadata builder.
    attn_metadata_builder_cls, _ = try_get_attention_backend(
        AttentionBackendEnum.TREE_ATTN
    )
    attn_metadata_builder = attn_metadata_builder_cls(
        kv_cache_spec=create_standard_kv_cache_spec(proposer.vllm_config),
        layer_names=proposer.attn_layer_names,
        vllm_config=proposer.vllm_config,
        device=device,
    )

    # Mock runner for attention metadata building.
    proposer.runner = mock.MagicMock()
    proposer.runner.attn_groups.append([mock.MagicMock()])
    proposer.runner.attn_groups[0][0].metadata_builders = [attn_metadata_builder]
    proposer.runner.attn_groups[0][
        0
    ].get_metadata_builder.return_value = attn_metadata_builder
    proposer._get_attention_metadata_builder = mock.MagicMock(
        return_value=attn_metadata_builder
    )

    # Setup inputs for the proposer.
    target_token_ids = torch.randint(0, vocab_size, (total_tokens,), device=device)
    target_positions = torch.cat(
        [torch.arange(seq_len_1, device=device), torch.arange(seq_len_2, device=device)]
    )
    target_hidden_states = torch.randn(total_tokens, hidden_size, device=device)
    next_token_ids = torch.randint(
        0, vocab_size, (batch_size,), dtype=torch.int32, device=device
    )
    batch_spec = BatchSpec(
        seq_lens=seq_lens,
        query_lens=seq_lens,
    )
    common_attn_metadata = create_common_attn_metadata(
        batch_spec,
        block_size=16,
        device=device,
    )
    sampling_metadata = mock.MagicMock()

    # Propose draft tokens.
    result = proposer.propose(
        target_token_ids=target_token_ids,
        target_positions=target_positions,
        target_hidden_states=target_hidden_states,
        next_token_ids=next_token_ids,
        token_indices_to_sample=None,
        common_attn_metadata=common_attn_metadata,
        sampling_metadata=sampling_metadata,
    )
    assert result.shape == (batch_size, num_speculative_tokens)

    # The tokens are expected to be consecutive integers starting
    # from the base token IDs.
    expected_tokens = base_token_ids[:, None] + torch.arange(
        num_speculative_tokens, dtype=torch.int64, device=device
    )
    # Verify that the draft tokens match our expectations.
    assert torch.equal(result, expected_tokens)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/spec_decode/test_eagle.py",
"license": "Apache License 2.0",
"lines": 987,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/plugins/lora_resolvers/test_filesystem_resolver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import shutil
import pytest
from huggingface_hub import snapshot_download
from vllm.plugins.lora_resolvers.filesystem_resolver import FilesystemResolver
# Base model plus two Hub adapters: a real LoRA adapter (positive case)
# and a prompt-tuning adapter (negative case for the resolver).
MODEL_NAME = "Qwen/Qwen3-0.6B"
LORA_NAME = "charent/self_cognition_Alice"
PA_NAME = "swapnilbp/llama_tweet_ptune"
@pytest.fixture(scope="module")
def adapter_cache(request, tmpdir_factory):
    """Module-scoped directory mimicking the on-disk adapter cache layout."""
    root = tmpdir_factory.mktemp(request.module.__name__)
    return root / "adapter_cache"
@pytest.fixture(scope="module")
def qwen3_lora_files():
    # Download (or reuse from the local HF cache) the LoRA adapter used
    # as the positive resolution case.
    return snapshot_download(repo_id=LORA_NAME)
@pytest.fixture(scope="module")
def pa_files():
    # Download the prompt-tuning adapter used as the non-LoRA negative case.
    return snapshot_download(repo_id=PA_NAME)
@pytest.mark.asyncio
async def test_filesystem_resolver(adapter_cache, qwen3_lora_files):
    """A valid LoRA adapter staged in the cache resolves to a LoRARequest."""
    # Stage the downloaded adapter under <cache>/<lora-name>.
    shutil.copytree(qwen3_lora_files, adapter_cache / LORA_NAME)

    resolver = FilesystemResolver(adapter_cache)
    assert resolver is not None

    lora_request = await resolver.resolve_lora(MODEL_NAME, LORA_NAME)
    assert lora_request is not None
    assert lora_request.lora_name == LORA_NAME
    assert lora_request.lora_path == os.path.join(adapter_cache, LORA_NAME)
@pytest.mark.asyncio
async def test_missing_adapter(adapter_cache):
    """An unknown adapter name resolves to None rather than raising."""
    resolver = FilesystemResolver(adapter_cache)
    assert resolver is not None
    assert await resolver.resolve_lora(MODEL_NAME, "foobar") is None
@pytest.mark.asyncio
async def test_nonlora_adapter(adapter_cache, pa_files):
    """A non-LoRA (prompt-tuning) adapter in the cache resolves to None."""
    shutil.copytree(pa_files, adapter_cache / PA_NAME)

    resolver = FilesystemResolver(adapter_cache)
    assert resolver is not None
    assert await resolver.resolve_lora(MODEL_NAME, PA_NAME) is None
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/plugins/lora_resolvers/test_filesystem_resolver.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/plugins/lora_resolvers/filesystem_resolver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import os
import vllm.envs as envs
from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolver, LoRAResolverRegistry
class FilesystemResolver(LoRAResolver):
    """LoRA resolver that serves adapters from a local directory.

    Adapters are expected at ``<lora_cache_dir>/<lora_name>`` containing a
    standard PEFT ``adapter_config.json``.
    """

    def __init__(self, lora_cache_dir: str):
        # Root directory scanned for adapters on every resolve call.
        self.lora_cache_dir = lora_cache_dir

    async def resolve_lora(
        self, base_model_name: str, lora_name: str
    ) -> LoRARequest | None:
        """Resolve ``lora_name`` to a LoRARequest, or None if unavailable."""
        lora_path = os.path.join(self.lora_cache_dir, lora_name)
        return await self._get_lora_req_from_path(
            lora_name, lora_path, base_model_name
        )

    async def _get_lora_req_from_path(
        self, lora_name: str, lora_path: str, base_model_name: str
    ) -> LoRARequest | None:
        """Builds a LoraRequest pointing to the lora path if it's a valid
        LoRA adapter and has a matching base_model_name.

        Returns None when the path or config is missing, the adapter is
        not a LoRA adapter (e.g. prompt tuning), or it targets a
        different base model.
        """
        adapter_config_path = os.path.join(lora_path, "adapter_config.json")
        # If lora_path itself does not exist, the config path cannot
        # exist either, so a single existence check suffices.
        if not os.path.exists(adapter_config_path):
            return None
        with open(adapter_config_path) as file:
            adapter_config = json.load(file)
        # Use .get() so a malformed adapter_config.json missing these
        # keys yields None instead of raising KeyError.
        if (
            adapter_config.get("peft_type") == "LORA"
            and adapter_config.get("base_model_name_or_path") == base_model_name
        ):
            return LoRARequest(
                lora_name=lora_name,
                lora_int_id=abs(hash(lora_name)),
                lora_path=lora_path,
            )
        return None
def register_filesystem_resolver():
    """Register the filesystem LoRA Resolver with vLLM.

    No-op unless VLLM_LORA_RESOLVER_CACHE_DIR is set; raises ValueError
    when it is set but does not point at an existing directory.
    """
    lora_cache_dir = envs.VLLM_LORA_RESOLVER_CACHE_DIR
    if not lora_cache_dir:
        return
    # os.path.isdir() is already False for nonexistent paths, so a
    # separate exists() check is redundant.
    if not os.path.isdir(lora_cache_dir):
        # Fix: the original message used a backslash line-continuation
        # inside the string literal, embedding a run of spaces in the
        # error text; implicit concatenation keeps it clean.
        raise ValueError(
            "VLLM_LORA_RESOLVER_CACHE_DIR must be set to a valid directory "
            "for Filesystem Resolver plugin to function"
        )
    fs_resolver = FilesystemResolver(lora_cache_dir)
    LoRAResolverRegistry.register_resolver("Filesystem Resolver", fs_resolver)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/plugins/lora_resolvers/filesystem_resolver.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/kv_connector/nixl_integration/test_accuracy.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import lm_eval
import openai
BASE_URL = "http://localhost:8192/v1"
NUM_CONCURRENT = 100
TASK = "gsm8k"
FILTER = "exact_match,strict-match"
# Absolute tolerance applied around the expected accuracy.
RTOL = 0.03

# Model-specific expected values
EXPECTED_VALUES = {
    "Qwen/Qwen3-0.6B": 0.41,
    "deepseek-ai/deepseek-vl2-small": 0.59,
    "deepseek-ai/deepseek-vl2-tiny": 0.19,
    "deepseek-ai/DeepSeek-V2-Lite-Chat": 0.65,
}

# Fix: the original had a trailing comma inside the parentheses, which
# made SIMPLE_PROMPT a 1-tuple instead of a str; drop it so the prompt
# is sent as a plain string.
SIMPLE_PROMPT = (
    "The best part about working on vLLM is that I got to meet so many people across "
    "various different organizations like UCB, Google, and Meta which means"
)

# Get model name from environment variable
MODEL_NAME = os.environ.get("TEST_MODEL", "Qwen/Qwen3-0.6B")
def run_simple_prompt():
    """Send one non-streamed completion to the proxy and print the result."""
    api = openai.OpenAI(api_key="EMPTY", base_url=BASE_URL)
    result = api.completions.create(model=MODEL_NAME, prompt=SIMPLE_PROMPT)

    divider = "-" * 50
    print(divider)
    print(f"Completion results for {MODEL_NAME}:")
    print(result)
    print(divider)
def test_accuracy():
    """Run the end to end accuracy test."""
    run_simple_prompt()

    model_args = (
        f"model={MODEL_NAME},"
        f"base_url={BASE_URL}/completions,"
        f"num_concurrent={NUM_CONCURRENT},tokenized_requests=False"
    )
    results = lm_eval.simple_evaluate(
        model="local-completions",
        model_args=model_args,
        tasks=TASK,
    )

    measured = results["results"][TASK][FILTER]
    expected = EXPECTED_VALUES.get(MODEL_NAME)
    if expected is None:
        print(
            f"Warning: No expected value found for {MODEL_NAME}. "
            "Skipping accuracy check."
        )
        print(f"Measured value: {measured}")
        return

    # |measured - expected| < RTOL is equivalent to the two-sided
    # (measured - RTOL < expected < measured + RTOL) check.
    assert abs(measured - expected) < RTOL, (
        f"Expected: {expected} | Measured: {measured}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/nixl_integration/test_accuracy.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/nixl_integration/test_edge_cases.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import openai
# Endpoint configuration: hosts default to localhost, but all three
# ports must be supplied via the environment.
PREFILL_HOST = os.getenv("PREFILL_HOST", "localhost")
PREFILL_PORT = os.getenv("PREFILL_PORT", None)
DECODE_HOST = os.getenv("DECODE_HOST", "localhost")
DECODE_PORT = os.getenv("DECODE_PORT", None)
PROXY_HOST = os.getenv("PROXY_HOST", "localhost")
PROXY_PORT = os.getenv("PROXY_PORT", None)

if None in (PREFILL_PORT, DECODE_PORT, PROXY_PORT):
    raise ValueError("Please set the PREFILL_PORT, DECODE_PORT, and PROXY_PORT.")

LONG_PROMPT = "Red Hat is the best company in the world to work for because it works on open source software, which means that all the contributions are delivered to the community. As a result, when working on projects like vLLM we are able to meet many amazing people from various organizations like AMD, Google, NVIDIA, "  # noqa: E501
PROMPT = "Red Hat is the best company in the world to work for because it works on open source software, which means that all the contributions are delivered to the community. As a result,"  # noqa: E501
SHORT_PROMPT = "Red Hat is "
def test_edge_cases():
    """Exercise prefix-cache corner cases through the P/D proxy."""

    def make_client(host, port):
        # One client per endpoint; the API key is a dummy value.
        return openai.OpenAI(api_key="MY_KEY", base_url=f"http://{host}:{port}/v1")

    decode_client = make_client(DECODE_HOST, DECODE_PORT)
    prefill_client = make_client(PREFILL_HOST, PREFILL_PORT)
    proxy_client = make_client(PROXY_HOST, PROXY_PORT)

    # Get the list of models
    models = decode_client.models.list()
    MODEL = models.data[0].id

    def greedy_text(client, prompt):
        # temperature=0 keeps completions deterministic for comparison.
        completion = client.completions.create(
            model=MODEL, prompt=prompt, temperature=0
        )
        return completion.choices[0].text

    # (1) Check that we can handle a very short prompt,
    # less than the length of the block size.
    proxy_response = greedy_text(proxy_client, SHORT_PROMPT)
    prefill_response = greedy_text(prefill_client, SHORT_PROMPT)
    print(f"SMALL PROMPT: {proxy_response=}")
    assert proxy_response == prefill_response

    # (2) Check that we can handle a full prefix cache
    # hit on the D worker but not on the P worker.
    # (2a): prime the D worker.
    decode_response = greedy_text(decode_client, PROMPT)
    # (2b): send via the P/D setup.
    proxy_response = greedy_text(proxy_client, PROMPT)
    print(f"FULL CACHE HIT: {proxy_response=}")
    assert proxy_response == decode_response

    # (3) Check that we can handle a partial prefix cache
    # hit on the D worker.
    proxy_response = greedy_text(proxy_client, LONG_PROMPT)
    prefill_response = greedy_text(prefill_client, LONG_PROMPT)
    print(f"PARTIAL CACHE HIT: {proxy_response=}")
    assert proxy_response == prefill_response
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/nixl_integration/test_edge_cases.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/nixl_integration/toy_proxy_server.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import itertools
import logging
import os
import uuid
from contextlib import asynccontextmanager
import httpx
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
# Module logger; DEBUG so per-request routing decisions are visible.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Lifespan context manager to handle startup and shutdown events.
    """

    def build_pool(instances):
        # One persistent client (unlimited connections) per backend instance.
        pool = []
        for idx, (host, port) in enumerate(instances):
            pool.append(
                {
                    "client": httpx.AsyncClient(
                        timeout=None,
                        base_url=f"http://{host}:{port}/v1",
                        limits=httpx.Limits(
                            max_connections=None,
                            max_keepalive_connections=None,
                        ),
                    ),
                    "host": host,
                    "port": port,
                    "id": idx,
                }
            )
        return pool

    # Startup: Initialize client pools for prefiller and decoder services
    app.state.prefill_clients = build_pool(global_args.prefiller_instances)
    app.state.decode_clients = build_pool(global_args.decoder_instances)

    # Initialize round-robin iterators
    app.state.prefill_iterator = itertools.cycle(range(len(app.state.prefill_clients)))
    app.state.decode_iterator = itertools.cycle(range(len(app.state.decode_clients)))

    print(
        f"Initialized {len(app.state.prefill_clients)} prefill clients "
        f"and {len(app.state.decode_clients)} decode clients."
    )

    yield

    # Shutdown: Close all clients
    for client_info in app.state.prefill_clients + app.state.decode_clients:
        await client_info["client"].aclose()


# Update FastAPI app initialization to use lifespan
app = FastAPI(lifespan=lifespan)
def parse_args():
    """Parse proxy CLI flags and pair each service's hosts with its ports."""
    ap = argparse.ArgumentParser()

    ap.add_argument("--port", type=int, default=8000)
    # Always use 127.0.0.1 as localhost binds to IPv6 which is blocked on CI
    ap.add_argument("--host", type=str, default="127.0.0.1")

    # For prefiller instances
    ap.add_argument(
        "--prefiller-hosts",
        "--prefiller-host",
        type=str,
        nargs="+",
        default=["localhost"],
    )
    ap.add_argument(
        "--prefiller-ports", "--prefiller-port", type=int, nargs="+", default=[8100]
    )

    # For decoder instances
    ap.add_argument(
        "--decoder-hosts", "--decoder-host", type=str, nargs="+", default=["localhost"]
    )
    ap.add_argument(
        "--decoder-ports", "--decoder-port", type=int, nargs="+", default=[8200]
    )

    args = ap.parse_args()

    # Each hosts list must line up 1:1 with its ports list.
    if len(args.prefiller_hosts) != len(args.prefiller_ports):
        raise ValueError(
            "Number of prefiller hosts must match number of prefiller ports"
        )
    if len(args.decoder_hosts) != len(args.decoder_ports):
        raise ValueError("Number of decoder hosts must match number of decoder ports")

    # Create tuples of (host, port) for each service type
    args.prefiller_instances = list(zip(args.prefiller_hosts, args.prefiller_ports))
    args.decoder_instances = list(zip(args.decoder_hosts, args.decoder_ports))

    return args
def get_next_client(app, service_type: str):
    """
    Get the next client in round-robin fashion.

    Args:
        app: The FastAPI app instance
        service_type: Either 'prefill' or 'decode'

    Returns:
        The next client to use
    """
    state = app.state
    if service_type == "prefill":
        pool, rr = state.prefill_clients, state.prefill_iterator
    elif service_type == "decode":
        pool, rr = state.decode_clients, state.decode_iterator
    else:
        raise ValueError(f"Unknown service type: {service_type}")
    return pool[next(rr)]
async def send_request_to_service(
    client_info: dict, endpoint: str, req_data: dict, request_id: str
):
    """
    Send a request to a service using a client from the pool.
    """
    # Work on a copy so the caller's payload (reused for decode) is untouched.
    payload = dict(req_data)
    payload["kv_transfer_params"] = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "remote_engine_id": None,
        "remote_block_ids": None,
        "remote_host": None,
        "remote_port": None,
    }
    # Prefill only needs to populate the KV cache: one token, no streaming.
    payload["stream"] = False
    payload["max_tokens"] = 1
    if "max_completion_tokens" in payload:
        payload["max_completion_tokens"] = 1
    payload.pop("stream_options", None)

    headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request_id,
    }

    response = await client_info["client"].post(endpoint, json=payload, headers=headers)
    response.raise_for_status()
    # read/consume the response body to release the connection
    # otherwise, it would http.ReadError
    await response.aread()
    return response
async def stream_service_response(
    client_info: dict, endpoint: str, req_data: dict, request_id: str
):
    """
    Asynchronously stream response from a service using a client from the pool.
    """
    auth_headers = {
        "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",
        "X-Request-Id": request_id,
    }
    stream_ctx = client_info["client"].stream(
        "POST", endpoint, json=req_data, headers=auth_headers
    )
    async with stream_ctx as response:
        response.raise_for_status()
        async for part in response.aiter_bytes():
            yield part
async def _handle_completions(api: str, request: Request):
    """Proxy a completions-style request through prefill, then decode.

    The request is first sent (non-streaming, single-token) to a prefill
    instance to populate the KV cache; the kv_transfer_params returned
    by the prefiller are attached to the original payload, which is then
    streamed from a decode instance back to the caller.
    """
    try:
        req_data = await request.json()
        request_id = str(uuid.uuid4())

        # Get the next prefill client in round-robin fashion
        prefill_client_info = get_next_client(request.app, "prefill")

        # Send request to prefill service
        response = await send_request_to_service(
            prefill_client_info, api, req_data, request_id
        )

        # Extract the needed fields
        response_json = response.json()
        await response.aclose()  # CRITICAL: Release connection back to pool

        # Forward the prefiller's KV-transfer handle so the decoder can
        # pull the prefilled cache.
        kv_transfer_params = response_json.get("kv_transfer_params", {})
        if kv_transfer_params:
            req_data["kv_transfer_params"] = kv_transfer_params

        # Get the next decode client in round-robin fashion
        decode_client_info = get_next_client(request.app, "decode")

        logger.debug("Using %s %s", prefill_client_info, decode_client_info)

        # Stream response from decode service
        async def generate_stream():
            async for chunk in stream_service_response(
                decode_client_info, api, req_data, request_id=request_id
            ):
                yield chunk

        return StreamingResponse(generate_stream(), media_type="application/json")

    except Exception as e:
        # Log the full traceback before re-raising so proxy failures are
        # visible in the server output.
        import sys
        import traceback

        exc_info = sys.exc_info()
        print(f"Error occurred in disagg prefill proxy server - {api} endpoint")
        print(e)
        print("".join(traceback.format_exception(*exc_info)))
        raise
@app.post("/v1/completions")
async def handle_completions(request: Request):
    # Thin route wrapper; all proxy logic lives in _handle_completions.
    return await _handle_completions("/completions", request)
@app.post("/v1/chat/completions")
async def handle_chat_completions(request: Request):
    # Thin route wrapper; all proxy logic lives in _handle_completions.
    return await _handle_completions("/chat/completions", request)
@app.get("/healthcheck")
async def healthcheck():
    """Simple endpoint to check if the server is running."""
    # Also reports pool sizes so operators can confirm configuration.
    return {
        "status": "ok",
        "prefill_instances": len(app.state.prefill_clients),
        "decode_instances": len(app.state.decode_clients),
    }
if __name__ == "__main__":
    # NOTE: a `global` statement is a no-op at module scope, so the
    # original `global global_args` line was removed; assigning here
    # already creates the module-global that lifespan() reads.
    global_args = parse_args()

    import uvicorn

    uvicorn.run(app, host=global_args.host, port=global_args.port)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/nixl_integration/toy_proxy_server.py",
"license": "Apache License 2.0",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/test_nixl_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import inspect
import os
import tempfile
import textwrap
import time
import uuid
from collections import defaultdict
from typing import Any
from unittest.mock import MagicMock, patch
import msgspec
import pytest
import ray
import torch
from vllm import LLM
from vllm.config import KVTransferConfig, set_current_vllm_config
from vllm.distributed.kv_transfer.kv_connector.utils import (
KVOutputAggregator,
TpKVTopology,
get_current_attn_backend,
)
from vllm.distributed.kv_transfer.kv_connector.v1 import nixl_connector
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import KVConnectorStats
from vllm.distributed.kv_transfer.kv_connector.v1.multi_connector import (
MultiKVConnectorStats,
)
from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
KVConnectorRole,
NixlAgentMetadata,
NixlConnector,
NixlConnectorMetadata,
NixlConnectorScheduler,
NixlConnectorWorker,
NixlHandshakePayload,
NixlKVConnectorStats,
compute_nixl_compatibility_hash,
)
from vllm.distributed.kv_transfer.kv_transfer_state import (
ensure_kv_transfer_shutdown,
has_kv_transfer_group,
)
from vllm.forward_context import ForwardContext
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.platforms.interface import Platform
from vllm.sampling_params import SamplingParams
from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend
from vllm.v1.attention.backends.utils import set_kv_cache_layout
from vllm.v1.engine import EngineCoreRequest
from vllm.v1.engine.output_processor import OutputProcessor
from vllm.v1.kv_cache_interface import AttentionSpec, KVCacheConfig, KVCacheTensor
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
from vllm.v1.request import RequestStatus
from vllm.v1.worker.kv_connector_model_runner_mixin import KVConnectorModelRunnerMixin
from vllm.v1.worker.utils import AttentionGroup
from .utils import create_request, create_scheduler, create_vllm_config
@pytest.fixture(scope="module", autouse=True)
def clear_kv_transfer():
    """
    The test cases in this file use `VLLM_ENABLE_V1_MULTIPROCESSING=0`,
    causing the global variable `_KV_CONNECTOR_AGENT`
    to be assigned but never deleted.

    Since the current pytest process does not terminate and instead
    continues running tests from other files,
    this global variable remains in memory and interferes
    with test cases in other modules.

    So we use this fixture to ensure that the global variable
    `_KV_CONNECTOR_AGENT` is properly cleaned up after each test.
    """
    yield
    # Teardown: only shut down if a KV transfer group was actually created.
    if has_kv_transfer_group():
        ensure_kv_transfer_shutdown()
def get_default_xfer_telemetry(
    xferDurationS: float = 1,
    postDurationS: float = 1,
    totalBytes: int = 1,
    descCount: int = 1,
) -> dict:
    """Build a stand-in for NIXL's transfer-telemetry object.

    We can't instantiate nixlXferTelemetry because it's read only and
    the ray env does not have NIXL, so we fake it with a dict whose
    keys are also readable/writable as attributes. Durations are given
    in seconds and stored in microseconds.
    """

    class _AttrDict(dict):
        __slots__ = ()

        def __getattr__(self, key):
            return self[key]

        def __setattr__(self, key, value):
            self[key] = value

    telemetry = _AttrDict()
    telemetry.xferDuration = xferDurationS * 1e6  # in us
    telemetry.postDuration = postDurationS * 1e6  # in us
    telemetry.totalBytes = totalBytes
    telemetry.descCount = descCount
    return telemetry
class FakeNixlWrapper:
"""Mock implementation of NixlWrapper for testing.
We don't inherit from nixl._api.nixl_agent because nixl may not be
installed.
Note: The complete source of this class is also used in the
`_make_fake_nixl_pkg` function to create a fake nixl package
for Ray workers.
"""
AGENT_METADATA = b"fake_agent_metadata"
REMOTE_AGENT_NAME = "remote_agent"
def __init__(self, agent_name: str, *args, **kwargs):
self._cycles_before_xfer_done = 0
self._check_xfer_state_cycles: defaultdict[int, int] = defaultdict(lambda: 0)
def get_reg_descs(self, caches_data, memory_type: str) -> list:
return [str(uuid.uuid4()) for _ in caches_data]
def register_memory(self, descs, backends) -> None:
pass
def deregister_memory(self, descs) -> None:
pass
def get_xfer_descs(self, blocks_data, memory_type: str) -> list:
return [str(uuid.uuid4()) for _ in blocks_data]
def prep_xfer_dlist(self, agent_name: str, descs: list) -> int:
return uuid.uuid4().int
def get_agent_metadata(self) -> bytes:
return self.AGENT_METADATA
def add_remote_agent(self, agent_metadata: bytes) -> str:
return self.REMOTE_AGENT_NAME
def get_new_notifs(self) -> dict[str, list[bytes]]:
# Used to collect done_sending, which we don't test yet.
return {}
def check_xfer_state(self, handle: int) -> str:
if self._check_xfer_state_cycles[handle] >= self._cycles_before_xfer_done:
return "DONE"
self._check_xfer_state_cycles[handle] += 1
return "PROC"
def release_xfer_handle(self, handle: int) -> None:
pass
def release_dlist_handle(self, handle: int) -> None:
pass
def remove_remote_agent(self, agent: str) -> None:
pass
def send_notif(self, agent_name: str, notif_msg: bytes) -> None:
pass
def make_prepped_xfer(
self,
xfer_type: str,
local_xfer_side_handle: int,
local_block_descs_ids: list[int],
remote_xfer_side_handle: int,
remote_block_descs_ids: list[int],
notif_msg: bytes | None = None,
) -> int:
return uuid.uuid4().int
def transfer(self, handle: int) -> str:
return "PROC"
def get_xfer_telemetry(self, handle: int) -> dict:
return get_default_xfer_telemetry()
############################################################
# Follow are for changing the behavior during testing.
############################################################
def set_cycles_before_xfer_done(self, cycles: int):
"""Set the number of cycles before a transfer is considered done."""
@contextlib.contextmanager
def _make_fake_nixl_pkg():
    """Context manager that creates a temporary package making
    `from nixl._api import nixl_agent` resolve to our FakeNixlWrapper.
    Also creates rixl package for ROCm compatibility.

    Yields the temp directory (suitable for prepending to PYTHONPATH)
    and automatically cleans it up when done.
    """
    with tempfile.TemporaryDirectory() as td:
        # Create both nixl and rixl packages for cross-platform compatibility
        for pkg_name in ["nixl", "rixl"]:
            pkg_root = os.path.join(td, pkg_name, "_api")
            os.makedirs(pkg_root, exist_ok=True)

            # Get the source code of FakeNixlWrapper class and dedent it
            fake_nixl_source = inspect.getsource(FakeNixlWrapper)
            fake_nixl_source = textwrap.dedent(fake_nixl_source)

            # Stub module body: the class source plus the imports it needs.
            stub = f"""\
# Copy of FakeNixlWrapper implementation for Ray workers
import uuid
from collections import defaultdict

{fake_nixl_source}

# Export as nixl_agent
nixl_agent = FakeNixlWrapper
"""
            with open(os.path.join(pkg_root, "__init__.py"), "w") as f:
                f.write(stub)

            # Mock nixlXferTelemetry class
            pkg_root2 = os.path.join(td, pkg_name, "_bindings")
            os.makedirs(pkg_root2, exist_ok=True)
            with open(os.path.join(pkg_root2, "__init__.py"), "w") as f:
                f.write("class nixlXferTelemetry: pass")

            # touch parent package
            open(os.path.join(td, pkg_name, "__init__.py"), "w").close()
        yield td
def test_basic_interface():
    """Unit test for basic NixlConnector interface functionality."""
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Build a remote-prefill request spanning 2 full blocks plus 1 half block.
    block_size = vllm_config.cache_config.block_size
    num_external_full_blocks = 2
    num_tokens = int(block_size * (num_external_full_blocks + 0.5))

    request = create_request(
        request_id=1,
        block_size=block_size,
        num_tokens=num_tokens,
        do_remote_prefill=True,
    )
    request_id = request.request_id
    scheduler.add_request(request)

    # Remote Prefill, triggers NixlConnectorMetadata.
    scheduler_output = scheduler.schedule()
    meta = scheduler_output.kv_connector_metadata
    assert meta is not None
    assert isinstance(meta, NixlConnectorMetadata)
    assert len(meta.reqs_to_recv) == 1
    assert request_id in meta.reqs_to_recv

    # Local block ids handed to the connector must mirror the blocks the
    # KV cache manager allocated for this request, in order.
    req_meta = meta.reqs_to_recv[request_id]
    allocated_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for local_id, block in zip(req_meta.local_block_ids, allocated_blocks):
        assert local_id == block.block_id
def test_prompt_less_than_block_size():
    """
    Test that we can handle case where prompt is < block.

    In this case, the P worker will still send remote_block_ids of the
    partial block. The D worker should schedule an async read
    in this case.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # A prompt spanning only half of a single block.
    block_size = vllm_config.cache_config.block_size
    num_tokens = int(block_size * 0.5)

    # Request will have 1 partial remote block.
    request = create_request(
        request_id=1,
        block_size=block_size,
        num_tokens=num_tokens,
        do_remote_prefill=True,
        num_remote_blocks=1,
    )
    scheduler.add_request(request)
    scheduler_output = scheduler.schedule()

    # The request must be read asynchronously, not scheduled immediately.
    meta = scheduler_output.kv_connector_metadata
    assert meta is not None
    assert isinstance(meta, NixlConnectorMetadata)
    assert len(meta.reqs_to_recv) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_kv_transfer_handshake(dist_init):
    """Unit test for basic NixlConnector interface functionality.

    Wires together a prefill WORKER connector, a SCHEDULER connector, and a
    decode WORKER connector, then verifies that the decode side receives
    exactly the NixlAgentMetadata published by the prefill side during the
    NIXL handshake.
    """
    from vllm.config import set_current_vllm_config

    # Test setup, we creates a scheduler that contains a NixlConnector
    # of role SCHEDULER, and expect it to be serving NixlAgentMetadata from
    # all workers of the instance.
    vllm_config = create_vllm_config()
    # in case the test runs on non-GPU machine
    vllm_config.kv_transfer_config.kv_buffer_device = "cpu"
    scheduler = create_scheduler(vllm_config)
    with set_current_vllm_config(vllm_config):
        # Create two NixlConnector of role WORKER, one is the worker of
        # the scheduler (prefill), the other is a worker of decode instance.
        # Prefill connector will register KV cache to populate proper handshake
        # metadata.
        prefill_connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        kv_cache_shape = FlashAttentionBackend.get_kv_cache_shape(
            num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
        )
        shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
        unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
        # layer0 and layer2 deliberately share one tensor (cross-layer sharing).
        kv_caches = {
            "layer0": shared_tensor,
            "layer1": unique_tensor,
            "layer2": shared_tensor,
        }
        prefill_connector.register_kv_caches(kv_caches)

        # Simulate EngineCore initialization that would gather connector
        # metadata from all workers
        metadata = prefill_connector.get_handshake_metadata()
        # metadata is a NixlHandshakePayload, decode it to get NixlAgentMetadata
        decoder = msgspec.msgpack.Decoder(NixlAgentMetadata)
        expected_agent_metadata = decoder.decode(metadata.agent_metadata_bytes)

        # The scheduler connector expects metadata to be in
        # dict[int, KVConnectorHandshakeMetadata], where the first key is
        # the dp_rank, the second key is the tp_rank.
        scheduler_connector = scheduler.get_kv_connector()
        scheduler_connector.set_xfer_handshake_metadata({0: metadata})

        # Simulate a request that finishes prefill, which returns
        # corresponding NixlConnectorMetadata for decode instance.
        BLOCK_SIZE = vllm_config.cache_config.block_size
        NUM_EXTERNAL_FULL_BLOCKS = 2
        NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
        request = create_request(
            request_id=1,
            block_size=BLOCK_SIZE,
            num_tokens=NUM_TOKENS,
            do_remote_decode=True,
        )
        request.status = RequestStatus.FINISHED_LENGTH_CAPPED
        delay, kv_connector_metadata = scheduler.get_kv_connector().request_finished(
            request, [0, 1, 2]
        )
        assert delay

        # Decode connector will be able to create handshake with the prefill connector.
        decode_connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        decode_connector.register_kv_caches(kv_caches)

        # Here we are testing the retrieval of NIXLAgentMetadata.
        # Knowing the implementation detail, we override the add_remote_agent
        # to validate the metadata received is the same as the one in prefill_connector.
        with patch.object(
            decode_connector.connector_worker, "add_remote_agent"
        ) as mock_add_remote_agent:
            # FIX: this previously assigned `return_type`, a meaningless
            # attribute that Mock silently accepts; `return_value` is the
            # attribute that configures what the mocked call returns.
            mock_add_remote_agent.return_value = "remote_agent"
            decode_connector.connector_worker._nixl_handshake(
                kv_connector_metadata["remote_host"],
                kv_connector_metadata["remote_port"],
                kv_connector_metadata["tp_size"],
                kv_connector_metadata["remote_engine_id"],
            )
            received_metadata = mock_add_remote_agent.call_args.args
            assert received_metadata[0] == expected_agent_metadata
            assert received_metadata[1] == 0  # remote_tp_rank
            assert received_metadata[2] == 1  # remote_tp_size
        # Need to shutdown the background thread to release NIXL side channel port
        scheduler_connector.shutdown()
class FakeNixlConnectorWorker(NixlConnectorWorker):
    """NixlConnectorWorker test double that fakes the NIXL handshake.

    ``_nixl_handshake`` is overridden to bypass zmq communication entirely:
    it sleeps for a configurable latency and then registers one or more fake
    remote agents, so tests can exercise the async-handshake paths
    deterministically.
    """

    # Engine id that the fake handshake expects to be asked for.
    REMOTE_ENGINE_ID = "remote_engine"

    def __init__(
        self, *args, hand_shake_latency: float = 1.8, kv_cache_layout="HND", **kwargs
    ):
        super().__init__(*args, **kwargs)
        # Artificial delay applied inside _nixl_handshake.
        self._hand_shake_latency = hand_shake_latency
        self.kv_cache_layout = kv_cache_layout
        # Mock register_kv_caches attribute needed for tests that do not call it.
        self.src_xfer_handles_by_block_size = {self.block_size: 1}
        test_shape = self.attn_backend.get_kv_cache_shape(
            num_blocks=1, block_size=16, num_kv_heads=1, head_size=1
        )
        self.kv_topo = TpKVTopology(
            tp_rank=self.tp_rank,
            engine_id=self.engine_id,
            remote_tp_size=self._tp_size,  # shared state
            remote_block_size=self._block_size,  # shared state
            is_mla=self.use_mla,
            total_num_kv_heads=self.model_config.get_total_num_kv_heads(),
            attn_backend=self.attn_backend,
            tensor_shape=test_shape,
        )
        self.compat_hash = compute_nixl_compatibility_hash(
            self.vllm_config, self.backend_name, self.kv_topo.cross_layers_blocks
        )

    def _nixl_handshake(
        self, host: str, port: int, remote_tp_size: int, expected_engine_id: str
    ) -> dict[int, str]:
        """Fake handshake: sleep, then register fake remote agent(s).

        Returns a dict mapping remote tp_rank -> agent name, mirroring the
        real implementation's contract. `host`/`port` are ignored.
        """
        # Mimic slow _nixl_handshake, as well as bypass zmq communication.
        time.sleep(self._hand_shake_latency)
        # These should've been done in register_kv_caches(), called by
        # gpu_model_runner. Here we just hardcode some dummy values.
        slot_size_bytes = 4096
        self.slot_size_per_layer = [slot_size_bytes]
        self.block_len_per_layer = [slot_size_bytes * self.block_size]
        self.num_blocks = 1
        self.dst_num_blocks[self.engine_id] = self.num_blocks
        assert expected_engine_id == self.REMOTE_ENGINE_ID

        # Adjust remote block length metadata to satisfy heterogeneous TP
        # invariants enforced during handshake validation.
        remote_block_lens = list(self.block_len_per_layer)
        tp_ratio = self.kv_topo.tp_ratio(remote_tp_size)
        if remote_tp_size > self.world_size:
            # P TP > D TP case, block_len of remote is smaller.
            # tp_ratio is negative in this case, hence the negation.
            remote_block_lens = [
                block_len // (-tp_ratio) for block_len in remote_block_lens
            ]
        elif remote_tp_size < self.world_size:
            remote_block_lens = [
                block_len * tp_ratio for block_len in remote_block_lens
            ]
        # When remote tp_size > local tp_size, handshake with multiple
        # remote ranks.
        num_hanshakes = 1 if tp_ratio > 0 else -tp_ratio
        remote_agents: dict[int, str] = {}
        for remote_tp_rank in range(num_hanshakes):
            remote_agent_name = self.add_remote_agent(
                NixlAgentMetadata(
                    engine_id=self.REMOTE_ENGINE_ID,
                    agent_metadata=FakeNixlWrapper.AGENT_METADATA,
                    kv_caches_base_addr=[0],
                    device_id=remote_tp_rank,
                    num_blocks=1,
                    block_lens=remote_block_lens,
                    # `self.kv_cache_layout` is only forced to HND when vllm engine
                    # is started. We mock HND here.
                    kv_cache_layout="HND",
                    block_size=self.block_size,
                ),
                remote_tp_rank=remote_tp_rank,
                remote_tp_size=remote_tp_size,
            )
            remote_agents[remote_tp_rank] = remote_agent_name
        return remote_agents
class TestNixlHandshake:
    """Tests for the NIXL handshake and transfer paths of NixlConnector.

    Every test patches NixlWrapper with FakeNixlWrapper and installs a
    FakeNixlConnectorWorker so no real NIXL/zmq communication takes place.
    """

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    def test_multi_xfer_one_engine(
        self,
        default_vllm_config,
        # dist_init is a fixture that initializes the distributed environment.
        dist_init,
    ):
        """Test case where multiple xfers are initiated to the same engine.

        This test triggers the connector to load remote KV for the same
        `request_id`. The transfer is not done immediately due to
        `set_cycles_before_xfer_done`, so there is a state where there are
        multiple transfer states for the same `request_id`, and `get_finished`
        should handle it correctly (wait for all transfers to be done).
        """
        vllm_config = create_vllm_config()
        request_id = "req_id"

        # Test worker role in decode server.
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        connector.connector_worker = FakeNixlConnectorWorker(
            vllm_config, connector.engine_id, hand_shake_latency=0
        )
        assert isinstance(connector.connector_worker.nixl_wrapper, FakeNixlWrapper)
        worker = connector.connector_worker
        # Each transfer only reports done after 3 polling cycles.
        worker.nixl_wrapper.set_cycles_before_xfer_done(3)
        # simulate handshake
        worker.dst_xfer_side_handles = {
            FakeNixlConnectorWorker.REMOTE_ENGINE_ID: {0: 1}
        }
        worker.kv_cache_layout = "HND"
        num_xfers = 4
        while True:
            # For the same request_id, initiate multiple xfers across different
            # round of `execute_model` calls.
            metadata = NixlConnectorMetadata()
            if num_xfers > 0:
                num_xfers -= 1
                metadata.add_new_req_to_recv(
                    request_id=request_id,
                    local_block_ids=[num_xfers + 1, num_xfers + 2, num_xfers + 3],
                    kv_transfer_params={
                        "remote_block_ids": [
                            num_xfers + 4,
                            num_xfers + 5,
                            num_xfers + 6,
                        ],
                        "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                        "remote_request_id": f"prefill-{request_id}",
                        "remote_host": "localhost",
                        "remote_port": 1234,
                        "remote_tp_size": 1,
                    },
                )
            connector.bind_connector_metadata(metadata)

            # Mimic logic in KVConnectorModelRunnerMixin._get_kv_connector_output.
            dummy_ctx = ForwardContext(
                no_compile_layers={},
                attn_metadata={},
                virtual_engine=0,
                slot_mapping={},
            )
            _before_load = time.perf_counter()
            connector.start_load_kv(dummy_ctx)
            _after_load = time.perf_counter()
            # start_load_kv must never block the forward pass.
            assert _after_load - _before_load < 0.1, (
                f"start_load_kv took {_after_load - _before_load} seconds"
            )

            # Mimic logic in KVConnectorModelRunnerMixin._get_kv_connector_output.
            _, done_recving = connector.get_finished(finished_req_ids=set())
            if len(done_recving) > 0:
                assert request_id in done_recving
                break

            connector.clear_connector_metadata()

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    @pytest.mark.parametrize(
        "decode_tp_size, prefill_tp_size",
        [
            (1, 1),
            (2, 1),
            (4, 2),
            (4, 4),
        ],
    )
    def test_async_load_kv(
        self,
        default_vllm_config,
        # Fixture that initializes the distributed environment.
        dist_init,
        # Simulate consumer-producer TP sizes.
        decode_tp_size,
        prefill_tp_size,
    ):
        """Test that NixlConnector's start_load_kv should be non-blocking."""
        vllm_config = create_vllm_config()
        vllm_config.parallel_config.tensor_parallel_size = decode_tp_size

        # Test worker role in decode server.
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        connector.connector_worker = FakeNixlConnectorWorker(
            vllm_config, connector.engine_id
        )
        metadata = NixlConnectorMetadata()
        metadata.add_new_req_to_recv(
            request_id="id",
            local_block_ids=[1, 2, 3],
            kv_transfer_params={
                "remote_block_ids": [4, 5, 6],
                "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                "remote_request_id": "prefill-id",
                "remote_host": "localhost",
                "remote_port": 1234,
                "remote_tp_size": prefill_tp_size,
            },
        )
        connector.bind_connector_metadata(metadata)

        # The fake handshake takes ~1.8s (default latency); poll until the
        # request finishes receiving or we give up.
        timeout = 2.5
        start = time.perf_counter()
        while time.perf_counter() - start < timeout:
            dummy_ctx = ForwardContext(
                no_compile_layers={},
                attn_metadata={},
                virtual_engine=0,
                slot_mapping={},
            )
            _before_load = time.perf_counter()
            connector.start_load_kv(dummy_ctx)
            _after_load = time.perf_counter()
            # Even while the handshake is pending, start_load_kv must return
            # promptly.
            assert _after_load - _before_load < 0.1, (
                f"start_load_kv took {_after_load - _before_load} seconds"
            )
            time.sleep(0.5)  # backoff for the async handshake to complete.
            connector.bind_connector_metadata(NixlConnectorMetadata())
            _, done_recving = connector.get_finished(finished_req_ids=set())
            if len(done_recving) > 0:
                return
        raise TimeoutError("Took too long to complete async handshake.")

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    @pytest.mark.parametrize("local_tp_size", [1, 2])
    def test_prefill_tp_size_greater_than_decode_tp_size(
        self, local_tp_size: int, default_vllm_config, dist_init
    ):
        """
        Verify remote TP > local TP handshake succeeds with different
        remote configurations.
        """
        vllm_config = create_vllm_config()
        # NOTE(review): this override forces local_tp_size to 1 regardless of
        # the [1, 2] parametrization above, making the parameter ineffective —
        # confirm whether this is intentional.
        local_tp_size = 1
        vllm_config.parallel_config.tensor_parallel_size = local_tp_size
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        connector.connector_worker = FakeNixlConnectorWorker(
            vllm_config, connector.engine_id, hand_shake_latency=0
        )
        worker = connector.connector_worker

        # Minimal local registration params used by add_remote_agent
        worker.slot_size_per_layer = [4096]
        worker.block_len_per_layer = [4096 * worker.block_size]
        worker.num_blocks = 1
        worker.dst_num_blocks[worker.engine_id] = worker.num_blocks
        worker.src_blocks_data = [(0, worker.block_len_per_layer[0], worker.tp_rank)]

        def check_handshake(remote_tp_size: int):
            # Validates the worker state after a handshake. Reads
            # `remote_agents` from the enclosing scope, which is assigned
            # right before each call below.
            tp_ratio = remote_tp_size // local_tp_size
            assert set(remote_agents.keys()) == set(range(tp_ratio))
            remote_engine_id = worker.REMOTE_ENGINE_ID
            assert worker._tp_size[remote_engine_id] == remote_tp_size
            assert -tp_ratio == worker.kv_topo.tp_ratio_from_engine_id(remote_engine_id)
            # ensure src_xfer_handles_by_tp_ratio is populated with tpratio chunks
            assert -tp_ratio in worker.src_xfer_handles_by_tp_ratio
            assert len(worker.src_xfer_handles_by_tp_ratio[-tp_ratio]) == tp_ratio
            assert remote_engine_id in worker.dst_xfer_side_handles
            assert set(worker.dst_xfer_side_handles[remote_engine_id].keys()) == set(
                range(tp_ratio)
            )

        remote_agents = worker._nixl_handshake(
            host="localhost",
            port=1234,
            remote_tp_size=2,
            expected_engine_id=worker.REMOTE_ENGINE_ID,
        )
        check_handshake(2)

        # NOTE flexiblity: a second remote with higher number of ranks is
        # discovered. This is not a scenario we actively support right now, but
        # the connector allows it.
        worker.REMOTE_ENGINE_ID = "remote_engine_2"
        remote_agents = worker._nixl_handshake(
            host="localhost",
            port=1234,
            remote_tp_size=6,
            expected_engine_id=worker.REMOTE_ENGINE_ID,
        )
        check_handshake(6)

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    @pytest.mark.parametrize("local_tp_size", [1, 2])
    def test_prefill_tp_size_greater_than_decode_tp_size_mla(
        self, local_tp_size: int, default_vllm_config, dist_init
    ):
        """
        Verify remote TP > local TP handshake succeeds with different
        remote configurations for an MLA model.
        """
        vllm_config = create_vllm_config()
        d_tp_size = 1
        p_tp_size = 2
        # Build two separate connectors/workers to emulate P TP=2 ranks.
        conn_p0 = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        conn_p1 = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        conn_p0.connector_worker = FakeNixlConnectorWorker(
            vllm_config, conn_p0.engine_id, hand_shake_latency=0
        )
        conn_p1.connector_worker = FakeNixlConnectorWorker(
            vllm_config, conn_p1.engine_id, hand_shake_latency=0
        )
        # Force P world size to 2 for both workers and emulate distinct tp_ranks.
        # Also enable MLA path so that expected_finished_count is updated.
        for rank, worker in enumerate(
            (conn_p0.connector_worker, conn_p1.connector_worker)
        ):
            worker.world_size = p_tp_size
            worker.kv_topo.remote_tp_size = {worker.engine_id: p_tp_size}
            worker.tp_rank = rank
            worker.use_mla = True

        req_id = "req-ep-dp2-p0"
        now = time.perf_counter()
        # Register a request on P that is waiting for consumers to read
        # (both workers track it).
        conn_p0.connector_worker._reqs_to_send[req_id] = now + 10.0
        conn_p0.connector_worker._reqs_to_process.add(req_id)
        conn_p1.connector_worker._reqs_to_send[req_id] = now + 10.0
        conn_p1.connector_worker._reqs_to_process.add(req_id)

        # Simulate a read notification coming from D with (tp=1, dp=2).
        notif = f"{req_id}:{d_tp_size}".encode()
        # D0-0->P0 notif
        conn_p0.connector_worker.nixl_wrapper.get_new_notifs = lambda: {
            "agent": [notif]
        }  # type: ignore[method-assign]
        conn_p1.connector_worker.nixl_wrapper.get_new_notifs = lambda: {
            "agent": [notif]
        }  # type: ignore[method-assign]

        # Trigger notification processing via get_finished().
        done_sending0, _ = conn_p0.get_finished(finished_req_ids=set())
        done_sending1, _ = conn_p1.get_finished(finished_req_ids=set())
        assert req_id in done_sending0 and req_id in done_sending1

        # E2E aggregation: ensure the aggregated output marks the request
        # as finished using the connector's expected_finished_count.
        from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput

        aggregator = KVOutputAggregator.from_connector(conn_p0, world_size=2)
        out0 = ModelRunnerOutput(
            req_ids=[req_id],
            req_id_to_index={req_id: 0},
            sampled_token_ids=[[0]],
            logprobs=None,
            prompt_logprobs_dict={},
            pooler_output=[None],
            kv_connector_output=KVConnectorOutput(
                finished_sending=done_sending0,
                finished_recving=None,
            ),
        )
        out1 = ModelRunnerOutput(
            req_ids=[req_id],
            req_id_to_index={req_id: 0},
            sampled_token_ids=[[0]],
            logprobs=None,
            prompt_logprobs_dict={},
            pooler_output=[None],
            kv_connector_output=KVConnectorOutput(
                finished_sending=done_sending1,
                finished_recving=None,
            ),
        )
        aggregated = aggregator.aggregate([out0, out1], output_rank=0)
        assert aggregated.kv_connector_output is not None
        assert aggregated.kv_connector_output.finished_sending == {req_id}

        # Producers cleaned up state for the finished request.
        assert req_id not in conn_p0.connector_worker._reqs_to_send
        assert req_id not in conn_p0.connector_worker._reqs_to_process
        assert req_id not in conn_p1.connector_worker._reqs_to_send
        assert req_id not in conn_p1.connector_worker._reqs_to_process

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    def test_concurrent_load_kv(
        self,
        default_vllm_config,
        # dist_init is a fixture that initializes the distributed environment.
        dist_init,
    ):
        """Test that multiple start_load_kv calls should occur concurrently."""
        vllm_config = create_vllm_config()

        # Test worker role in decode server.
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        connector.connector_worker = FakeNixlConnectorWorker(
            vllm_config, connector.engine_id
        )
        # Register (mocked) local xfer handler
        # worker = connector.connector_worker
        # worker.src_xfer_handles_by_block_size = {worker.block_size: 1}

        # Queue 5 requests, each requiring its own handshake/transfer.
        metadata = NixlConnectorMetadata()
        total_reqs = 5
        for i in range(total_reqs):
            metadata.add_new_req_to_recv(
                request_id=f"id_{i}",
                local_block_ids=[1, 2, 3],
                kv_transfer_params={
                    "remote_block_ids": [4, 5, 6],
                    "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                    "remote_request_id": f"prefill-id-{i}",
                    "remote_host": "localhost",
                    "remote_port": 1234,
                    "remote_tp_size": 1,
                },
            )
        connector.bind_connector_metadata(metadata)

        timeout = 2.5 * total_reqs
        cnt_finished_reqs = 0
        start = time.perf_counter()
        while time.perf_counter() - start < timeout:
            dummy_ctx = ForwardContext(
                no_compile_layers={},
                attn_metadata={},
                virtual_engine=0,
                slot_mapping={},
            )
            _before_load = time.perf_counter()
            connector.start_load_kv(dummy_ctx)
            _after_load = time.perf_counter()
            assert _after_load - _before_load < 0.1, (
                f"start_load_kv took {_after_load - _before_load} seconds"
            )
            time.sleep(0.5)  # backoff for the async handshake to complete.
            connector.bind_connector_metadata(NixlConnectorMetadata())
            _, done_recving = connector.get_finished(finished_req_ids=set())
            if len(done_recving) > 0:
                cnt_finished_reqs += len(done_recving)
                if cnt_finished_reqs == total_reqs:
                    return
        raise TimeoutError("Took too long to complete async handshake.")

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    def test_handshake_fails_on_kv_cache_layout_mismatch(
        self, default_vllm_config, dist_init
    ):
        """
        Verify that adding a remote agent fails if kv_cache_layout differs.
        This test is only relevant for heterogeneous TP.
        """
        vllm_config = create_vllm_config()
        # Mock TP world size to 2 to force heterogeneous TP when
        # remote_tp_size=1
        with patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.get_tensor_model_parallel_world_size",  # noqa: E501
            return_value=2,
        ):
            # Initialize connector and worker (with fake NIXL wrapper)
            connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
            connector.connector_worker = FakeNixlConnectorWorker(
                vllm_config, connector.engine_id, hand_shake_latency=0
            )
            worker = connector.connector_worker

            # Minimal local registration params used by add_remote_agent
            worker.slot_size_per_layer = [4096]
            worker.block_len_per_layer = [4096 * worker.block_size]
            worker.num_blocks = 1
            worker.dst_num_blocks[worker.engine_id] = worker.num_blocks

            # Metadata with different kv_cache_layout than local worker
            mismatched_layout = "HND" if worker.kv_cache_layout != "HND" else "NHD"
            meta = NixlAgentMetadata(
                engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                agent_metadata=FakeNixlWrapper.AGENT_METADATA,
                kv_caches_base_addr=[0],
                device_id=0,
                num_blocks=1,
                block_lens=worker.block_len_per_layer,
                kv_cache_layout=mismatched_layout,
                block_size=worker.block_size,
            )
            with pytest.raises(RuntimeError):
                # mismatched layout is expected to fail
                worker.add_remote_agent(meta, remote_tp_size=2)
            worker.add_remote_agent(meta, remote_tp_size=1)

    @patch(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
        FakeNixlWrapper,
    )
    def test_handshake_succeed_on_kv_cache_layout_mismatch_with_experimental(
        self, default_vllm_config, dist_init
    ):
        """
        Verify that adding a remote agent succeeds despite a kv_cache_layout
        mismatch when the experimental permute-local-KV path is enabled.
        This test is only relevant for heterogeneous TP.
        """
        vllm_config = create_vllm_config(enable_permute_local_kv=True)
        # Mock TP world size to 2 to force heterogeneous TP when
        # remote_tp_size=1
        with patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.get_tensor_model_parallel_world_size",  # noqa: E501
            return_value=2,
        ):
            # Initialize connector and worker (with fake NIXL wrapper)
            connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
            connector.connector_worker = FakeNixlConnectorWorker(
                vllm_config,
                connector.engine_id,
                hand_shake_latency=0,
                kv_cache_layout="NHD",
            )
            worker = connector.connector_worker

            # Minimal local registration params used by add_remote_agent
            worker.slot_size_per_layer = [2048]
            worker.block_len_per_layer = [2048 * worker.block_size]
            worker.num_blocks = 1
            worker.dst_num_blocks[worker.engine_id] = worker.num_blocks

            # Metadata with different kv_cache_layout than local worker
            meta = NixlAgentMetadata(
                engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                agent_metadata=FakeNixlWrapper.AGENT_METADATA,
                kv_caches_base_addr=[0],
                device_id=0,
                num_blocks=1,
                # prefill TP=1, decode TP=2, remote block_lens is double to local
                block_lens=[i * 2 for i in worker.block_len_per_layer],
                kv_cache_layout="HND",
                block_size=worker.block_size,
            )
            # We don't check layout for homogeneous TP and MLA for now, as the
            # whole block is moved.
            worker.add_remote_agent(meta, remote_tp_size=1)
# NOTE: resource cleanup in the mp backend is a bit finicky, so the ordering
# here matters. Run the ray-backed test first so it cleans up its resources,
# then run the rest of the tests.
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_kv_connector_stats(default_vllm_config, dist_init):
    """Test that KV transfer stats are properly recorded and retrieved."""
    vllm_config = create_vllm_config()

    # Test worker role in decode server.
    connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
    connector.connector_worker = FakeNixlConnectorWorker(
        vllm_config, connector.engine_id, hand_shake_latency=0
    )

    # Verify that xfer_stats starts empty
    initial_stats = connector.get_kv_connector_stats()
    assert initial_stats is None

    # Create transfer metadata
    request_id = "test_req_for_stats"
    metadata = NixlConnectorMetadata()
    metadata.add_new_req_to_recv(
        request_id=request_id,
        local_block_ids=[1, 2, 3],
        kv_transfer_params={
            "remote_block_ids": [4, 5, 6],
            "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            "remote_request_id": f"prefill-{request_id}",
            "remote_host": "localhost",
            "remote_port": 1234,
            "remote_tp_size": 1,
        },
    )
    connector.bind_connector_metadata(metadata)

    # Start the transfer
    dummy_ctx = ForwardContext(
        no_compile_layers={},
        attn_metadata={},
        virtual_engine=0,
        slot_mapping={},
    )
    connector.start_load_kv(dummy_ctx)

    # Verify stats are recorded after transfer is complete
    max_iterations = 2
    # Clear metadata before start_load_kv to prevent reprocessing same request
    connector.bind_connector_metadata(NixlConnectorMetadata())
    for _ in range(max_iterations):
        # Need to call start_load_kv to process completed handshakes
        connector.start_load_kv(dummy_ctx)
        _, done_recving = connector.get_finished(finished_req_ids=set())
        if len(done_recving) > 0 and request_id in done_recving:
            break
        time.sleep(0.1)  # Small delay to allow background handshake to complete
    else:
        # FIX: this was previously `assert "<message>"`, which always passes
        # since a non-empty string is truthy; fail explicitly instead.
        pytest.fail("Transfer did not complete within expected iterations")

    # Now check that stats were recorded
    stats_after_transfer = connector.get_kv_connector_stats()
    assert isinstance(stats_after_transfer, NixlKVConnectorStats)

    # Verify stats values are recorded
    assert not stats_after_transfer.is_empty()
    assert stats_after_transfer.num_successful_transfers == 1

    # Verify stats are reset after retrieval
    stats_after_reset = connector.get_kv_connector_stats()
    assert stats_after_reset is None
def test_kv_connector_stats_aggregation():
    """
    Test KV transfer stats aggregation across TP ranks using
    KVOutputAggregator (used by MultiprocExecutor).
    """
    # Aggregator for 3 workers (simulating TP=3), mirroring what
    # MultiprocExecutor.execute_model sets up.
    aggregator = KVOutputAggregator(expected_finished_count=3)

    # Telemetry fixtures: the default record, and a record with all
    # fields doubled.
    fast_record = get_default_xfer_telemetry()
    slow_record = get_default_xfer_telemetry(
        xferDurationS=2, postDurationS=2, totalBytes=2, descCount=2
    )

    # Worker 1: 2 default transfers; worker 2: 1 default; worker 3: 3 doubled.
    transfer_plan = [
        [fast_record, fast_record],
        [fast_record],
        [slow_record, slow_record, slow_record],
    ]
    per_worker_stats = []
    for records in transfer_plan:
        stats = NixlKVConnectorStats()
        for record in records:
            stats.record_transfer(record)
        per_worker_stats.append(stats)

    # Wrap each worker's stats in a ModelRunnerOutput: workers 0,1 finished
    # sending, workers 1,2 finished receiving.
    worker_outputs = []
    for i, worker_stats in enumerate(per_worker_stats):
        worker_outputs.append(
            ModelRunnerOutput(
                req_ids=[f"req_{i}"],
                req_id_to_index={f"req_{i}": 0},
                sampled_token_ids=[[123]],  # dummy token
                logprobs=None,
                prompt_logprobs_dict={},
                pooler_output=[None],
                kv_connector_output=KVConnectorOutput(
                    finished_sending={f"req_{i}_send"} if i < 2 else None,
                    finished_recving={f"req_{i}_recv"} if i > 0 else None,
                    kv_connector_stats=worker_stats,
                ),
            )
        )

    # Use the real aggregation mechanism (like MultiprocExecutor.execute_model)
    aggregated_output = aggregator.aggregate(worker_outputs, output_rank=0)
    kv_connector_stats = aggregated_output.kv_connector_output.kv_connector_stats
    assert isinstance(kv_connector_stats, NixlKVConnectorStats)
    # Number of total transfers across all workers.
    assert kv_connector_stats.num_successful_transfers == 6

    # Logging proc, call reduce() to get CLI-friendly stats.
    cli_stats = kv_connector_stats.reduce()
    assert cli_stats["Avg xfer time (ms)"] == 1500.0
    assert cli_stats["Avg post time (ms)"] == 1500.0
    assert cli_stats["Avg number of descriptors"] == 1.5
def test_multi_kv_connector_stats_aggregation():
    """
    Test MultiKVConnectorStats aggregation across TP ranks using
    KVOutputAggregator (used by MultiprocExecutor).
    """
    aggregator = KVOutputAggregator(expected_finished_count=3)

    from dataclasses import dataclass

    # Mock a KVConnectorStats class for testing aggregation over connectors.
    @dataclass
    class FooKVConnectorStats(KVConnectorStats):
        def reset(self):
            # Reset to a zeroed counter.
            self.data = {"num_foo_transfers": 0}

        def record_transfer(self):
            if "num_foo_transfers" not in self.data:
                self.data["num_foo_transfers"] = 0
            self.data["num_foo_transfers"] += 1

        def is_empty(self) -> bool:
            return self.data["num_foo_transfers"] == 0

        def aggregate(self, other: "FooKVConnectorStats") -> "FooKVConnectorStats":
            # Fold the other worker's counter into this one, in place.
            if not other.is_empty():
                self.data["num_foo_transfers"] += other.data["num_foo_transfers"]
            return self

    def make_multi_stats(nixl_count: int, foo_count: int) -> MultiKVConnectorStats:
        # Build a MultiKVConnectorStats holding per-connector sub-stats,
        # keyed by connector name; zero-count connectors are omitted.
        data: dict[str, KVConnectorStats] = {}
        if nixl_count > 0:
            nixl_stats = NixlKVConnectorStats()
            for _ in range(nixl_count):
                nixl_stats.record_transfer(get_default_xfer_telemetry())
            data["NixlConnector"] = nixl_stats
        if foo_count > 0:
            foo_stats = FooKVConnectorStats()
            for _ in range(foo_count):
                foo_stats.record_transfer()
            data["FooConnector"] = foo_stats
        return MultiKVConnectorStats(data=data)

    # Create heterogeneous stats across 3 workers
    worker_patterns = [(2, 1), (3, 0), (0, 5)]  # (Nixl, Foo)

    worker_outputs: list[ModelRunnerOutput] = []
    for i, (nixl, foo) in enumerate(worker_patterns):
        stats = make_multi_stats(nixl, foo)
        output = ModelRunnerOutput(
            req_ids=[f"req_{i}"],
            req_id_to_index={f"req_{i}": 0},
            sampled_token_ids=[[123]],
            logprobs=None,
            prompt_logprobs_dict={},
            pooler_output=[None],
            kv_connector_output=KVConnectorOutput(
                finished_sending=set([f"req_{i}_send"]) if i < 2 else None,
                finished_recving=set([f"req_{i}_recv"]) if i > 0 else None,
                kv_connector_stats=stats,
            ),
        )
        worker_outputs.append(output)

    aggregated_output = aggregator.aggregate(worker_outputs, output_rank=0)
    kv_connector_stats = aggregated_output.kv_connector_output.kv_connector_stats
    assert isinstance(kv_connector_stats, MultiKVConnectorStats)

    # Validate per-connector totals across workers
    # (Nixl: 2 + 3 + 0 = 5, Foo: 1 + 0 + 5 = 6).
    assert isinstance(kv_connector_stats["NixlConnector"], NixlKVConnectorStats)
    assert kv_connector_stats["NixlConnector"].num_successful_transfers == 5
    assert isinstance(kv_connector_stats["FooConnector"], FooKVConnectorStats)
    assert kv_connector_stats["FooConnector"].data["num_foo_transfers"] == 6
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_scheduler_kv_connector_stats_aggregation():
    """Test scheduler and worker KV connector stats aggregation."""
    from vllm.v1.core.sched.output import SchedulerOutput

    scheduler = create_scheduler(create_vllm_config())

    # Worker stats with transfer metrics
    worker_stats = NixlKVConnectorStats()
    worker_stats.record_transfer(get_default_xfer_telemetry())
    worker_stats.data["remote_tokens"] = []

    # Scheduler stats with custom metric (needs dummy transfer to avoid being skipped)
    scheduler_stats = NixlKVConnectorStats()
    scheduler_stats.data.update(
        {  # dummy transfer just for testing, to bypass is_empty() check
            "transfer_duration": [0],
            "post_duration": [0],
            "bytes_transferred": [0],
            "num_descriptors": [0],
            "remote_tokens": [128],
        }
    )
    # Mock the scheduler connector's stats method
    scheduler.connector.get_kv_connector_stats = lambda: MultiKVConnectorStats(
        data={"NixlConnector": scheduler_stats}
    )

    # Single-request model output carrying the worker-side stats.
    model_output = ModelRunnerOutput(
        req_ids=["req_0"],
        req_id_to_index={"req_0": 0},
        sampled_token_ids=[[123]],
        logprobs=None,
        prompt_logprobs_dict={},
        pooler_output=[None],
        kv_connector_output=KVConnectorOutput(
            kv_connector_stats=MultiKVConnectorStats(
                data={"NixlConnector": worker_stats}
            )
        ),
    )
    # Minimal SchedulerOutput scheduling one token for req_0.
    scheduler_output = SchedulerOutput(
        scheduled_new_reqs=[],
        scheduled_cached_reqs=None,
        num_scheduled_tokens={"req_0": 1},
        total_num_scheduled_tokens=1,
        scheduled_spec_decode_tokens={},
        scheduled_encoder_inputs={},
        num_common_prefix_blocks=[0],
        finished_req_ids=set(),
        free_encoder_mm_hashes=[],
    )
    engine_core_outputs = scheduler.update_from_output(scheduler_output, model_output)
    final_stats = next(
        iter(engine_core_outputs.values())
    ).scheduler_stats.kv_connector_stats
    nixl_stats = final_stats["NixlConnector"]
    # 1 real worker transfer + 1 dummy scheduler-side transfer.
    assert nixl_stats.num_successful_transfers == 2
    # The scheduler-side custom metric survives aggregation.
    assert nixl_stats.data["remote_tokens"] == [128]
@pytest.mark.parametrize("distributed_executor_backend", ["ray", None])
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_abort_timeout_on_prefiller(monkeypatch, distributed_executor_backend):
    """
    Test lifecycle of an aborted Remote Prefill request hitting the timeout.
    -----> P
            |  {process request}
    <-/---  |  {result is NOT delivered, eg proxy is down}
            |
            |
            |  {eventually free blocks}
    """
    model_name = "Qwen/Qwen3-0.6B"
    kv_transfer_config = KVTransferConfig(
        kv_connector="NixlConnector",
        kv_role="kv_both",
    )
    llm_kwargs = {
        "model": model_name,
        "enforce_eager": True,
        "gpu_memory_utilization": 0.5,
        "kv_transfer_config": kv_transfer_config,
        "distributed_executor_backend": distributed_executor_backend,
    }

    timeout = 6
    # Run the engine in-process so _run_abort_timeout_test can reach
    # llm.llm_engine.engine_core internals directly.
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
    monkeypatch.setenv("VLLM_NIXL_ABORT_REQUEST_TIMEOUT", str(timeout))

    def run_test_and_cleanup():
        # Shut down the engine core even if the test body fails, so the
        # next parametrization starts clean.
        llm = LLM(**llm_kwargs)
        try:
            _run_abort_timeout_test(llm, timeout)
        finally:
            llm.llm_engine.engine_core.shutdown()

    # Build runtime_env only if we're using Ray
    if distributed_executor_backend == "ray":
        with _make_fake_nixl_pkg() as working_dir:
            runtime_env = {
                "working_dir": working_dir,  # ship fake nixl package
                "env_vars": {
                    "VLLM_NIXL_ABORT_REQUEST_TIMEOUT": str(timeout),
                    # TODO: for ray to carry over, remove once we set
                    "NIXL_TELEMETRY_ENABLE": "1",
                },
            }
            ray.init(runtime_env=runtime_env)
            try:
                run_test_and_cleanup()
            finally:
                ray.shutdown()
    else:
        run_test_and_cleanup()
class RequestIdMapper:
    """Helper class to map external request IDs to internal request IDs.

    Wraps ``output_processor.add_request`` so every registration records
    the external->internal id pair; look up a mapping by calling the
    instance with the external id.
    """

    def __init__(self, output_processor: "OutputProcessor"):
        # external_req_id -> internal request_id, filled as requests arrive.
        self.req_id_mapping: dict[str, str] = {}
        # Keep the wrapped method around so we can delegate to it.
        self.original_add_request = output_processor.add_request
        output_processor.add_request = self._add_request

    def _add_request(self, request: "EngineCoreRequest", *args, **kwargs):
        # Record the id pair, then delegate to the original method.
        self.req_id_mapping[request.external_req_id] = request.request_id
        return self.original_add_request(request, *args, **kwargs)

    def __call__(self, external_req_id: str) -> str:
        return self.req_id_mapping[external_req_id]
def _run_abort_timeout_test(llm: LLM, timeout: int):
    """Helper function to run the abort timeout test logic.

    Sends remote-prefill style requests whose results are never pulled by a
    decoder, then checks that the prefiller keeps their KV blocks allocated
    until ``timeout`` elapses, after which they are freed.
    """
    remote_prefill_opts = {
        "do_remote_decode": True,
        "do_remote_prefill": False,
        "remote_engine_id": None,
        "remote_block_ids": None,
        "remote_host": None,
        "remote_port": None,
    }
    # Simulate sidecar request
    sampling_params = SamplingParams(
        temperature=0.0,
        max_tokens=1,
        extra_args={"kv_transfer_params": remote_prefill_opts},
    )
    scheduler = llm.llm_engine.engine_core.engine_core.scheduler
    req_to_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks

    id_mapper = RequestIdMapper(llm.llm_engine.output_processor)

    def req_id(outputs: list[RequestOutput]) -> str:
        # Exactly one prompt is passed per generate() call below.
        assert len(outputs) == 1
        return id_mapper(outputs[0].request_id)

    # BUGFIX: these fragments were previously separate statements, so only
    # the first one was assigned to `padding` and the rest were no-op
    # expressions; parenthesize so they concatenate into the intended
    # long prompt.
    padding = (
        "Just making this request a little longer so that we're sure "
        "we're not hitting the small-request lower bound beneath which we don't "
        "actually trigger the whole kv transfer, but rather just recompute the "
        "blocks on D."
    )
    req0_id = req_id(
        llm.generate([f"What is the capital of Japan? {padding}"], sampling_params)
    )

    # Request finished but not freed
    assert req0_id in scheduler.finished_req_ids and req0_id in req_to_blocks

    # Some other request, 0 still not freed
    req1_id = req_id(
        llm.generate([f"What is the capital of Italy? {padding}"], sampling_params)
    )
    assert req0_id in req_to_blocks
    assert req1_id in scheduler.finished_req_ids and req1_id in req_to_blocks

    # Wait for timeout and trigger another scheduler loop
    time.sleep(timeout)
    _ = llm.generate([f"What is the capital of France? {padding}"], sampling_params)

    # Request-0 times out and is cleared!
    assert req0_id not in req_to_blocks

    # Need to shutdown the background thread to release NIXL side channel port
    llm.llm_engine.engine_core.shutdown()
@pytest.mark.parametrize("enable_cross_layers", ["False", "True"])
@pytest.mark.parametrize(
    "attn_backend",
    [
        pytest.param(
            "FLASH_ATTN",
            marks=pytest.mark.skipif(
                current_platform.is_rocm(),
                reason="Attention backend FLASH_ATTN is not supported on ROCm",
            ),
        ),
        pytest.param(
            "ROCM_ATTN",
            marks=pytest.mark.skipif(
                not current_platform.is_rocm(),
                reason="Attention backend ROCM_ATTN is only supported on ROCm",
            ),
        ),
        "TRITON_ATTN",
    ],
)
def test_register_kv_caches(
    default_vllm_config, dist_init, attn_backend, enable_cross_layers
):
    """
    Test that register_kv_caches() properly calls nixl_wrapper methods with
    correct data.

    This test verifies:
    1. nixl_wrapper.get_reg_descs() is called with caches_data containing
       tensor metadata
    2. nixl_wrapper.get_xfer_descs() is called with blocks_data containing
       block layout info
    """
    vllm_config = create_vllm_config(attention_backend=attn_backend)
    # Enable cross layers blocks.
    # NOTE: enable_cross_layers is parametrized as the *string*
    # "False"/"True" and stored verbatim in the connector extra config.
    vllm_config.kv_transfer_config.kv_connector_extra_config[
        "enable_cross_layers_blocks"
    ] = enable_cross_layers
    set_kv_cache_layout("HND")

    # Import the appropriate backend based on the parameter
    if attn_backend == "FLASH_ATTN":
        from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend

        backend_cls = FlashAttentionBackend
    elif attn_backend == "ROCM_ATTN":
        from vllm.v1.attention.backends.rocm_attn import RocmAttentionBackend

        backend_cls = RocmAttentionBackend
    else:  # TRITON_ATTN
        from vllm.v1.attention.backends.triton_attn import TritonAttentionBackend

        backend_cls = TritonAttentionBackend

    nixl_module = "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector"
    with (
        patch(f"{nixl_module}.NixlWrapper") as mock_nixl_wrapper,
        patch(f"{nixl_module}.threading.Event"),
        patch(f"{nixl_module}.threading.Thread") as mock_thread,
        patch(f"{nixl_module}.get_current_attn_backend") as mock_get_attn_backend,
    ):
        # Ensure get_attn_backend returns the correct value due to
        # _cached_get_attn_backend returning the backend from previous
        # test run if not mocking.
        mock_get_attn_backend.return_value = backend_cls

        # Create connector
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        connector.connector_worker = FakeNixlConnectorWorker(
            vllm_config, connector.engine_id, hand_shake_latency=0
        )
        # Get the mock instance
        mock_wrapper_instance = mock_nixl_wrapper.return_value
        connector.connector_worker.nixl_wrapper = mock_wrapper_instance
        # Appease NixlHandshakePayload encoding with some bytes
        mock_wrapper_instance.get_agent_metadata.return_value = b"fake_agent_metadata"
        # Reassure the shutdown() check that the thread is terminated
        mock_thread.return_value.is_alive.return_value = False

        # Expected values are filled in per-branch below, then validated
        # against the mock call args after register_kv_caches().
        expected_tensor_size: int
        expected_base_addrs: list[int]
        expected_num_entries: int
        kv_caches: dict[str, torch.Tensor]

        # Sanity check: when cross-layer blocks were requested, the
        # connector must actually prefer them for the supported backends.
        assert str(enable_cross_layers).lower() != "true" or (
            (attn_backend not in ("FLASH_ATTN", "FLASHINFER"))
            or connector.prefer_cross_layer_blocks
        )

        if connector.prefer_cross_layer_blocks:
            # One contiguous cross-layer allocation: a single registered
            # region covering all layers.
            num_layers = 32
            block_size = 16
            num_blocks = 8
            kv_cache_spec = AttentionSpec(
                block_size=block_size,
                num_kv_heads=4,
                head_size=64,
                dtype=torch.bfloat16,
            )
            kv_cache_config = KVCacheConfig(
                num_blocks=num_blocks,
                kv_cache_tensors=[
                    KVCacheTensor(
                        size=kv_cache_spec.page_size_bytes * num_blocks,
                        shared_by=["dummy-layer"],
                    )
                    for i in range(num_layers)
                ],
                # allocate_uniform_kv_caches does not use this
                kv_cache_groups=[],
            )
            with set_current_vllm_config(vllm_config):
                _, cross_layers_kv_cache, _ = (
                    KVConnectorModelRunnerMixin.allocate_uniform_kv_caches(
                        kv_cache_config=kv_cache_config,
                        attn_groups=[
                            [
                                AttentionGroup(
                                    backend=backend_cls,
                                    layer_names=[],
                                    kv_cache_spec=kv_cache_spec,
                                    kv_cache_group_id=0,
                                )
                            ]
                        ],
                        cache_dtype=torch.bfloat16,
                        device=torch.cuda.current_device(),
                        kernel_block_sizes=[block_size],
                    )
                )
            # Store tensor info for validation
            expected_tensor_size = (
                cross_layers_kv_cache.element_size() * cross_layers_kv_cache.numel()
            )
            expected_base_addrs = [
                cross_layers_kv_cache.data_ptr(),
            ]
            expected_num_entries = 1
            expected_blocks_count = 8
            kv_caches = {"all-layers": cross_layers_kv_cache}
        else:
            # Create test kv cache tensors using proper backend shape.
            # layer0/layer2 share storage, so only two distinct regions
            # (shared + unique) should be registered.
            kv_cache_shape = backend_cls.get_kv_cache_shape(
                num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
            )
            shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
            unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
            kv_caches = {
                "layer0": shared_tensor,
                "layer1": unique_tensor,
                "layer2": shared_tensor,
            }
            # Store tensor info for validation.
            # Probe the backend's layout: a 5-d shape whose first dim is
            # num_blocks means blocks-first; otherwise K/V-first, where the
            # K and V halves of each tensor are registered separately.
            test_shape = backend_cls.get_kv_cache_shape(
                num_blocks=1, block_size=16, num_kv_heads=1, head_size=1
            )
            is_blocks_first = len(test_shape) == 5 and test_shape[0] == 1
            if is_blocks_first:
                expected_tensor_size = (
                    shared_tensor.element_size() * shared_tensor.numel()
                )
                expected_base_addrs = [
                    shared_tensor.data_ptr(),
                    unique_tensor.data_ptr(),
                ]
                expected_num_entries = 2
            else:
                expected_tensor_size = (
                    shared_tensor[0].element_size() * shared_tensor[0].numel()
                )
                expected_base_addrs = [
                    shared_tensor[0].data_ptr(),
                    shared_tensor[1].data_ptr(),
                    unique_tensor[0].data_ptr(),
                    unique_tensor[1].data_ptr(),
                ]
                expected_num_entries = 4
            expected_blocks_count = 8

        # Execute register_kv_caches
        connector.register_kv_caches(kv_caches)

        # Verify get_reg_descs was called with caches_data
        assert mock_wrapper_instance.get_reg_descs.called
        caches_data, _ = mock_wrapper_instance.get_reg_descs.call_args[0]
        assert len(caches_data) == expected_num_entries
        for i, cache_entry in enumerate(caches_data):
            base_addr, size, _tp_rank, _ = cache_entry
            assert size == expected_tensor_size, (
                f"Entry {i}: Expected tensor size {expected_tensor_size}, got {size}"
            )
            assert base_addr == expected_base_addrs[i], (
                f"Entry {i}: Expected base address {expected_base_addrs[i]}, "
                f"got {base_addr}"
            )

        # Verify get_xfer_descs was called with blocks_data
        assert mock_wrapper_instance.get_xfer_descs.called
        blocks_data, _ = mock_wrapper_instance.get_xfer_descs.call_args[0]
        # Validate blocks_data structure and size
        assert len(blocks_data) == expected_blocks_count, (
            f"Expected {expected_blocks_count} blocks, got {len(blocks_data)}"
        )
        if connector.prefer_cross_layer_blocks:
            num_blocks = 8
            expected_block_len = expected_tensor_size // num_blocks
        else:
            num_blocks = 2
            if is_blocks_first:
                # K and V are interleaved within a block in this layout,
                # hence the extra factor of 2.
                expected_block_len = expected_tensor_size // num_blocks // 2
            else:
                expected_block_len = expected_tensor_size // num_blocks
        for i, block_entry in enumerate(blocks_data):
            block_start_addr, block_len, tp_rank = block_entry
            assert block_len == expected_block_len, (
                f"Block entry {i}: Expected block len {expected_block_len}, "
                f"got {block_len}"
            )
        assert connector.connector_worker.block_size == 16
class FakePlatform(Platform):
    """Minimal out-of-tree ("oot") platform stub for exercising the NIXL
    device/memory-type plumbing without real hardware."""

    device_type: str = "oot"

    @classmethod
    def get_nixl_supported_devices(cls) -> dict[str, tuple[str, ...]]:
        """
        Returns a mapping from device_type to a tuple of supported
        kv_buffer_device for nixl.
        """
        supported: dict[str, tuple[str, ...]] = {"oot": ("oot",)}
        return supported

    @classmethod
    def get_nixl_memory_type(cls) -> str | None:
        """
        Returns the nixl memory type for the current platform.
        """
        return "VRAM"
@pytest.mark.parametrize(
    "kv_buffer_device, nixl_memory_type",
    [
        ("oot", "VRAM"),
    ],
)
def test_kv_buffer_to_nixl_memory_types(
    default_vllm_config, dist_init, kv_buffer_device, nixl_memory_type
):
    """
    Test that register_kv_caches() passes the correct memory types from the
    config to the nixl_wrapper.
    """
    vllm_config = create_vllm_config()
    # Override the default memory types in the config
    vllm_config.kv_transfer_config.kv_buffer_device = kv_buffer_device
    from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import (
        _NIXL_SUPPORTED_DEVICE,
    )

    # Register the fake "oot" platform's devices so the connector accepts
    # kv_buffer_device="oot".
    _NIXL_SUPPORTED_DEVICE.update(FakePlatform.get_nixl_supported_devices())
    with (
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper"
        ),
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.threading.Event"
        ),
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.threading.Thread"
        ),
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.current_platform",
            FakePlatform,
        ),
        patch(
            "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector._NIXL_SUPPORTED_DEVICE",
            _NIXL_SUPPORTED_DEVICE,
        ),
    ):  # noqa: E501
        # Create connector and replace its worker with a fake one for isolation
        connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
        # Verify get_reg_descs was called with the correct memory_type
        assert connector.connector_worker.kv_buffer_device == kv_buffer_device
        assert connector.connector_worker.nixl_memory_type == nixl_memory_type
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_shutdown_cleans_up_resources(default_vllm_config, dist_init):
    """Test that shutdown() properly cleans up all resources.

    shutdown() must be idempotent: repeated calls release each transfer
    handle, dlist handle, remote agent, and registered descriptor exactly
    once.
    """
    vllm_config = create_vllm_config()
    scheduler = NixlConnectorScheduler(
        vllm_config, vllm_config.kv_transfer_config.engine_id
    )
    worker = NixlConnectorWorker(vllm_config, vllm_config.kv_transfer_config.engine_id)
    nixl_wrapper = worker.nixl_wrapper
    with (
        patch.object(worker, "_handshake_initiation_executor") as mock_exec,
        patch.object(scheduler, "_nixl_handshake_listener_t") as mock_listener,
        patch.object(nixl_wrapper, "release_xfer_handle") as mock_rel_xfer,
        patch.object(nixl_wrapper, "release_dlist_handle") as mock_rel_dlist,
        patch.object(nixl_wrapper, "remove_remote_agent") as mock_rem_agent,
        patch.object(nixl_wrapper, "deregister_memory") as mock_dereg,
    ):
        # Populate the worker state that shutdown() is expected to release.
        worker._recving_transfers = {"req1": [123]}
        # Mock register_kv_cache which registers local handle
        worker.src_xfer_handles_by_block_size = {worker.block_size: 455}
        # P TP = 2 * D TP case, we should register 2 local handles
        worker.src_xfer_handles_by_tp_ratio = {-2: [456, 457]}
        worker.dst_xfer_side_handles = {"engine1": {0: 789}}
        worker._remote_agents = {"engine1": {0: "agent1"}}
        worker._registered_descs = ["desc1", "desc2"]
        mock_listener.is_alive.return_value = False

        worker.shutdown()
        # Test idempotency
        worker.shutdown()
        worker.shutdown()
        mock_exec.shutdown.assert_called_with(wait=False)

        # Same sequence on scheduler.shutdown()
        scheduler.shutdown()
        scheduler.shutdown()
        scheduler.shutdown()
        mock_listener.join.assert_called_once()

        # Despite the repeated shutdown() calls above, every resource must
        # have been released exactly once.
        mock_rel_xfer.assert_called_once_with(123)
        assert mock_rel_dlist.call_count == 4
        mock_rel_dlist.assert_any_call(455)  # src handle (whole region)
        mock_rel_dlist.assert_any_call(456)  # src handle (1st chunk)
        mock_rel_dlist.assert_any_call(457)  # src handle (2nd chunk)
        mock_rel_dlist.assert_any_call(789)  # dst handle
        mock_rem_agent.assert_called_once_with("agent1")
        assert mock_dereg.call_count == 2
        mock_dereg.assert_any_call("desc1")
        mock_dereg.assert_any_call("desc2")
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_aborted_request_removed_from_worker_in_batch(default_vllm_config, dist_init):
    """
    Create and schedule a request so that P adds it to in-batch tracking via
    the real scheduler, then simulate an abort (request not in next scheduler
    iteration) and verify the worker no longer tracks it as in-batch.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # KVConnector Worker in P
    connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
    connector.connector_worker = FakeNixlConnectorWorker(
        vllm_config, connector.engine_id, hand_shake_latency=0
    )

    # Create a request that triggers do_remote_decode so that
    # the scheduler adds it to reqs_in_batch
    req = create_request(request_id=1, do_remote_decode=True, max_tokens=1)
    scheduler.add_request(req)

    # First scheduling pass - examine build_connector_meta output
    sched_out = scheduler.schedule()
    kv_meta = sched_out.kv_connector_metadata
    assert kv_meta is not None
    assert isinstance(kv_meta, NixlConnectorMetadata)
    assert req.request_id in kv_meta.reqs_in_batch

    #### Model Runner start ####
    # Bind scheduler-produced metadata and start worker processing.
    connector.bind_connector_metadata(kv_meta)
    dummy_ctx = ForwardContext(
        no_compile_layers={},
        attn_metadata={},
        virtual_engine=0,
        slot_mapping={},
    )
    connector.start_load_kv(dummy_ctx)
    # Ensure it was tracked by the worker
    assert req.request_id in connector.connector_worker._reqs_to_process
    #### Model Runner end ####

    # Abort request - request_finished call in connector scheduler
    scheduler.finish_requests(req.request_id, RequestStatus.FINISHED_ABORTED)

    # Second scheduling pass - build metadata with aborted request
    sched_out2 = scheduler.schedule()
    kv_meta2 = sched_out2.kv_connector_metadata
    assert kv_meta2 is not None
    assert isinstance(kv_meta2, NixlConnectorMetadata)
    assert req.request_id not in kv_meta2.reqs_in_batch

    # Bind empty/abort metadata and run worker step
    #### Model Runner start ####
    connector.bind_connector_metadata(kv_meta2)
    connector.start_load_kv(dummy_ctx)
    # After abort, the worker should not keep tracking it as "in-batch"
    assert req.request_id not in connector.connector_worker._reqs_to_process
    #### Model Runner end ####
class FailingNixlWrapper(FakeNixlWrapper):
    """Mock NixlWrapper that fails on specific operations.

    Each ``fail_*`` flag forces exactly one operation to fail so tests can
    exercise a single error path at a time.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fail_handshake = False
        self.fail_transfer_setup = False
        self.fail_send_notif = False
        self.fail_transfer_state = False  # Returns "ERR" state
        self.fail_transfer_exception = False  # Raises exception in check_xfer_state

    def add_remote_agent(self, agent_metadata: bytes) -> str:
        if not self.fail_handshake:
            return super().add_remote_agent(agent_metadata)
        # Imported lazily, only on the failure path.
        from zmq.error import Again

        raise Again("Simulated timeout failure")

    def make_prepped_xfer(
        self,
        xfer_type: str,
        local_xfer_side_handle: int,
        local_block_descs_ids: list[int],
        remote_xfer_side_handle: int,
        remote_block_descs_ids: list[int],
        notif_msg: bytes | None = None,
    ) -> int:
        if self.fail_transfer_setup:
            # classic RuntimeError to simulate failure
            raise RuntimeError("BAD STATUS")
        return super().make_prepped_xfer(
            xfer_type,
            local_xfer_side_handle,
            local_block_descs_ids,
            remote_xfer_side_handle,
            remote_block_descs_ids,
            notif_msg,
        )

    def send_notif(self, agent_name: str, notif_msg: bytes) -> None:
        if not self.fail_send_notif:
            return super().send_notif(agent_name, notif_msg)
        raise RuntimeError("Simulated send_notif failure")

    def check_xfer_state(self, handle: int) -> str:
        if self.fail_transfer_exception:
            raise RuntimeError("Simulated check_xfer_state exception")
        # "ERR" signals a bad transfer state to the caller.
        return "ERR" if self.fail_transfer_state else super().check_xfer_state(handle)
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FailingNixlWrapper,
)
@pytest.mark.parametrize(
    "failure_type,wrapper_config,needs_get_finished",
    [
        ("transfer_setup_failed", {"fail_transfer_setup": True}, False),
        ("handshake_failed", {"fail_handshake": True}, False),
        ("notification_failed", {"fail_send_notif": True}, False),
        ("transfer_failed", {"fail_transfer_state": True}, True),
        ("transfer_exception", {"fail_transfer_exception": True}, True),
    ],
)
def test_transfer_failure_logging(
    default_vllm_config,
    dist_init,
    failure_type,
    wrapper_config,
    needs_get_finished,
):
    """Test that transfer failures are logged with structured context.

    Run with `pytest -sv` to see the log output.

    Covers failure types:
    - transfer_setup_failed: make_prepped_xfer fails
    - handshake_failed: add_remote_agent fails during request handshake
    - notification_failed: send_notif fails
    - transfer_failed: check_xfer_state returns bad state (e.g., "ERR")
    - transfer_exception: check_xfer_state raises exception
    """
    import logging

    vllm_config = create_vllm_config()
    connector = NixlConnector(vllm_config, KVConnectorRole.WORKER)
    connector.connector_worker = FakeNixlConnectorWorker(
        vllm_config, connector.engine_id, hand_shake_latency=0.0
    )
    # Configure FailingNixlWrapper to fail in the specified way
    for key, value in wrapper_config.items():
        setattr(connector.connector_worker.nixl_wrapper, key, value)

    request_id = f"test_{failure_type}_req"
    # For notification_failed, we need empty local blocks
    # (full cache hit path to trigger send_notif)
    local_blocks = [] if failure_type == "notification_failed" else [10, 11, 12]
    remote_blocks = [20, 21, 22]
    metadata = NixlConnectorMetadata()
    metadata.add_new_req_to_recv(
        request_id=request_id,
        local_block_ids=local_blocks,
        kv_transfer_params={
            "remote_block_ids": remote_blocks,
            "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            "remote_request_id": f"prefill-{request_id}",
            "remote_host": "localhost",
            "remote_port": 1234,
            "remote_tp_size": 1,
        },
    )
    connector.bind_connector_metadata(metadata)
    dummy_ctx = ForwardContext(
        no_compile_layers={},
        attn_metadata={},
        virtual_engine=0,
        slot_mapping={},
    )

    # Capture logs from the nixl_connector logger specifically
    # vLLM loggers have propagate=False, so we need to capture directly
    nixl_logger = logging.getLogger(
        "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector"
    )
    captured_logs: list[logging.LogRecord] = []

    class LogCapture(logging.Handler):
        # Minimal handler: just record every emitted LogRecord.
        def emit(self, record):
            captured_logs.append(record)

    handler = LogCapture()
    handler.setLevel(logging.ERROR)
    nixl_logger.addHandler(handler)
    try:
        connector.start_load_kv(dummy_ctx)
        # Process the ready_requests queue (for async handshake)
        connector.bind_connector_metadata(NixlConnectorMetadata())
        # Wait for async handshake to complete
        time.sleep(0.2)
        connector.start_load_kv(dummy_ctx)
        # For transfer_failed/transfer_exception, the error happens in
        # get_finished() when checking transfer state
        if needs_get_finished:
            connector.get_finished(finished_req_ids=set())
    finally:
        nixl_logger.removeHandler(handler)

    # Print logs for manual comparison between commits
    error_logs = [r for r in captured_logs if r.levelno >= logging.ERROR]
    print("\n" + "=" * 60)
    print(f"CAPTURED ERROR LOGS for {failure_type}:")
    print("=" * 60)
    for i, record in enumerate(error_logs):
        print(f"\n--- Log {i + 1} ---")
        print(f"Message: {record.message}")
    print("=" * 60 + "\n")

    assert len(error_logs) >= 1, f"Expected at least one error log for {failure_type}"

    # Verify structured logging output (new format)
    # Check that at least one log matches the expected format
    all_messages = [r.message for r in error_logs]
    combined_logs = "\n".join(all_messages)
    assert any("NIXL transfer failure" in msg for msg in all_messages), (
        f"Expected structured log format with 'NIXL transfer failure' prefix "
        f"for {failure_type}. Got: {all_messages}"
    )
    assert any("failure_type" in msg for msg in all_messages), (
        f"Expected 'failure_type' in logs. Got: {all_messages}"
    )
    assert any("Context:" in msg for msg in all_messages), (
        f"Expected 'Context:' in logs. Got: {all_messages}"
    )
    # Check that the expected failure_type appears in at least one log
    # Note: handshake_failed also triggers handshake_setup_failed
    assert failure_type in combined_logs or (
        failure_type == "handshake_failed" and "handshake_setup_failed" in combined_logs
    ), f"Expected '{failure_type}' in logs. Got: {all_messages}"
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FailingNixlWrapper,
)
def test_handshake_failure_returns_finished(default_vllm_config, dist_init):
    """Test that handshake failures mark blocks invalid and return via get_finished."""
    cfg = create_vllm_config()
    connector = NixlConnector(cfg, KVConnectorRole.WORKER)
    connector.connector_worker = FakeNixlConnectorWorker(
        cfg, connector.engine_id, hand_shake_latency=0.1
    )
    # Force the background handshake to raise (see FailingNixlWrapper).
    connector.connector_worker.nixl_wrapper.fail_handshake = True

    req_id = "test_handshake_fail"
    local_blocks = [1, 2, 3]
    meta = NixlConnectorMetadata()
    meta.add_new_req_to_recv(
        request_id=req_id,
        local_block_ids=local_blocks,
        kv_transfer_params={
            "remote_block_ids": [4, 5, 6],
            "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            "remote_request_id": f"prefill-{req_id}",
            "remote_host": "localhost",
            "remote_port": 1234,
            "remote_tp_size": 1,
        },
    )
    connector.bind_connector_metadata(meta)

    ctx = ForwardContext(
        no_compile_layers={},
        attn_metadata={},
        virtual_engine=0,
        slot_mapping={},
    )
    connector.start_load_kv(ctx)

    # Give the async handshake (0.1s latency) enough time to fail.
    time.sleep(0.3)

    # The failed request's blocks must be reported as invalid ...
    assert connector.get_block_ids_with_load_errors() == set(local_blocks)
    # ... and the request itself must surface through get_finished().
    _, done_recving = connector.get_finished(finished_req_ids=set())
    assert req_id in done_recving
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FailingNixlWrapper,
)
def test_transfer_setup_failure_returns_finished(default_vllm_config, dist_init):
    """Test that transfer setup failures mark blocks invalid
    and return via get_finished."""
    cfg = create_vllm_config()
    connector = NixlConnector(cfg, KVConnectorRole.WORKER)
    connector.connector_worker = FakeNixlConnectorWorker(
        cfg, connector.engine_id, hand_shake_latency=0
    )
    # make_prepped_xfer will raise (see FailingNixlWrapper).
    connector.connector_worker.nixl_wrapper.fail_transfer_setup = True

    req_id = "test_transfer_fail"
    local_blocks = [7, 8, 9]
    meta = NixlConnectorMetadata()
    meta.add_new_req_to_recv(
        request_id=req_id,
        local_block_ids=local_blocks,
        kv_transfer_params={
            "remote_block_ids": [10, 11, 12],
            "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            "remote_request_id": f"prefill-{req_id}",
            "remote_host": "localhost",
            "remote_port": 1234,
            "remote_tp_size": 1,
        },
    )
    connector.bind_connector_metadata(meta)

    ctx = ForwardContext(
        no_compile_layers={},
        attn_metadata={},
        virtual_engine=0,
        slot_mapping={},
    )
    connector.start_load_kv(ctx)

    # Bind empty metadata, give the (zero-latency) handshake a moment, then
    # run another worker step to process the ready_requests queue.
    connector.bind_connector_metadata(NixlConnectorMetadata())
    time.sleep(0.1)
    connector.start_load_kv(ctx)

    # The failed transfer's blocks must be reported as invalid ...
    assert connector.get_block_ids_with_load_errors() == set(local_blocks)
    # ... and the request itself must surface through get_finished().
    _, done_recving = connector.get_finished(finished_req_ids=set())
    assert req_id in done_recving
@pytest.mark.parametrize(
    "mismatch_type,config_overrides,version_override,should_fail,enforce_handshake_compat",
    [
        ("vllm_version", {}, {"vllm_version": "0.6.1"}, True, True),
        ("nixl_connector_version", {}, {"connector_version": 37}, True, True),
        ("model_name", {"model": "facebook/opt-350m"}, {}, True, True),
        ("dtype", {"dtype": "bfloat16"}, {}, True, True),
        ("cache_dtype", {"cache_dtype": "fp8"}, {}, True, True),
        ("num_kv_heads", {"hf_overrides": {"num_key_value_heads": 8}}, {}, True, True),
        (
            "num_hidden_layers",
            {"hf_overrides": {"num_hidden_layers": 24}},
            {},
            True,
            True,
        ),
        ("hidden_size", {"hf_overrides": {"hidden_size": 1536}}, {}, True, True),
        ("block_size", {"block_size": 8}, {}, False, True),
        ("matching_config", {}, {}, False, True),
        ("escape_hatch", {"model": "facebook/opt-350m"}, {}, False, False),
    ],
)
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_compatibility_hash_validation(
    default_vllm_config,
    dist_init,
    mismatch_type,
    config_overrides,
    version_override,
    should_fail,
    enforce_handshake_compat,
):
    """
    Test NIXL compatibility hash validation during handshake.

    Parameters:
        mismatch_type: description of what is being tested
        config_overrides: dict of config to override for the remote instance
        version_override: version dict e.g. {"vllm_version": "0.6.1"}
        should_fail: whether the handshake should fail
        enforce_handshake_compat: whether to enforce compatibility checking
    """
    local_vllm_config = create_vllm_config(
        model="facebook/opt-125m",
        block_size=16,
        kv_connector_extra_config={
            "enforce_handshake_compat": enforce_handshake_compat
        },
    )
    decode_connector = NixlConnector(local_vllm_config, KVConnectorRole.WORKER)
    decode_worker = decode_connector.connector_worker

    # Register real-shaped kv caches so the worker computes its local
    # topology and compatibility hash.
    kv_cache_shape = decode_worker.attn_backend.get_kv_cache_shape(
        num_blocks=2, block_size=16, num_kv_heads=4, head_size=64
    )
    shared_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    unique_tensor = torch.zeros(*kv_cache_shape, dtype=torch.float16)
    kv_caches = {
        "layer0": shared_tensor,
        "layer1": unique_tensor,
        "layer2": shared_tensor,
    }
    decode_connector.register_kv_caches(kv_caches)

    # Build the remote (prefill) side config with the parametrized overrides.
    remote_config_params: dict[str, Any] = {
        "model": "facebook/opt-125m",
        "block_size": 16,
        **config_overrides,
    }
    remote_vllm_config = create_vllm_config(**remote_config_params)

    with contextlib.ExitStack() as stack:
        # Optionally fake a version mismatch while computing the remote hash.
        if "vllm_version" in version_override:
            stack.enter_context(
                patch("vllm.__version__", version_override["vllm_version"])
            )
        elif "connector_version" in version_override:
            stack.enter_context(
                patch.object(
                    nixl_connector,
                    "NIXL_CONNECTOR_VERSION",
                    version_override["connector_version"],
                )
            )
        remote_hash = compute_nixl_compatibility_hash(
            remote_vllm_config,
            decode_worker.backend_name,
            decode_worker.kv_topo.cross_layers_blocks,
        )
        prefill_block_size = config_overrides.get("block_size", 16)
        prefill_metadata = NixlAgentMetadata(
            engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            agent_metadata=FakeNixlWrapper.AGENT_METADATA,
            kv_caches_base_addr=[0],
            device_id=0,
            num_blocks=1,
            block_lens=[4096 * prefill_block_size],  # slot_size * block_size
            kv_cache_layout="HND",
            block_size=prefill_block_size,
        )
        handshake_payload = NixlHandshakePayload(
            compatibility_hash=remote_hash,
            agent_metadata_bytes=msgspec.msgpack.encode(prefill_metadata),
        )

        # Mock ZMQ socket to return our handshake payload
        mock_socket = MagicMock()
        mock_socket.recv.return_value = msgspec.msgpack.encode(handshake_payload)

        # Mock add_remote_agent to avoid actual NIXL operations
        # Patch zmq_ctx to return our mock socket
        with (
            patch.object(decode_worker, "add_remote_agent", return_value="fake_agent"),
            patch.object(nixl_connector, "zmq_ctx") as mock_zmq_ctx,
        ):
            mock_zmq_ctx.return_value.__enter__.return_value = mock_socket
            if should_fail:
                with pytest.raises(RuntimeError, match="compatibility hash mismatch"):
                    decode_worker._nixl_handshake(
                        host="localhost",
                        port=1234,
                        remote_tp_size=1,
                        expected_engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                    )
            else:
                result = decode_worker._nixl_handshake(
                    host="localhost",
                    port=1234,
                    remote_tp_size=1,
                    expected_engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
                )
                # Verify handshake returned agent mapping
                assert isinstance(result, dict)
                assert len(result) == 1
@pytest.mark.parametrize(
    "error_scenario",
    [
        "handshake_decode_error",
        "handshake_validation_error",
        "metadata_decode_error",
        "metadata_validation_error",
    ],
)
@patch(
    "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper",
    FakeNixlWrapper,
)
def test_handshake_decode_errors(default_vllm_config, dist_init, error_scenario):
    """
    Test that msgspec decode errors are properly handled during handshake.

    Tests both DecodeError and ValidationError for both decoders:
    - NixlHandshakePayload decoder
    - NixlAgentMetadata decoder
    """
    local_vllm_config = create_vllm_config(
        model="facebook/opt-125m",
        block_size=16,
    )
    decode_connector = NixlConnector(local_vllm_config, KVConnectorRole.WORKER)
    decode_worker = decode_connector.connector_worker

    # Build the worker's topology and compatibility hash by hand instead of
    # registering real kv caches; only the hash matters for this test.
    backend = get_current_attn_backend(local_vllm_config)
    test_shape = backend.get_kv_cache_shape(
        num_blocks=1, block_size=16, num_kv_heads=1, head_size=1
    )
    decode_worker.kv_topo = TpKVTopology(
        tp_rank=decode_worker.tp_rank,
        engine_id=decode_worker.engine_id,
        remote_tp_size=decode_worker._tp_size,  # shared state
        remote_block_size=decode_worker._block_size,  # shared state
        is_mla=decode_worker.use_mla,
        total_num_kv_heads=decode_worker.model_config.get_total_num_kv_heads(),
        attn_backend=backend,
        tensor_shape=test_shape,
    )
    decode_worker.compat_hash = compute_nixl_compatibility_hash(
        decode_worker.vllm_config,
        decode_worker.backend_name,
        decode_worker.kv_topo.cross_layers_blocks,
    )

    # Craft the malformed handshake bytes for the given scenario.
    if error_scenario == "handshake_decode_error":
        msg_bytes = b"this is not valid msgpack data"
    elif error_scenario == "handshake_validation_error":
        msg_bytes = msgspec.msgpack.encode({"wrong_field": "value"})
    elif error_scenario == "metadata_decode_error":
        # Outer payload is valid; the nested agent metadata is garbage.
        valid_handshake = NixlHandshakePayload(
            compatibility_hash=decode_worker.compat_hash,
            agent_metadata_bytes=b"invalid msgpack for metadata",
        )
        msg_bytes = msgspec.msgpack.encode(valid_handshake)
    elif error_scenario == "metadata_validation_error":
        # Outer payload is valid; the nested metadata lacks required fields.
        valid_handshake = NixlHandshakePayload(
            compatibility_hash=decode_worker.compat_hash,
            agent_metadata_bytes=msgspec.msgpack.encode({"missing": "fields"}),
        )
        msg_bytes = msgspec.msgpack.encode(valid_handshake)
    else:
        raise AssertionError(f"{error_scenario} not a valid scenario")

    mock_socket = MagicMock()
    mock_socket.recv.return_value = msg_bytes
    with (
        patch.object(decode_worker, "add_remote_agent", return_value="fake_agent"),
        patch.object(nixl_connector, "zmq_ctx") as mock_zmq_ctx,
    ):
        mock_zmq_ctx.return_value.__enter__.return_value = mock_socket
        # Every malformed payload must surface as a RuntimeError.
        with pytest.raises(RuntimeError):
            decode_worker._nixl_handshake(
                host="localhost",
                port=1234,
                remote_tp_size=1,
                expected_engine_id=FakeNixlConnectorWorker.REMOTE_ENGINE_ID,
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_nixl_connector.py",
"license": "Apache License 2.0",
"lines": 2021,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import pytest
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput
from vllm.v1.request import FinishReason, RequestStatus
from .utils import (
assert_scheduler_empty,
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def test_basic_lifecycle():
    """Test lifecycle of a Remote Decode request.

    Walks one request through: prefill -> finished (length-capped after a
    single token) -> finished-id handoff to the persistent batch ->
    finished_sending ack, checking that KV blocks stay pinned until the
    KV send completes.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    # max_tokens=1: a remote-decode request finishes right after prefill.
    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        max_tokens=1,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )

    scheduler.add_request(request)
    request_id = request.request_id

    # STEP (1): Prefill.
    # (1a): schedule()
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 1

    # (1b): execute_model()
    model_runner_output = create_model_runner_output(reqs=[request])

    # (1c): update_from_output()
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )

    # Ensure the request is finished after 1 token.
    assert request.is_finished()
    assert request.status == RequestStatus.FINISHED_LENGTH_CAPPED
    output = engine_core_outputs[0].outputs[0]
    assert output.finish_reason == FinishReason.LENGTH
    # kv_transfer_params carry the handoff info for the decode instance.
    assert output.kv_transfer_params is not None

    # Request freed in Scheduler and in Persistent Batch ...
    assert request_id in scheduler.finished_req_ids
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 0

    # ... but blocks should not be freed.
    assert len(scheduler.requests) == 1
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        assert block.ref_cnt == 1

    # STEP (2): Send Finished to PB.
    # (2a): schedule() - pass finished request to PB.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 0
    assert len(scheduler_output.finished_req_ids) == 1
    assert request_id in scheduler_output.finished_req_ids
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    assert len(scheduler.finished_req_ids) == 0

    # (2b): execute_model()
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT

    # (2c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP (3): Finished sending.
    # (3a): schedule() - pass finished request to PB.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 0
    assert len(scheduler_output.finished_req_ids) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    assert len(scheduler.finished_req_ids) == 0

    # (3b): execute_model()
    # deepcopy so we do not mutate the shared EMPTY_MODEL_RUNNER_OUTPUT.
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_sending={request_id}
    )

    # (3c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # Confirm we do not have any memory leaks after req lifecycle.
    assert_scheduler_empty(scheduler)
def test_short_prompt_lifecycle():
    """Test lifecycle of a Remote Decode request with short prompt."""
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Prompt occupies only half a block.
    block_size = vllm_config.cache_config.block_size
    prompt_len = block_size // 2

    req = create_request(
        request_id=1,
        block_size=block_size,
        max_tokens=1,
        num_tokens=prompt_len,
        do_remote_decode=True,
    )
    scheduler.add_request(req)

    # STEP (1): Prefill.
    sched_out = scheduler.schedule()
    assert len(scheduler.requests) == 1
    assert len(scheduler.running) == 1
    assert len(sched_out.scheduled_new_reqs) == 1

    runner_out = create_model_runner_output(reqs=[req])

    # Even with fewer tokens than a block, the partial block is transferred.
    engine_outputs = scheduler.update_from_output(sched_out, runner_out)
    xfer_params = engine_outputs[0].outputs[0].kv_transfer_params
    assert len(xfer_params["remote_block_ids"]) == 1

    # Mark sending finished so the persistent-batch state is cleared,
    # then confirm nothing leaked.
    sched_out = scheduler.schedule()
    runner_out = create_model_runner_output(
        reqs=[req], finished_sending={req.request_id}
    )
    scheduler.update_from_output(sched_out, runner_out)
    assert_scheduler_empty(scheduler)
def test_prefix_cache_lifecycle():
    """Test that remote decode params still work with a prefix cache hit."""
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Prime the KVCache.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 3
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    request_normal = create_request(
        request_id=1, block_size=BLOCK_SIZE, num_tokens=NUM_TOKENS
    )

    # Run the priming request to completion (EOS) so its blocks stay in the
    # prefix cache but the request itself is fully freed.
    scheduler.add_request(request_normal)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)

    #####################
    # Actual Test: confirm we send all blocks.

    # Step (1): Send the KV Transfer.
    NUM_EXTERNAL_FULL_BLOCKS -= 1
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    # NOTE: request_id=1 is reused on purpose; create_request derives the
    # prompt tokens from the id, so this shorter prompt is a prefix of the
    # primed one and hits the prefix cache.
    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )

    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_remote])
    eco = scheduler.update_from_output(scheduler_output, model_runner_output)
    kv_transfer_params = eco[0].outputs[0].kv_transfer_params

    # Ensure we send all block ids, including the partial blocks,
    # even if there is a cache hit.
    assert len(kv_transfer_params["remote_block_ids"]) == (NUM_EXTERNAL_FULL_BLOCKS + 1)

    # STEP (2): Ensure it is freed.
    scheduler_output = scheduler.schedule()
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_sending={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert_scheduler_empty(scheduler)
def test_abort_during_kv_transfer():
    """Test aborting request does not release blocks for remote decode.

    An aborted remote-decode request must keep its KV blocks pinned until
    the connector reports finished_sending for it.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Prime the KVCache: 2 full blocks and 1 half block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_decode=True,
    )

    scheduler.add_request(request)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)

    # Request removed from PB but blocks should not be freed.
    assert len(scheduler.requests) == 1

    # Abort the request, and check the blocks are still not freed
    scheduler.finish_requests([request.request_id], RequestStatus.FINISHED_ABORTED)
    assert len(scheduler.requests) == 1

    # Simulate a finished sending notification
    scheduler_output = scheduler.schedule()
    # deepcopy so we do not mutate the shared EMPTY_MODEL_RUNNER_OUTPUT.
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        # Fix: pass a set (not a list) to match the set[str] convention used
        # by every other finished_sending call site in these tests.
        finished_sending={request.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert_scheduler_empty(scheduler)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import pytest
from vllm.v1.outputs import EMPTY_MODEL_RUNNER_OUTPUT, KVConnectorOutput
from vllm.v1.request import FinishReason, RequestStatus
from .utils import (
assert_scheduler_empty,
create_model_runner_output,
create_request,
create_scheduler,
create_vllm_config,
)
pytestmark = pytest.mark.cpu_test
def test_basic_lifecycle():
    """Test lifecycle of a remote prefill.

    Walks one request through: waiting for remote KVs (blocks allocated but
    uncomputed) -> finished_recving notification -> scheduled for the rest
    of the prompt -> EOS, checking scheduler/block-pool state at each step.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))
    # Baseline free-block count, to detect the allocation below.
    START_FREE_BLOCK_QUEUE_SIZE = (
        scheduler.kv_cache_manager.block_pool.free_block_queue.num_free_blocks
    )

    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )

    scheduler.add_request(request)
    request_id = request.request_id

    # STEP (1):
    # (1a): schedule()
    scheduler_output = scheduler.schedule()

    # Nothing running and empty scheduler output.
    assert len(scheduler.running) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 0
    assert len(scheduler_output.num_scheduled_tokens) == 0
    assert scheduler_output.total_num_scheduled_tokens == 0

    # Req waiting for KVs with no computed/scheduled toks ...
    assert len(scheduler.waiting) == 1
    assert request in scheduler.waiting
    assert request.status == RequestStatus.WAITING_FOR_REMOTE_KVS
    assert request.num_computed_tokens == 0

    # ... but should have (uncached) blocks allocated to it.
    block_pool = scheduler.kv_cache_manager.block_pool
    assert block_pool.free_block_queue.num_free_blocks < START_FREE_BLOCK_QUEUE_SIZE
    assert len(block_pool.cached_block_hash_to_block) == 0
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        assert block._block_hash is None

    # (1b): forward()
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT

    # (1c): update_from_output()
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    # No client-visible output while the request waits for KVs.
    assert not engine_core_outputs or not engine_core_outputs[0].outputs

    # STEP (2):
    # (2a): schedule(): nothing happens!
    scheduler_output = scheduler.schedule()
    assert len(scheduler.waiting) == 1
    assert len(scheduler.running) == 0

    # (2b): forward(): request finishes recv.
    # deepcopy so we do not mutate the shared EMPTY_MODEL_RUNNER_OUTPUT.
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_recving={request_id}
    )

    # (2c): update_from_output():
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    assert len(scheduler.waiting) == 1
    assert request_id in scheduler.finished_recving_kv_req_ids

    # STEP (3):
    # (3a): schedule(): this should actually schedule.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1

    # Confirm the block are actually allocated.
    num_hashed_blocks = 0
    blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_id]
    for block in blocks:
        assert block.ref_cnt == 1
        # Only the full blocks get hashed (cached); the partial one does not.
        num_hashed_blocks += 1 if block._block_hash is not None else 0
    assert num_hashed_blocks == NUM_EXTERNAL_FULL_BLOCKS

    # Confirm the rest of the prompt is scheduled in this step.
    scheduled_req = scheduler_output.scheduled_new_reqs[0]
    num_scheduled_tokens = scheduler_output.num_scheduled_tokens[request_id]
    num_computed_tokens = scheduled_req.num_computed_tokens
    total_prompt_tokens = len(scheduled_req.prompt_token_ids)
    assert num_scheduled_tokens == total_prompt_tokens - num_computed_tokens

    # (3b): execute_model()
    model_runner_output = create_model_runner_output([request])

    # (3c): update_from_output()
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # Step (4): Hit EOS.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output([request], use_eos=True)
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    scheduler.schedule()

    outputs = engine_core_outputs[0].outputs
    assert len(outputs) == 1
    output = outputs[0]
    assert output.finish_reason == FinishReason.STOP
    assert_scheduler_empty(scheduler)
def test_interleaved_lifecycle():
    """Test Remote Prefills Work Well With Other Requests.

    Runs two local requests alongside one remote-prefill request and checks
    that the remote request waits (without blocking the others) until its
    KVs arrive, then joins the running batch.
    """
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 Full Blocks and 1 Half Block.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )
    request_local_a = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
    )
    request_local_b = create_request(
        request_id=3,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
    )

    # STEP 1: Regular request is running.
    scheduler.add_request(request_local_a)
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1

    model_runner_output = create_model_runner_output([request_local_a])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 2: Add a local and remote request.
    # The remote one stays in waiting (WAITING_FOR_REMOTE_KVS).
    scheduler.add_request(request_local_b)
    scheduler.add_request(request_remote)

    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 1
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 1

    model_runner_output = create_model_runner_output([request_local_a, request_local_b])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 3: continue running, KVs not arrived yet.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2

    model_runner_output = create_model_runner_output(
        reqs=[request_local_a, request_local_b]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2

    # STEP 4: KVs arrive.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 1
    assert len(scheduler_output.scheduled_new_reqs) == 0
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2

    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b], finished_recving={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 5: RECVed KVs are sent to ModelRunner.
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 3
    assert len(scheduler.waiting) == 0
    assert len(scheduler_output.scheduled_new_reqs) == 1
    assert scheduler_output.scheduled_cached_reqs.num_reqs == 2

    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b, request_remote]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP 6: Hit EOS and free.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        [request_local_a, request_local_b, request_remote],
        use_eos=True,
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    scheduler.schedule()
    assert_scheduler_empty(scheduler)
def test_no_spurious_prefix_caching():
    """
    With P/D, blocks can be allocated but uncomputed for
    multiple engine steps. This test confirms that we do
    not accidentally have cache hits against uncomputed
    blocks.
    """
    # Fix: the config/scheduler pair was previously created twice; the first
    # pair was immediately discarded. Create them once.
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # 2 and a half full external blocks.
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * (NUM_EXTERNAL_FULL_BLOCKS + 0.5))

    # Both of these requests have prompts like [1,1,1,1,1, ...]
    request_remote = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        common_prefix_len=NUM_TOKENS,
        do_remote_prefill=True,
    )
    request_local = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        common_prefix_len=NUM_TOKENS,
        do_remote_prefill=False,
    )

    # Schedule the remote prefill request. This should not
    # cause any blocks to be cached.
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    scheduler.update_from_output(scheduler_output, EMPTY_MODEL_RUNNER_OUTPUT)
    assert len(scheduler.waiting) == 1

    # Schedule the local prefill request. This should
    # cause blocks to be cached, but separately from
    scheduler.add_request(request_local)
    scheduler_output = scheduler.schedule()
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    local_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_local.request_id]
    remote_blocks = scheduler.kv_cache_manager.coordinator.single_type_managers[
        0
    ].req_to_blocks[request_remote.request_id]

    # Local should have cached blocks (but not all due to preallocate).
    num_hashed_blocks = 0
    for block in local_blocks:
        assert block.ref_cnt == 1
        num_hashed_blocks += 1 if block._block_hash is not None else 0
    assert num_hashed_blocks > 0

    # Remote blocks should not be cached.
    for block in remote_blocks:
        assert block.ref_cnt == 1
        assert block._block_hash is None
def test_full_block_prompt():
    """Test that we handle a prompt that is the full block size."""
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config)

    # Prompt is an exact multiple of the block size (no partial block).
    BLOCK_SIZE = vllm_config.cache_config.block_size
    NUM_EXTERNAL_FULL_BLOCKS = 2
    NUM_TOKENS = int(BLOCK_SIZE * NUM_EXTERNAL_FULL_BLOCKS)

    request = create_request(
        request_id=1,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS,
        do_remote_prefill=True,
    )

    scheduler.add_request(request)
    request_id = request.request_id

    # STEP (1): Initialize a recv.
    scheduler_output = scheduler.schedule()
    # All blocks should be allocated.
    num_blocks = len(
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ]
    )
    assert num_blocks == NUM_EXTERNAL_FULL_BLOCKS
    model_runner_output = EMPTY_MODEL_RUNNER_OUTPUT
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # STEP (2): Recv.
    scheduler_output = scheduler.schedule()
    # deepcopy so we do not mutate the shared EMPTY_MODEL_RUNNER_OUTPUT.
    model_runner_output = copy.deepcopy(EMPTY_MODEL_RUNNER_OUTPUT)
    model_runner_output.kv_connector_output = KVConnectorOutput(
        finished_recving={request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.waiting) == 1
    assert request_id in scheduler.finished_recving_kv_req_ids

    # STEP (3): Run as usual.
    scheduler_output = scheduler.schedule()

    # We need to recompute the final token of the prompt to generate
    # the first new token, so we should not have a new block.
    num_blocks = len(
        scheduler.kv_cache_manager.coordinator.single_type_managers[0].req_to_blocks[
            request_id
        ]
    )
    assert num_blocks == NUM_EXTERNAL_FULL_BLOCKS
    assert scheduler_output.scheduled_new_reqs[0].num_computed_tokens == NUM_TOKENS - 1
    assert scheduler_output.num_scheduled_tokens[request_id] == 1

    model_runner_output = create_model_runner_output([request])
    scheduler.update_from_output(scheduler_output, model_runner_output)

    # Step (4): Hit EOS.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output([request], use_eos=True)
    engine_core_outputs = scheduler.update_from_output(
        scheduler_output, model_runner_output
    )
    scheduler.schedule()

    outputs = engine_core_outputs[0].outputs
    assert len(outputs) == 1
    output = outputs[0]
    assert output.finish_reason == FinishReason.STOP
    assert_scheduler_empty(scheduler)
def test_cannot_schedule_after_recv():
    """
    Test that we can handle no schedule after recv due to not
    enough remaining KV blocks.

    The remote request's KVs arrive, it briefly runs, then is preempted
    because no block is free for its first generated token; once the local
    request finishes, it resumes from its computed blocks.
    """
    # NOTE: the KVCacheManager will use 1 null block.
    # So there are 5 total working blocks.
    TOTAL_NUM_BLOCKS = 6
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config, num_blocks=TOTAL_NUM_BLOCKS)

    # Prime the KVCache.
    NUM_PROMPT_BLOCKS = 2
    BLOCK_SIZE = vllm_config.cache_config.block_size
    # Prompt will use 2 blocks + 1 block after we schedule.
    NUM_TOKENS_LOCAL = int(BLOCK_SIZE * NUM_PROMPT_BLOCKS)
    NUM_TOKENS_REMOTE = int(BLOCK_SIZE * NUM_PROMPT_BLOCKS)

    request_normal = create_request(
        request_id=1, block_size=BLOCK_SIZE, num_tokens=NUM_TOKENS_LOCAL
    )
    request_remote = create_request(
        request_id=2,
        block_size=BLOCK_SIZE,
        num_tokens=NUM_TOKENS_REMOTE,
        do_remote_prefill=True,
    )

    # STEP 1: 3 blocks are in use (2 for prompt, 1 for decode).
    scheduler.add_request(request_normal)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # Step 2: 5 blocks are in use (2 new for remote blocks).
    scheduler.add_request(request_remote)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # Step 3: finish recving (5 blocks in use)
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], finished_recving={request_remote.request_id}
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # Step 4: try to schedule, remote request is put to running list
    # because the transfer is completed.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal, request_remote]
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 2
    assert len(scheduler.waiting) == 0

    # Step 5: Remote request will be put back to waiting list
    # because it needs new block to hold generated token.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_normal])
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1

    # Step 6: finish the request, free it.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_normal], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # Step 7: now we can schedule (with 2 blocks computed),
    # request is retrieved from preempted list.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(reqs=[request_remote])
    assert (
        scheduler_output.scheduled_cached_reqs.num_computed_tokens[0]
        == NUM_PROMPT_BLOCKS * BLOCK_SIZE
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # Step 8: free everything.
    scheduler_output = scheduler.schedule()
    model_runner_output = create_model_runner_output(
        reqs=[request_remote], use_eos=True
    )
    scheduler.update_from_output(scheduler_output, model_runner_output)
    _ = scheduler.schedule()
    assert_scheduler_empty(scheduler)
def test_cannot_recv():
    """
    Test that we can handle no schedule KV block transfer due to not
    enough remaining KV blocks.
    """
    # The KVCacheManager reserves one null block, leaving 5 usable blocks.
    total_blocks = 6
    vllm_config = create_vllm_config()
    scheduler = create_scheduler(vllm_config, num_blocks=total_blocks)

    # Local prompt fills 2 blocks (plus 1 more once decoding starts);
    # the remote prompt needs 2 full blocks and a partial third.
    prompt_blocks = 2
    block_size = vllm_config.cache_config.block_size
    local_tokens = int(block_size * prompt_blocks)
    remote_tokens = int(block_size * (prompt_blocks + 0.5))

    req_local = create_request(
        request_id=1, block_size=block_size, num_tokens=local_tokens
    )
    req_remote = create_request(
        request_id=2,
        block_size=block_size,
        num_tokens=remote_tokens,
        do_remote_prefill=True,
    )

    def step(reqs, **kwargs):
        # One engine iteration: schedule, fake a model run, apply results.
        sched_out = scheduler.schedule()
        runner_out = create_model_runner_output(reqs=reqs, **kwargs)
        scheduler.update_from_output(sched_out, runner_out)

    # STEP 1: local request runs; 3 blocks in use (2 prompt + 1 decode).
    scheduler.add_request(req_local)
    step([req_local])
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 2: remote request needs 3 new blocks but only 2 are free,
    # so no KV transfer can be started yet.
    scheduler.add_request(req_remote)
    step([req_local])
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 1
    assert req_remote.status != RequestStatus.WAITING_FOR_REMOTE_KVS

    # STEP 3: local request hits EOS and frees its blocks.
    step([req_local], use_eos=True)
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # STEP 4: the KV transfer can now be initiated (2 blocks computed).
    step([])
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1
    assert req_remote.status == RequestStatus.WAITING_FOR_REMOTE_KVS

    # STEP 5: the transfer completes (5 blocks in use).
    step([], finished_recving={req_remote.request_id})
    assert len(scheduler.running) == 0
    assert len(scheduler.waiting) == 1

    # STEP 6: the remote request is finally scheduled.
    step([req_remote])
    assert len(scheduler.running) == 1
    assert len(scheduler.waiting) == 0

    # STEP 7: EOS; everything is freed.
    step([req_remote], use_eos=True)
    scheduler.schedule()
    assert_scheduler_empty(scheduler)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py",
"license": "Apache License 2.0",
"lines": 486,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/kv_connector/unit/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import tempfile
from collections import defaultdict
from collections.abc import Callable
from dataclasses import dataclass
from itertools import chain, count
from typing import Any, Literal
import torch
from vllm import SamplingParams
from vllm.config import (
AttentionConfig,
CacheConfig,
DeviceConfig,
KVTransferConfig,
ModelConfig,
SchedulerConfig,
VllmConfig,
)
from vllm.distributed.kv_transfer.kv_connector.factory import KVConnectorFactory
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
KVConnectorBase_V1,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.example_connector import ( # noqa
ExampleConnector,
)
from vllm.utils.hashing import sha256
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.core.kv_cache_utils import get_request_block_hasher, init_none_hash
from vllm.v1.core.sched.scheduler import Scheduler, SchedulerOutput
from vllm.v1.kv_cache_interface import (
FullAttentionSpec,
KVCacheConfig,
KVCacheGroupSpec,
)
from vllm.v1.outputs import KVConnectorOutput, ModelRunnerOutput
from vllm.v1.request import Request
from vllm.v1.structured_output import StructuredOutputManager
EOS_TOKEN_ID = 50256
def assert_scheduler_empty(scheduler: Scheduler):
    """Confirm the scheduler is "empty" - i.e. no leaks."""
    # Per-request scheduler bookkeeping must be fully drained.
    assert not scheduler.requests
    assert not scheduler.waiting
    assert not scheduler.running
    assert not scheduler.finished_req_ids
    assert not scheduler.finished_recving_kv_req_ids

    # EncoderCacheManager state.
    assert not scheduler.encoder_cache_manager.freed
    assert not scheduler.encoder_cache_manager.cached

    # KVCacheManager: no request should still hold block mappings.
    manager = scheduler.kv_cache_manager.coordinator.single_type_managers[0]
    assert not manager.req_to_blocks
    assert not manager.num_cached_block

    # Every block except the reserved null block is back in the free queue.
    block_pool = scheduler.kv_cache_manager.block_pool
    assert (
        block_pool.free_block_queue.num_free_blocks
        == block_pool.num_gpu_blocks - 1
    )

    # NOTE(rob): just the ref count on blocks will be 0. The hash
    # value, etc will remain since we lazily evict for prefix cache.
    assert all(block.ref_cnt == 0 for block in block_pool.blocks)
def create_vllm_config(
    model: str = "facebook/opt-125m",
    max_num_seqs: int = 16,
    max_num_batched_tokens: int = 64,
    block_size: int = 16,
    max_model_len: int = 10000,
    enable_chunked_prefill: bool = True,
    enable_permute_local_kv: bool = False,
    kv_connector_extra_config: dict[str, Any] | None = None,
    dtype: str = "float16",
    cache_dtype: str = "auto",
    hf_overrides: dict[str, Any] | None = None,
    attention_backend: str | None = None,
    kv_load_failure_policy: Literal["recompute", "fail"] = "fail",
) -> VllmConfig:
    """Initialize VllmConfig For Testing.

    Builds a CPU-device config wired to the NixlConnector (kv_role
    "kv_both") with prefix caching enabled, suitable for the scheduler
    lifecycle tests in this package.
    """
    model_config = ModelConfig(
        model=model,
        trust_remote_code=True,
        dtype=dtype,
        seed=42,
        hf_overrides=hf_overrides or {},
    )
    scheduler_config = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=max_num_batched_tokens,
        max_model_len=max_model_len,
        enable_chunked_prefill=enable_chunked_prefill,
        is_encoder_decoder=model_config.is_encoder_decoder,
    )
    # Cache config, optionally force APC
    cache_config = CacheConfig(
        block_size=block_size,
        gpu_memory_utilization=0.9,
        swap_space=0,
        cache_dtype=cache_dtype,
        enable_prefix_caching=True,
    )
    # kv_both: this test config acts as both producer and consumer.
    kv_transfer_config = KVTransferConfig(
        kv_connector="NixlConnector",
        kv_role="kv_both",
        enable_permute_local_kv=enable_permute_local_kv,
        kv_connector_extra_config=kv_connector_extra_config or {},
        kv_load_failure_policy=kv_load_failure_policy,
    )
    attention_config = AttentionConfig(backend=attention_backend)
    return VllmConfig(
        scheduler_config=scheduler_config,
        model_config=model_config,
        cache_config=cache_config,
        kv_transfer_config=kv_transfer_config,
        device_config=DeviceConfig("cpu"),
        attention_config=attention_config,
    )
def create_scheduler(
    vllm_config: VllmConfig,
    num_blocks: int = 10000,
) -> Scheduler:
    """Initialize Scheduler For Testing."""
    block_size = vllm_config.cache_config.block_size

    # Single full-attention KV cache group with a minimal (1 head,
    # head_size 1) layout; num_blocks defaults high enough to hold all
    # requests in these tests.
    attn_spec = FullAttentionSpec(
        block_size=block_size,
        num_kv_heads=1,
        head_size=1,
        dtype=torch.float32,
    )
    kv_cache_config = KVCacheConfig(
        num_blocks=num_blocks,
        kv_cache_tensors=[],
        kv_cache_groups=[KVCacheGroupSpec(["layer"], attn_spec)],
    )
    vllm_config.cache_config.num_gpu_blocks = num_blocks

    return Scheduler(
        vllm_config=vllm_config,
        kv_cache_config=kv_cache_config,
        log_stats=True,
        structured_output_manager=StructuredOutputManager(vllm_config),
        block_size=block_size,
    )
# Monotonic fallback ids for create_request() calls that omit request_id.
_request_count = count(1)
# Guards the one-time init_none_hash() setup performed in create_request().
_none_hash_initialized = False
def create_request(
    request_id: int | None = None,
    num_tokens: int = 10,
    common_prefix_len=0,
    max_tokens: int = 16,
    do_remote_decode: bool = False,
    do_remote_prefill: bool = False,
    num_remote_blocks: int = 3,
    block_size: int = 16,
    hash_fn: Callable = sha256,
) -> Request:
    """Make dummy request for testing.

    The prompt is an optional shared prefix of 1s followed by a suffix that
    is unique per request id, so distinct requests produce distinct blocks.
    """
    assert num_tokens >= common_prefix_len >= 0
    if request_id is None:
        request_id = next(_request_count)

    # Perform the process-wide NONE-hash initialization exactly once.
    global _none_hash_initialized
    if not _none_hash_initialized:
        init_none_hash(hash_fn)
        _none_hash_initialized = True

    kv_transfer_params: dict[str, Any] | None = None
    if do_remote_decode:
        assert not do_remote_prefill
        kv_transfer_params = {
            "do_remote_prefill": False,
            "do_remote_decode": True,
        }
    elif do_remote_prefill:
        kv_transfer_params = {
            "do_remote_prefill": True,
            "do_remote_decode": False,
            "remote_engine_id": "my-engine-id",
            "remote_request_id": f"prefill-{request_id}",
            "remote_block_ids": list(range(num_remote_blocks)),
            "remote_host": "my-host",
            "remote_port": 1234,
        }

    # Remote-decode requests only generate a single token locally.
    if do_remote_decode:
        max_tokens = 1
    sampling_params = SamplingParams(max_tokens=max_tokens)
    sampling_params.update_from_generation_config({}, EOS_TOKEN_ID)

    shared = [1] * common_prefix_len if common_prefix_len > 0 else []
    unique = [i * request_id for i in range(num_tokens - common_prefix_len)]
    req = Request(
        request_id=f"id-{request_id}",
        prompt_token_ids=shared + unique,
        sampling_params=sampling_params,
        pooling_params=None,
        mm_features=None,
        block_hasher=get_request_block_hasher(block_size, hash_fn),
    )
    req.kv_transfer_params = kv_transfer_params
    return req
def create_model_runner_output(
    reqs: list[Request],
    finished_sending: set[str] | None = None,
    finished_recving: set[str] | None = None,
    invalid_block_ids: set[int] | None = None,
    use_eos: bool = False,
    token_id: int = 0,
) -> ModelRunnerOutput:
    """Make dummy model runner output for testing."""
    # Request bookkeeping: batch ids and their positions.
    req_ids = [req.request_id for req in reqs]
    req_id_to_index = {rid: pos for pos, rid in enumerate(req_ids)}

    # Every request "samples" the same single token this step.
    token = EOS_TOKEN_ID if use_eos else token_id
    sampled_token_ids = [[token] for _ in req_ids]

    # Attach connector output only when there is something to report.
    have_connector_data = not (
        finished_sending is None
        and finished_recving is None
        and invalid_block_ids is None
    )
    kv_connector_output = None
    if have_connector_data:
        kv_connector_output = KVConnectorOutput(
            finished_sending=finished_sending,
            finished_recving=finished_recving,
            invalid_block_ids=invalid_block_ids or set(),
        )

    return ModelRunnerOutput(
        req_ids=req_ids,
        req_id_to_index=req_id_to_index,
        sampled_token_ids=sampled_token_ids,
        logprobs=None,
        prompt_logprobs_dict={},
        pooler_output=None,
        kv_connector_output=kv_connector_output,
    )
class TestExampleConnector(ExampleConnector):
    """Spy wrapper around ExampleConnector that records every call made
    through the connector interface.

    Each intercepted call bumps ``call_record`` and appends a line to a
    per-connector temp file, so the main test process can read back the
    call sequence of connectors running in worker subprocesses.
    """

    def __init__(self, config: VllmConfig, role, kv_cache_config):
        self.name = config.kv_transfer_config.kv_connector_extra_config["name"]
        # All real work is delegated to the wrapped connector instance.
        self._connector = ExampleConnector(config, role)
        self.call_record: dict[str, int] = defaultdict(int)
        # Use a unique temp file per connector
        # NOTE: `self.role` is not set here; the lookup is forwarded by
        # __getattribute__ to the wrapped connector's `role` attribute.
        self._event_file = (
            tempfile.gettempdir()
            + f"/connector_{self.name}-{self.role.name}_events.log"
        )
        # Start with an empty file
        with open(self._event_file, "w") as _:
            pass

    def __getattribute__(self, name):
        # Attributes owned by the wrapper itself must be resolved directly,
        # otherwise accessing self._connector below would recurse forever.
        if name in (
            "_connector",
            "call_record",
            "name",
            "_event_file",
            "__class__",
            "__dict__",
            "__getattribute__",
            "__init__",
        ):  # avoid recursion
            return object.__getattribute__(self, name)
        # Names the wrapped connector lacks fall back to the wrapper.
        if not hasattr(self._connector, name):
            return object.__getattribute__(self, name)
        attr = getattr(self._connector, name)
        # Intercept calls to the connector interface and write an event
        # for each one to a file, which can be read back in the main test proc.
        if callable(attr):

            def wrapper(*args, **kwargs):
                self.call_record[name] += 1
                # Include args that we're interested in
                to_log = [name]
                for arg in args:
                    if isinstance(arg, int):
                        to_log.append(str(arg))
                    elif isinstance(arg, KVCacheBlocks):
                        to_log.append(f"num_blocks={[len(b) for b in arg.blocks]}")
                # Log the event as a line to the file
                # Best-effort: a logging failure must not break the connector.
                try:
                    with open(self._event_file, "a") as f:
                        f.write(" ".join(to_log) + "\n")
                except Exception as e:
                    print(f"[ERROR] Could not log event {name} for {self.name}: {e}")
                return attr(*args, **kwargs)

            return wrapper
        return attr
@dataclass(frozen=True)
class MockKVConfig:
    """Knobs controlling MockKVConnector behavior (from extra config)."""

    # Number of external tokens get_num_new_matched_tokens always reports.
    matched_tokens: int = 0
    # Whether the mock pretends to load KV asynchronously.
    is_async: bool = False
class MockKVConnectorMetadata(KVConnectorMetadata):
    """Minimal metadata object exposing the scheduled request ids."""

    def __init__(self):
        # Scheduler tests check metadata.requests
        self.requests: list = []
class MockKVConnector(KVConnectorBase_V1):
    """Mock KV connector for scheduler tests, supporting both sync and async mode."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        role: KVConnectorRole,
        kv_cache_config: KVCacheConfig | None = None,
    ):
        super().__init__(vllm_config, role, kv_cache_config)
        # Behavior is driven entirely by the extra-config knobs.
        extra = self._kv_transfer_config.kv_connector_extra_config
        self.config = MockKVConfig(
            matched_tokens=extra["matched_tokens"],
            is_async=extra["is_async"],
        )

    def get_num_new_matched_tokens(
        self,
        request: Request,
        num_computed_tokens: int,
    ) -> tuple[int | None, bool]:
        # Always report the configured token count / async flag.
        cfg = self.config
        return cfg.matched_tokens, cfg.is_async

    def update_state_after_alloc(
        self,
        request: Request,
        blocks: KVCacheBlocks,
        num_external_tokens: int,
    ):
        # The mock keeps no per-request state.
        pass

    def build_connector_meta(
        self, scheduler_output: SchedulerOutput
    ) -> KVConnectorMetadata:
        """Record newly scheduled requests, then resumed cached requests."""
        metadata = MockKVConnectorMetadata()
        for new_req in scheduler_output.scheduled_new_reqs:
            metadata.requests.append({"req_id": new_req.req_id})
        cached_reqs = scheduler_output.scheduled_cached_reqs
        resumed = cached_reqs.resumed_req_ids
        for req_id in cached_reqs.req_ids:
            if req_id in resumed:
                metadata.requests.append({"req_id": req_id})
        return metadata

    def start_load_kv(self, kv_caches, finished_req_ids):
        pass

    def wait_for_layer_load(self, layer_name):
        pass

    def save_kv_layer(self, layer_name, kv_layer, attn_metadata, **kwargs):
        pass

    def wait_for_save(self):
        pass
# Register both test connectors with the factory so configs can refer to
# them by name (kv_connector="TestExampleConnector" / "MockKVConnector").
KVConnectorFactory.register_connector(
    "TestExampleConnector", __name__, TestExampleConnector.__name__
)
KVConnectorFactory.register_connector(
    "MockKVConnector", __name__, MockKVConnector.__name__
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/kv_connector/unit/utils.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import copy
import logging
import math
import os
import queue
import sys
import threading
import time
import uuid
from collections import defaultdict
from collections.abc import Iterator
from concurrent.futures import Future, ThreadPoolExecutor
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
import msgspec
import numpy as np
import torch
import zmq
from vllm import envs
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer.kv_connector.utils import (
EngineId,
TpKVTopology,
get_current_attn_backend,
kv_postprocess_blksize_and_layout_on_receive,
kv_postprocess_blksize_on_receive,
kv_postprocess_layout_on_receive,
yield_req_data,
)
from vllm.distributed.kv_transfer.kv_connector.v1.base import (
CopyBlocksOp,
KVConnectorBase_V1,
KVConnectorHandshakeMetadata,
KVConnectorMetadata,
KVConnectorRole,
)
from vllm.distributed.kv_transfer.kv_connector.v1.metrics import (
KVConnectorPromMetrics,
KVConnectorStats,
PromMetric,
PromMetricT,
)
from vllm.distributed.parallel_state import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
get_tp_group,
)
from vllm.forward_context import ForwardContext
from vllm.logger import init_logger
from vllm.platforms import current_platform
from vllm.utils.network_utils import make_zmq_path, make_zmq_socket
from vllm.v1.attention.backend import AttentionBackend, AttentionMetadata
from vllm.v1.attention.backends.utils import get_kv_cache_layout
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.worker.block_table import BlockTable
if TYPE_CHECKING:
from vllm.v1.core.kv_cache_manager import KVCacheBlocks
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.request import Request
# Type aliases for readability: NIXL transfer handles are plain ints,
# request ids are strings.
TransferHandle = int
ReqId = str
#
# NIXL Connector Version
#
# Increment this version whenever there is an incompatible change to:
# - NixlAgentMetadata schema
# - kv_transfer_params schema or semantics
# - NIXL transfer protocol or wire format
# - KV cache memory layout or block organization
# - Any other change that breaks P/D interoperability
#
# Version History:
# 1: Initial version with compatibility checking
# 2: Add remote_request_id to kv_transfer_params
#
NIXL_CONNECTOR_VERSION: int = 2
# Sentinel request sent on the side channel to ask for handshake metadata.
GET_META_MSG = b"get_meta_msg"
logger = init_logger(__name__)
# Lazy import nixl_wrapper to avoid loading nixl_bindings if nixl is not used
try:
if "UCX_RCACHE_MAX_UNRELEASED" not in os.environ:
# avoid a memory leak in UCX when using NIXL on some models
# see: https://github.com/vllm-project/vllm/issues/24264
if "nixl" in sys.modules or "rixl" in sys.modules:
logger.warning(
"NIXL was already imported, we can't reset UCX_RCACHE_MAX_UNRELEASED. "
"Please set it to '1024' manually."
)
else:
logger.info(
"Setting UCX_RCACHE_MAX_UNRELEASED to '1024' to avoid a rare "
"memory leak in UCX when using NIXL."
)
os.environ["UCX_RCACHE_MAX_UNRELEASED"] = "1024"
if not current_platform.is_rocm():
from nixl._api import nixl_agent as NixlWrapper
from nixl._bindings import nixlXferTelemetry
else:
from rixl._api import nixl_agent as NixlWrapper
from rixl._bindings import nixlXferTelemetry
logger.info("NIXL is available")
except ImportError:
logger.warning("NIXL is not available")
NixlWrapper = None
nixlXferTelemetry = None
try:
if not current_platform.is_rocm():
from nixl._api import nixl_agent_config
else:
from rixl._api import nixl_agent_config
except ImportError:
nixl_agent_config = None
logger.warning("NIXL agent config is not available")
# Supported platforms and types of kv transfer buffer.
# {device: tuple of supported kv buffer types}
_NIXL_SUPPORTED_DEVICE = {
    "cuda": (
        "cuda",
        "cpu",
    ),
    "tpu": ("cpu",),
    "xpu": ("cpu",),
    "cpu": ("cpu",),
}
# support for oot (out-of-tree) platform by providing mapping
# in current_platform
_NIXL_SUPPORTED_DEVICE.update(current_platform.get_nixl_supported_devices())
@dataclass
class NixlAgentMetadata:
    """Per-rank NIXL registration info exchanged during the P/D handshake
    (carried encoded inside NixlHandshakePayload.agent_metadata_bytes)."""

    engine_id: str
    # Opaque serialized NIXL agent descriptor.
    agent_metadata: bytes
    # Base addresses of the registered KV cache regions
    # (presumably one entry per region — confirm against register_kv_caches).
    kv_caches_base_addr: list[int]
    device_id: int
    num_blocks: int
    # Byte lengths matching kv_caches_base_addr entry-for-entry
    # — TODO confirm against the worker-side registration code.
    block_lens: list[int]
    kv_cache_layout: str
    block_size: int
@dataclass
class NixlHandshakePayload(KVConnectorHandshakeMetadata):
    """
    Wrapper for NIXL handshake sent over the wire.
    Enables two-phase decoding for graceful compatibility checking:
    1. Decode NixlHandshakePayload to get compatibility_hash
    2. Compute local hash and compare
    3. Only if hashes match, decode agent_metadata_bytes
    This prevents decoder errors when NixlAgentMetadata schema is
    incompatible, allowing graceful failure with clear error message.
    """

    # Output of compute_nixl_compatibility_hash() on the sending side.
    compatibility_hash: str
    agent_metadata_bytes: bytes  # NixlAgentMetadata encoded
def compute_nixl_compatibility_hash(
    vllm_config: VllmConfig, attn_backend_name: str, cross_layers_blocks: bool
) -> str:
    """
    Compute compatibility hash for NIXL KV transfer.

    Hash only the factors that affect whether two NIXL instances can
    successfully transfer KV cache data.

    Factors included:
    - vLLM version and NIXL connector version
    - Model architecture (name, dtype, KV heads, head size, layer count)
    - KV cache dtype
    - Attention backend
    - Cross-layer block layout flag

    Note: Factors like tensor_parallel_size, block_size, and kv_cache_layout
    are validated at runtime in _validate_remote_agent_handshake and are not
    included in this hash to support heterogeneous deployments.

    Note - the set of factors are likely to evolve significantly over
    time to be more or less permissive.

    Returns:
        SHA-256 hex digest
    """
    # Local imports — presumably to avoid an import cycle at module load;
    # confirm before hoisting to file level.
    from vllm import __version__ as vllm_version
    from vllm.config.utils import hash_factors

    model_config = vllm_config.model_config
    cache_config = vllm_config.cache_config
    factors = {
        # Version compatibility
        "vllm_version": vllm_version,
        "nixl_connector_version": NIXL_CONNECTOR_VERSION,
        # Model architecture - affects KV cache shape
        "model": model_config.model,
        "dtype": str(model_config.dtype),
        "num_kv_heads": model_config.get_total_num_kv_heads(),
        "head_size": model_config.get_head_size(),
        "num_hidden_layers": model_config.get_total_num_hidden_layers(),
        # Attention backend and KV cache dtype affect memory layout
        "attn_backend_name": attn_backend_name,
        "cache_dtype": str(cache_config.cache_dtype),
        "cross_layers_blocks": cross_layers_blocks,
    }
    compat_hash = hash_factors(factors)
    logger.debug(
        "NIXL compatibility hash: %s (model=%s, dtype=%s, num_kv_heads=%d, "
        "cache_dtype=%s, attn_backend=%s)",
        compat_hash,
        factors["model"],
        factors["dtype"],
        factors["num_kv_heads"],
        factors["cache_dtype"],
        attn_backend_name,
    )
    return compat_hash
@dataclass
class RemoteMeta:
    """Where a request's KV blocks live on the remote (prefill) instance."""

    block_ids: list[int]
    # Side-channel address of the remote instance.
    host: str
    port: int
    engine_id: str
    # The request id on the remote engine (may differ from the local id).
    request_id: str
@dataclass
class ReqMeta:
    """Per-request transfer description passed from scheduler to worker."""

    local_block_ids: list[int]
    # To be used when logical block size does not match the kernel block size
    local_physical_block_ids: list[int]
    tp_size: int
    # Set only for requests pulling KV from a remote prefill instance.
    remote: RemoteMeta | None = None
class NixlConnectorMetadata(KVConnectorMetadata):
    """Scheduler-to-worker metadata describing NIXL work for one step."""

    def __init__(self):
        # Requests whose KV must be pulled from a remote instance.
        self.reqs_to_recv: dict[ReqId, ReqMeta] = {}
        # Requests whose KV must be staged for saving.
        self.reqs_to_save: dict[ReqId, ReqMeta] = {}
        # Finished requests mapped to their send-expiry timestamps.
        self.reqs_to_send: dict[ReqId, float] = {}
        self.reqs_in_batch: set[ReqId] = set()
        self.reqs_not_processed: set[ReqId] = set()

    def _add_new_req(
        self,
        local_block_ids: list[int],
        kv_transfer_params: dict[str, Any],
    ) -> ReqMeta:
        # P workers don't need to receive tp_size from proxy here.
        tp_size = kv_transfer_params.get("tp_size", 1)
        return ReqMeta(
            local_block_ids=local_block_ids,
            local_physical_block_ids=local_block_ids,
            tp_size=tp_size,
        )

    def add_new_req_to_save(
        self,
        request_id: ReqId,
        local_block_ids: list[int],
        kv_transfer_params: dict[str, Any],
    ):
        meta = self._add_new_req(local_block_ids, kv_transfer_params)
        self.reqs_to_save[request_id] = meta

    def add_new_req_to_recv(
        self,
        request_id: ReqId,
        local_block_ids: list[int],
        kv_transfer_params: dict[str, Any],
    ):
        meta = self._add_new_req(local_block_ids, kv_transfer_params)
        # Attach the remote-side coordinates needed to pull the blocks.
        meta.remote = RemoteMeta(
            block_ids=kv_transfer_params["remote_block_ids"],
            engine_id=kv_transfer_params["remote_engine_id"],
            request_id=kv_transfer_params["remote_request_id"],
            host=kv_transfer_params["remote_host"],
            port=kv_transfer_params["remote_port"],
        )
        self.reqs_to_recv[request_id] = meta
class NixlConnector(KVConnectorBase_V1):
    """Role-dispatching facade: delegates every call to either a
    scheduler-side (NixlConnectorScheduler) or worker-side
    (NixlConnectorWorker) implementation, exactly one of which exists
    per instance."""

    @property
    def prefer_cross_layer_blocks(self) -> bool:
        # Cross-layer blocks are only used for explicitly listed backends
        # with the HND layout, and require an extra-config opt-in.
        backend = get_current_attn_backend(self._vllm_config)
        if backend.get_name() not in (
            "FLASH_ATTN",
            "FLASHINFER",
        ):
            return False
        # For now there is no benefit to run cross layers when backend
        # does not support on HND
        if get_kv_cache_layout() != "HND":
            return False
        extra_config = self.kv_transfer_config.kv_connector_extra_config
        # str(...) so both bool True and string "true"/"True" enable it.
        return (
            str(extra_config.get("enable_cross_layers_blocks", "False")).lower()
            == "true"
        )

    def __init__(
        self,
        vllm_config: VllmConfig,
        role: KVConnectorRole,
        kv_cache_config: "KVCacheConfig | None" = None,
    ):
        super().__init__(vllm_config, role, kv_cache_config)
        assert vllm_config.kv_transfer_config is not None
        assert vllm_config.kv_transfer_config.engine_id is not None
        self.engine_id: EngineId = vllm_config.kv_transfer_config.engine_id
        self.kv_transfer_config = vllm_config.kv_transfer_config
        # Exactly one delegate is instantiated, depending on role.
        if role == KVConnectorRole.SCHEDULER:
            self.connector_scheduler: NixlConnectorScheduler | None = (
                NixlConnectorScheduler(vllm_config, self.engine_id)
            )
            self.connector_worker: NixlConnectorWorker | None = None
        elif role == KVConnectorRole.WORKER:
            self.connector_scheduler = None
            self.connector_worker = NixlConnectorWorker(vllm_config, self.engine_id)

    ############################################################
    # Class Methods
    ############################################################
    @classmethod
    def get_required_kvcache_layout(cls, vllm_config: VllmConfig):
        """Return the KV cache layout NIXL requires, or None for default."""
        if vllm_config.model_config is None:
            logger.warning_once(
                "Unable to detect current VLLM config. "
                "Fallback to default kv cache layout."
            )
            return None
        use_mla = vllm_config.model_config.use_mla
        if use_mla:
            # return None when we have mla
            # as the layout should not matter in that case,
            # which fallback to the default behavior.
            return None
        logger.info_once(
            "NixlConnector setting KV cache layout to HND for better xfer performance."
        )
        return "HND"

    ############################################################
    # Scheduler Side Methods
    ############################################################
    def get_num_new_matched_tokens(
        self, request: "Request", num_computed_tokens: int
    ) -> tuple[int | None, bool]:
        """Delegate to the scheduler-side implementation."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.get_num_new_matched_tokens(
            request, num_computed_tokens
        )

    def update_state_after_alloc(
        self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int
    ):
        """Delegate to the scheduler-side implementation."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.update_state_after_alloc(
            request, blocks, num_external_tokens
        )

    def build_connector_meta(
        self,
        scheduler_output: SchedulerOutput,
    ) -> KVConnectorMetadata:
        """Delegate to the scheduler-side implementation."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.build_connector_meta(scheduler_output)

    def request_finished(
        self,
        request: "Request",
        block_ids: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """Delegate to the scheduler-side implementation."""
        assert self.connector_scheduler is not None
        return self.connector_scheduler.request_finished(request, block_ids)

    def set_xfer_handshake_metadata(
        self, metadata: dict[int, KVConnectorHandshakeMetadata]
    ) -> None:
        """
        Set the KV connector handshake metadata for this connector.
        Args:
            metadata (dict): the handshake metadata to set.
        """
        assert self.connector_scheduler is not None
        self.connector_scheduler.set_xfer_handshake_metadata(metadata)

    ############################################################
    # Worker Side Methods
    ############################################################
    def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
        """Delegate KV cache registration to the worker side."""
        assert self.connector_worker is not None
        self.connector_worker.register_kv_caches(kv_caches)

    def register_cross_layers_kv_cache(
        self, kv_cache: torch.Tensor, attn_backend: type[AttentionBackend]
    ):
        """Register a single cross-layer tensor under one synthetic name."""
        # NOTE(review): attn_backend is currently unused here — confirm
        # whether that is intentional.
        assert self.connector_worker is not None
        cross_layer_name = "ALL_LAYERS"
        kv_caches = {cross_layer_name: kv_cache}
        self.connector_worker.register_kv_caches(kv_caches)

    def set_host_xfer_buffer_ops(self, copy_operation: CopyBlocksOp):
        """Install host-buffer copy operations on the worker side."""
        assert self.connector_worker is not None
        self.connector_worker.set_host_xfer_buffer_ops(copy_operation)

    def get_finished(self, finished_req_ids: set[str]) -> tuple[set[str], set[str]]:
        """Get the finished recving and sending requests."""
        # NOTE(review): finished_req_ids is not forwarded; the worker tracks
        # completion itself — confirm against the base-class contract.
        assert self.connector_worker is not None
        return self.connector_worker.get_finished()

    def get_block_ids_with_load_errors(self) -> set[int]:
        """Get block IDs that failed to load via NIXL."""
        assert self.connector_worker is not None
        return self.connector_worker.get_block_ids_with_load_errors()

    def get_kv_connector_stats(self) -> KVConnectorStats | None:
        # Stats exist only on the worker side; scheduler role returns None.
        if self.connector_worker is None:
            return None
        return self.connector_worker.get_kv_connector_stats()

    @classmethod
    def build_kv_connector_stats(
        cls, data: dict[str, Any] | None = None
    ) -> KVConnectorStats | None:
        """Construct a NIXL stats object, optionally seeded with data."""
        return (
            NixlKVConnectorStats(data=data)
            if data is not None
            else NixlKVConnectorStats()
        )

    @classmethod
    def build_prom_metrics(
        cls,
        vllm_config: VllmConfig,
        metric_types: dict[type[PromMetric], type[PromMetricT]],
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
    ) -> KVConnectorPromMetrics:
        """Construct the NIXL-specific Prometheus metrics bundle."""
        return NixlPromMetrics(
            vllm_config, metric_types, labelnames, per_engine_labelvalues
        )

    def start_load_kv(self, forward_context: "ForwardContext", **kwargs) -> None:
        """Start the KV loads described by the current connector metadata."""
        assert self.connector_worker is not None
        assert isinstance(self._connector_metadata, NixlConnectorMetadata)
        self.connector_worker.start_load_kv(self._connector_metadata)

    def wait_for_layer_load(self, layer_name: str) -> None:
        """NixlConnector does not do layerwise saving."""
        pass

    def save_kv_layer(
        self,
        layer_name: str,
        kv_layer: torch.Tensor,
        attn_metadata: AttentionMetadata,
        **kwargs,
    ) -> None:
        """NixlConnector does not save explicitly."""
        pass

    def wait_for_save(self):
        # Saving is only needed when KV is staged through a host buffer.
        assert self.connector_worker is not None
        assert isinstance(self._connector_metadata, NixlConnectorMetadata)
        if self.connector_worker.use_host_buffer and self.connector_worker.copy_blocks:
            self.connector_worker.save_kv_to_host(self._connector_metadata)

    def shutdown(self):
        """Shut down whichever delegate exists for this role."""
        if self.connector_worker is not None:
            self.connector_worker.shutdown()
        if self.connector_scheduler is not None:
            self.connector_scheduler.shutdown()

    def get_handshake_metadata(self) -> KVConnectorHandshakeMetadata | None:
        """
        Get the KVConnector handshake metadata for this connector.
        This metadata is used for out-of-band connector handshake
        between P/D workers.
        Returns:
            KVConnectorHandshakeMetadata: the handshake metadata.
            None if no handshake metadata is available.
        """
        assert self.connector_worker is not None
        return self.connector_worker.xfer_handshake_metadata
class NixlConnectorScheduler:
    """Implementation of Scheduler side methods"""

    def __init__(self, vllm_config: VllmConfig, engine_id: str):
        self.vllm_config = vllm_config
        self.block_size = vllm_config.cache_config.block_size
        self.engine_id: EngineId = engine_id
        self.side_channel_host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST
        # Each data-parallel rank listens on its own side-channel port.
        self.side_channel_port = (
            envs.VLLM_NIXL_SIDE_CHANNEL_PORT
            + vllm_config.parallel_config.data_parallel_index
        )
        assert vllm_config.kv_transfer_config is not None
        # Host staging buffer is used when a non-CPU device transfers via
        # a CPU kv buffer; on pure-CPU platforms no staging is needed.
        if current_platform.device_type == "cpu":
            self.use_host_buffer = False
        else:
            self.use_host_buffer = (
                vllm_config.kv_transfer_config.kv_buffer_device == "cpu"
            )
        logger.info("Initializing NIXL Scheduler %s", engine_id)
        # Background thread for handling new handshake requests.
        self._nixl_handshake_listener_t: threading.Thread | None = None
        self._encoded_xfer_handshake_metadata: dict[int, Any] = {}
        self._stop_event = threading.Event()
        # Requests that need to start recv/send.
        # New requests are added by update_state_after_alloc in
        # the scheduler. Used to make metadata passed to Worker.
        self._reqs_need_recv: dict[ReqId, tuple[Request, list[int]]] = {}
        self._reqs_need_save: dict[ReqId, Request] = {}
        # Reqs to send and their expiration time
        self._reqs_need_send: dict[ReqId, float] = {}
        self._reqs_in_batch: set[ReqId] = set()
        # Reqs to remove from processed set because they're not to send after
        # remote prefill or aborted.
        self._reqs_not_processed: set[ReqId] = set()

    def shutdown(self):
        """Stop and join the handshake listener thread, if running."""
        self._stop_event.set()
        if self._nixl_handshake_listener_t is not None:
            self._nixl_handshake_listener_t.join()
            self._nixl_handshake_listener_t = None

    def set_xfer_handshake_metadata(
        self, metadata: dict[int, KVConnectorHandshakeMetadata]
    ) -> None:
        """
        Set the KV connector handshake metadata for this connector.
        Args:
            metadata (dict): the handshake metadata to set.
        """
        # Pre-encode per-TP-rank payloads once so the listener thread can
        # serve them without re-serializing on every request.
        encoded_data: dict[int, bytes] = {}
        encoder = msgspec.msgpack.Encoder()
        for tp_rank, rank_metadata in metadata.items():
            if not isinstance(rank_metadata, NixlHandshakePayload):
                raise ValueError(
                    "NixlConnectorScheduler expects NixlHandshakePayload for "
                    "handshake metadata."
                )
            encoded_data[tp_rank] = encoder.encode(rank_metadata)
            logger.debug(
                "Tp rank %d: encoded NixlHandshakePayload size: %s bytes",
                tp_rank,
                str(len(encoded_data[tp_rank])),
            )
        self._encoded_xfer_handshake_metadata = encoded_data
        # Only start the listener when we have metadata to serve.
        if self._nixl_handshake_listener_t is None:
            ready_event = threading.Event()
            self._nixl_handshake_listener_t = threading.Thread(
                target=self._nixl_handshake_listener,
                args=(
                    encoded_data,
                    ready_event,
                    self._stop_event,
                    self.side_channel_port,
                ),
                daemon=True,
                name="nixl_handshake_listener",
            )
            self._nixl_handshake_listener_t.start()
            ready_event.wait()  # Wait for listener ZMQ socket to be ready.

    @staticmethod
    def _nixl_handshake_listener(
        encoded_data: dict[int, Any],
        ready_event: threading.Event,
        stop_event: threading.Event,
        port: int,
    ):
        """Background thread for getting new NIXL handshakes."""
        # NOTE(rob): this is a simple implementation. We will move
        # to a better approach via HTTP endpoint soon.
        # Listen for new requests for metadata.
        host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST
        path = make_zmq_path("tcp", host, port)
        logger.debug("Starting listening on path: %s", path)
        with zmq_ctx(zmq.ROUTER, path) as sock:
            # Timed recv so the loop can notice stop_event periodically.
            sock.setsockopt(zmq.RCVTIMEO, 1000)
            ready_event.set()
            while True:
                try:
                    identity, _, msg = sock.recv_multipart()
                except zmq.Again:
                    if stop_event.is_set():
                        break
                    continue
                # Decode the message which contains (GET_META_MSG, rank)
                msg, target_tp_rank = msgspec.msgpack.decode(msg)
                logger.debug(
                    "Received message for tp rank %s",
                    target_tp_rank,
                )
                if msg != GET_META_MSG:
                    logger.warning("Connection listener got unexpected message %s", msg)
                sock.send_multipart((identity, b"", encoded_data[target_tp_rank]))

    def get_num_new_matched_tokens(
        self, request: "Request", num_computed_tokens: int
    ) -> tuple[int, bool]:
        """
        For remote prefill, pull all prompt blocks from remote
        asynchronously relative to engine execution.
        Args:
            request (Request): the request object.
            num_computed_tokens (int): the number of locally
                computed tokens for this request
        Returns:
            * the number of tokens that can be loaded from the
              external KV cache beyond what is already computed.
            * true if the external KV cache tokens will be loaded
              asynchronously (between scheduler steps).
        """
        params = request.kv_transfer_params
        logger.debug(
            "NIXLConnector get_num_new_matched_tokens: "
            "num_computed_tokens=%s, kv_transfer_params=%s",
            num_computed_tokens,
            params,
        )
        if params is not None and params.get("do_remote_prefill"):
            # Remote prefill: get all prompt blocks from remote.
            token_ids = request.prompt_token_ids or []
            count = len(token_ids) - num_computed_tokens
            if count > 0:
                return count, True
        # No remote prefill for this request.
        return 0, False

    def update_state_after_alloc(
        self, request: "Request", blocks: "KVCacheBlocks", num_external_tokens: int
    ):
        """Record recv/save work for a request after block allocation."""
        params = request.kv_transfer_params
        logger.debug(
            "NIXLConnector update_state_after_alloc: "
            "num_external_tokens=%s, kv_transfer_params=%s",
            num_external_tokens,
            params,
        )
        if not params:
            return
        if params.get("do_remote_decode"):
            self._reqs_in_batch.add(request.request_id)
        if self.use_host_buffer and params.get("do_remote_decode"):
            # NOTE: when accelerator is not directly supported by Nixl,
            # prefilled blocks need to be saved to host memory before transfer.
            self._reqs_need_save[request.request_id] = request
        elif params.get("do_remote_prefill"):
            if params.get("remote_block_ids"):
                # All remote coordinates must be present to schedule a recv.
                if all(
                    p in params
                    for p in (
                        "remote_engine_id",
                        "remote_request_id",
                        "remote_host",
                        "remote_port",
                    )
                ):
                    # If remote_blocks and num_external_tokens = 0, we have
                    # a full prefix cache hit on the D worker. We need to call
                    # send_notif in _read_blocks to free the memory on the P.
                    local_block_ids = (
                        blocks.get_unhashed_block_ids()
                        if num_external_tokens > 0
                        else []
                    )
                    # Get unhashed blocks to pull from remote.
                    self._reqs_need_recv[request.request_id] = (
                        request,
                        local_block_ids,
                    )
                else:
                    logger.warning(
                        "Got invalid KVTransferParams: %s. This "
                        "request will not utilize KVTransfer",
                        params,
                    )
            else:
                assert num_external_tokens == 0
            # Only trigger 1 KV transfer per request.
            params["do_remote_prefill"] = False

    def build_connector_meta(
        self,
        scheduler_output: SchedulerOutput,
    ) -> KVConnectorMetadata:
        """Snapshot the pending recv/save/send state into step metadata,
        clearing the per-step accumulators afterwards."""
        meta = NixlConnectorMetadata()
        # Loop through scheduled reqs and convert to ReqMeta.
        for req_id, (req, block_ids) in self._reqs_need_recv.items():
            assert req.kv_transfer_params is not None
            meta.add_new_req_to_recv(
                request_id=req_id,
                local_block_ids=block_ids,
                kv_transfer_params=req.kv_transfer_params,
            )
        # NOTE: For the prefill side, there might be a chance that an early added
        # request is a chunked prefill, so we need to check if new blocks are added
        for req_id, new_block_id_groups, _ in yield_req_data(scheduler_output):
            req_to_save = self._reqs_need_save.get(req_id)
            if req_to_save is None or new_block_id_groups is None:
                continue
            req = req_to_save
            assert req.kv_transfer_params is not None
            meta.add_new_req_to_save(
                request_id=req_id,
                local_block_ids=new_block_id_groups[0],
                kv_transfer_params=req.kv_transfer_params,
            )
            assert scheduler_output.num_scheduled_tokens is not None
            num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id]
            is_partial = (
                req.num_computed_tokens + num_scheduled_tokens
            ) < req.num_prompt_tokens
            if not is_partial:
                # For non-partial prefills, once new req_meta is scheduled, it
                # can be removed from _reqs_need_save.
                # For partial prefill case, we will retain the request in
                # _reqs_need_save until all blocks are scheduled with req_meta.
                # Therefore, only pop if `not is_partial`.
                self._reqs_need_save.pop(req_id)
        meta.reqs_to_send = self._reqs_need_send
        meta.reqs_in_batch = self._reqs_in_batch
        meta.reqs_not_processed = self._reqs_not_processed
        # Clear the list once workers start the transfers
        self._reqs_need_recv.clear()
        self._reqs_in_batch = set()
        self._reqs_not_processed = set()
        self._reqs_need_send = {}
        return meta

    def request_finished(
        self,
        request: "Request",
        block_ids: list[int],
    ) -> tuple[bool, dict[str, Any] | None]:
        """
        Once a request is finished, determine whether request blocks
        should be freed now or will be sent asynchronously and freed later.
        """
        from vllm.v1.request import RequestStatus

        params = request.kv_transfer_params
        logger.debug(
            "NIXLConnector request_finished(%s), request_status=%s, "
            "kv_transfer_params=%s",
            request.request_id,
            request.status,
            params,
        )
        if not params:
            return False, None
        if params.get("do_remote_prefill"):
            # If do_remote_prefill is still True when the request is finished,
            # update_state_after_alloc must not have been called (the request
            # must have been aborted before it was scheduled).
            # To avoid stranding the prefill blocks in the prefill instance,
            # we must add empty block_ids to _reqs_need_recv so that our
            # worker side will notify and free blocks in the prefill instance.
            self._reqs_need_recv[request.request_id] = (request, [])
            params["do_remote_prefill"] = False
            return False, None
        if not params.get("do_remote_decode"):
            return False, None
        if request.status != RequestStatus.FINISHED_LENGTH_CAPPED:
            # Also include the case of a P/D Prefill request with immediate
            # block free (eg abort). Stop tracking this request.
            self._reqs_not_processed.add(request.request_id)
            # Clear _reqs_need_save if a request is aborted as partial prefill.
            self._reqs_need_save.pop(request.request_id, None)
            return False, None
        # TODO: check whether block_ids actually ever be 0. If not we could
        # remove the conditional below
        delay_free_blocks = len(block_ids) > 0
        if delay_free_blocks:
            # Prefill request on remote. It will be read from D upon completion
            logger.debug(
                "NIXLConnector request_finished(%s) waiting for %d seconds "
                "for remote decode to fetch blocks",
                request.request_id,
                envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT,
            )
            self._reqs_need_send[request.request_id] = (
                time.perf_counter() + envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT
            )
        # These params are consumed by the decode side to pull our blocks.
        return delay_free_blocks, dict(
            do_remote_prefill=True,
            do_remote_decode=False,
            remote_block_ids=block_ids,
            remote_engine_id=self.engine_id,
            remote_request_id=request.request_id,
            remote_host=self.side_channel_host,
            remote_port=self.side_channel_port,
            tp_size=self.vllm_config.parallel_config.tensor_parallel_size,
        )
class NixlConnectorWorker:
"""Implementation of Worker side methods"""
def __init__(self, vllm_config: VllmConfig, engine_id: str):
    """Initialize per-TP-rank NIXL worker state.

    Sets up the NIXL agent, TP/distributed bookkeeping, KV-cache tracking
    structures, and the background handshake executor. Actual KV cache
    registration happens later in ``register_kv_caches``.

    Args:
        vllm_config: Full vLLM config; ``kv_transfer_config`` must be set.
        engine_id: Unique identifier of this engine instance.

    Raises:
        RuntimeError: if the NIXL library is not importable, or the
            device/kv_buffer_device combination is unsupported.
        ValueError: if ``kv_transfer_config`` is missing.
    """
    if NixlWrapper is None:
        logger.error("NIXL is not available")
        raise RuntimeError("NIXL is not available")
    logger.info("Initializing NIXL wrapper")
    logger.info("Initializing NIXL worker %s", engine_id)

    # Config.
    self.vllm_config = vllm_config
    self.block_size = vllm_config.cache_config.block_size

    if vllm_config.kv_transfer_config is None:
        raise ValueError("kv_transfer_config must be set for NixlConnector")
    self.kv_transfer_config = vllm_config.kv_transfer_config

    self.nixl_backends = vllm_config.kv_transfer_config.get_from_extra_config(
        "backends", ["UCX"]
    )
    # Agent.
    non_ucx_backends = [b for b in self.nixl_backends if b != "UCX"]
    # Configure NIXL num_threads to avoid UAR exhaustion on Mellanox NICs.
    # Each UCX thread allocates UARs (doorbell pages) via DevX, and
    # excessive NIXL UAR usage can exhaust NIC UAR space. This can cause
    # components like NVSHMEM (used by DeepEP kernels) to fail during RDMA
    # initialization with "mlx5dv_devx_alloc_uar" errors.
    # Ref: https://network.nvidia.com/files/doc-2020/ethernet-adapters-programming-manual.pdf#page=63
    num_threads = vllm_config.kv_transfer_config.get_from_extra_config(
        "num_threads", 4
    )
    if nixl_agent_config is None:
        # Older NIXL versions take no agent config object.
        config = None
    else:
        # Enable telemetry by default for NIXL 0.7.1 and above.
        # num_threads is only passed for the pure-UCX case; with other
        # backends present, NIXL picks its own threading defaults.
        config = (
            nixl_agent_config(backends=self.nixl_backends, capture_telemetry=True)
            if len(non_ucx_backends) > 0
            else nixl_agent_config(num_threads=num_threads, capture_telemetry=True)
        )

    self.nixl_wrapper = NixlWrapper(str(uuid.uuid4()), config)
    # Map of engine_id -> {rank0: agent_name0, rank1: agent_name1..}.
    self._remote_agents: dict[EngineId, dict[int, str]] = defaultdict(dict)

    # Metadata.
    self.engine_id: EngineId = engine_id
    self.tp_rank = get_tensor_model_parallel_rank()
    self.world_size = get_tensor_model_parallel_world_size()
    self.tp_group = get_tp_group()
    self.num_blocks = 0
    self.enable_permute_local_kv = False

    # KV Caches and nixl tracking data.
    self.device_type = current_platform.device_type
    self.kv_buffer_device: str = vllm_config.kv_transfer_config.kv_buffer_device
    if self.device_type not in _NIXL_SUPPORTED_DEVICE:
        raise RuntimeError(f"{self.device_type} is not supported.")
    elif self.kv_buffer_device not in _NIXL_SUPPORTED_DEVICE[self.device_type]:
        raise RuntimeError(
            f"{self.device_type} with {self.kv_buffer_device} kv_buffer "
            "is not supported."
        )
    self.device_kv_caches: dict[str, torch.Tensor] = {}

    # cpu kv buffer for xfer
    # used when device memory can not be registered under nixl
    self.host_xfer_buffers: dict[str, torch.Tensor] = {}
    if self.device_type == "cpu":
        # Device memory IS host memory; no staging buffer needed.
        self.use_host_buffer = False
    else:
        self.use_host_buffer = self.kv_buffer_device == "cpu"

    # reserve different cores for start_load_kv() from model_forward()
    if self.device_type == "cpu":
        numa_core_list = current_platform.discover_numa_topology()
        # setup one last core in each numa for kv transfer.
        rsv_cores_for_kv = [
            max(each_numa_core_list) for each_numa_core_list in numa_core_list
        ]
        if rsv_cores_for_kv:
            if not hasattr(os, "sched_setaffinity"):
                raise NotImplementedError(
                    "os.sched_setaffinity is not available on this platform"
                )
            os.sched_setaffinity(0, rsv_cores_for_kv)

    # support for oot platform which can't register nixl memory
    # type based on kv_buffer_device
    nixl_memory_type = current_platform.get_nixl_memory_type()
    if nixl_memory_type is None:
        # Fall back to the standard device->memory-type mapping.
        if self.kv_buffer_device == "cuda":
            nixl_memory_type = "VRAM"
        elif self.kv_buffer_device == "cpu":
            nixl_memory_type = "DRAM"
    if nixl_memory_type is None:
        raise RuntimeError(
            f"{self.device_type} with {self.kv_buffer_device} kv_buffer "
            "is not supported."
        )
    self.nixl_memory_type = nixl_memory_type

    # Note: host xfer buffer ops when use_host_buffer is True
    self.copy_blocks: CopyBlocksOp | None = None

    # Map of engine_id -> kv_caches_base_addr. For TP case, each local
    self.device_id: int = 0
    # Current rank may pull from multiple remote TP workers.
    # EngineId, dict[int, list[int]] -> engine_id, tp_rank, base_addr_for_layer
    self.kv_caches_base_addr = defaultdict[EngineId, dict[int, list[int]]](dict)

    # Number of NIXL regions. Currently one region per cache
    # (so 1 per layer for MLA, otherwise 2 per layer)
    self.num_regions = 0
    self.num_layers = 0

    # nixl_prepped_dlist_handle.
    self.src_xfer_handles_by_block_size: dict[int, int] = {}
    # Populated dynamically during handshake based on remote configuration.
    # Keep track of regions at different tp_ratio values. tp_ratio->handles
    self.src_xfer_handles_by_tp_ratio: dict[int, list[int]] = {}
    # Map of engine_id -> {tp_rank: nixl_prepped_dlist_handle (int)}.
    self.dst_xfer_side_handles = defaultdict[EngineId, dict[int, int]](dict)
    # Map of engine_id -> num_blocks. All ranks in the same deployment will
    # have the same number of blocks.
    self.dst_num_blocks: dict[EngineId, int] = {}
    self._registered_descs: list[Any] = []

    # In progress transfers.
    # [req_id -> list[handle]]
    self._recving_metadata: dict[ReqId, ReqMeta] = {}
    self._recving_transfers = defaultdict[ReqId, list[TransferHandle]](list)
    # Track the expiration time of requests that are waiting to be sent.
    self._reqs_to_send: dict[ReqId, float] = {}
    # Set of requests that have been part of a batch, regardless of status.
    self._reqs_to_process: set[ReqId] = set()

    # invalid blocks from failed NIXL operations
    self._invalid_block_ids: set[int] = set()
    # requests that skipped transfer (handshake or transfer failures)
    self._failed_recv_reqs: set[ReqId] = set()

    # Handshake metadata of this worker for NIXL transfers.
    self.xfer_handshake_metadata: NixlHandshakePayload | None = None
    # Background thread for initializing new NIXL handshakes.
    self._handshake_initiation_executor = ThreadPoolExecutor(
        # NIXL is not guaranteed to be thread-safe, limit 1 worker.
        max_workers=1,
        thread_name_prefix="vllm-nixl-handshake-initiator",
    )
    self._ready_requests = queue.Queue[tuple[ReqId, ReqMeta]]()
    self._handshake_futures: dict[EngineId, Future[dict[int, str]]] = {}
    # Protects _handshake_futures and _remote_agents.
    self._handshake_lock = threading.RLock()

    self.block_size = vllm_config.cache_config.block_size
    self.model_config = vllm_config.model_config
    self.cache_config = vllm_config.cache_config

    # TODO(mgoin): remove this once we have hybrid memory allocator
    # Optimization for models with local attention (Llama 4)
    # List of block window sizes for each layer for local attention
    self.block_window_per_layer: list[int | None] = []
    self.use_mla = self.model_config.use_mla

    # Get the attention backend from the first layer
    # NOTE (NickLucche) models with multiple backends are not supported yet
    self.attn_backend = get_current_attn_backend(vllm_config)
    self.backend_name = self.attn_backend.get_name()
    self.kv_cache_layout = get_kv_cache_layout()
    # May diverge from kv_cache_layout when the host staging buffer is
    # re-laid-out (see initialize_host_xfer_buffer).
    self.host_buffer_kv_cache_layout = self.kv_cache_layout
    logger.debug("Detected attention backend %s", self.backend_name)
    logger.debug("Detected kv cache layout %s", self.kv_cache_layout)

    # lazy initialized in register_kv_caches
    self.compat_hash: str | None = None
    self.kv_topo: TpKVTopology | None = None

    self._tp_size: dict[EngineId, int] = {self.engine_id: self.world_size}
    self._block_size: dict[EngineId, int] = {self.engine_id: self.block_size}
    # With heterogeneous TP, P must wait for all assigned D TP workers to
    # finish reading before safely freeing the blocks.
    self.consumer_notification_counts_by_req = defaultdict[ReqId, int](int)
    self.xfer_stats = NixlKVConnectorStats()
    # >1 when the user-facing logical block size is a multiple of the
    # kernel's physical block size (set in register_kv_caches).
    self._physical_blocks_per_logical_kv_block = 1
    self.enforce_compat_hash = self.kv_transfer_config.get_from_extra_config(
        "enforce_handshake_compat", True
    )
def _nixl_handshake(
    self,
    host: str,
    port: int,
    remote_tp_size: int,
    expected_engine_id: str,
) -> dict[int, str]:
    """Do a NIXL handshake with a remote instance.

    Runs on the single-threaded handshake executor. Queries each remote TP
    rank this local rank must read from, validates the compatibility hash,
    and registers the remote agent(s).

    Args:
        host: Side-channel host of the remote instance.
        port: Side-channel port of the remote instance.
        remote_tp_size: Remote tensor-parallel world size.
        expected_engine_id: Engine id the remote must report.

    Returns:
        Mapping of remote TP rank -> registered NIXL agent name.

    Raises:
        RuntimeError: on payload decode failure, compatibility-hash
            mismatch, or engine-id mismatch.
    """
    # When target instance TP > local TP, we need to perform multiple
    # handshakes. Do it in a single background job for simplicity.
    # Regardless, only handshake with the remote TP rank(s) that current
    # local rank will read from. Note that With homogeneous TP,
    # this happens to be the same single rank_i.
    assert self.kv_topo is not None
    p_remote_ranks = self.kv_topo.get_target_remote_ranks(remote_tp_size)
    remote_rank_to_agent_name = {}
    path = make_zmq_path("tcp", host, port)
    # One REQ socket is reused for all rank queries to this remote.
    with zmq_ctx(zmq.REQ, path) as sock:
        for remote_rank in p_remote_ranks:
            logger.debug(
                "Querying metadata on path: %s at remote tp rank %s",
                path,
                remote_rank,
            )

            start_time = time.perf_counter()
            # Send query for the request.
            msg = msgspec.msgpack.encode((GET_META_MSG, remote_rank))
            # Set receive timeout to 5 seconds to avoid hanging on dead server
            sock.setsockopt(zmq.RCVTIMEO, 5000)  # milliseconds
            sock.send(msg)
            handshake_bytes = sock.recv()

            # Decode handshake payload to get compatibility hash
            handshake_decoder = msgspec.msgpack.Decoder(NixlHandshakePayload)
            try:
                handshake_payload = handshake_decoder.decode(handshake_bytes)
            except (msgspec.DecodeError, msgspec.ValidationError) as e:
                raise RuntimeError(
                    f"Failed to decode NixlHandshakePayload. This likely indicates "
                    f"an incompatibility between connector version. Error: {e}"
                ) from e
            got_metadata_time = time.perf_counter()
            logger.debug(
                "NIXL handshake: get metadata took: %s",
                got_metadata_time - start_time,
            )

            # Check compatibility hash BEFORE decoding agent metadata
            assert self.compat_hash is not None
            if (
                self.enforce_compat_hash
                and handshake_payload.compatibility_hash != self.compat_hash
            ):
                raise RuntimeError(
                    f"NIXL compatibility hash mismatch. "
                    f"Local: {self.compat_hash}, "
                    f"Remote: {handshake_payload.compatibility_hash}. "
                    f"Prefill and decode instances have incompatible "
                    f"configurations. This may be due to: different vLLM versions,"
                    f" models, dtypes, KV cache layouts, attention backends, etc. "
                    f"Both instances must use identical configurations."
                    f"Disable this check using "
                    f'--kv-transfer-config \'{{"kv_connector_extra_config": '
                    f'{{"enforce_handshake_compat": false}}}}\''
                )

            logger.info(
                "NIXL compatibility check passed (hash: %s)",
                handshake_payload.compatibility_hash,
            )

            # Decode agent metadata
            metadata_decoder = msgspec.msgpack.Decoder(NixlAgentMetadata)
            try:
                metadata = metadata_decoder.decode(
                    handshake_payload.agent_metadata_bytes
                )
            except (msgspec.DecodeError, msgspec.ValidationError) as e:
                # This should not happen if hash matched
                raise RuntimeError(
                    f"Failed to decode NixlAgentMetadata. Error: {e}"
                ) from e

            # Ensure engine id matches.
            if metadata.engine_id != expected_engine_id:
                raise RuntimeError(
                    f"Remote NIXL agent engine ID mismatch. "
                    f"Expected {expected_engine_id},"
                    f"received {metadata.engine_id}."
                )

            setup_agent_time = time.perf_counter()
            # Register Remote agent.
            remote_agent_name = self.add_remote_agent(
                metadata, remote_rank, remote_tp_size
            )
            logger.debug(
                "NIXL handshake: add agent took: %s",
                setup_agent_time - got_metadata_time,
            )
            remote_rank_to_agent_name[remote_rank] = remote_agent_name
    return remote_rank_to_agent_name
def initialize_host_xfer_buffer(self, kv_caches: dict[str, torch.Tensor]) -> None:
"""
Initialize transfer buffer in CPU mem for accelerators
NOT directly supported by NIXL (e.g., tpu)
"""
xfer_buffers: dict[str, torch.Tensor] = {}
inv_order = [0, 1, 3, 2, 4]
try:
for layer_name, kv_cache in kv_caches.items():
kv_shape = kv_cache.shape
kv_dtype = kv_cache.dtype
permute_shape = False
if (
self.kv_cache_layout == "NHD"
and self.vllm_config.kv_transfer_config is not None
and self.vllm_config.kv_transfer_config.enable_permute_local_kv
):
logger.info_once(
"'enable_permute_local_kv' flag is enabled while "
"device KV Layout is NHD. Init host buffer with"
" HND to better support Decode/Prefill TP_ratio > 1."
)
# Since NHD will not support Decode/Prefill TP_ratio > 1,
# we can leverage host_buffer for permute
self.host_buffer_kv_cache_layout = "HND"
kv_shape = (
tuple(kv_shape[i] for i in inv_order)
if not self.use_mla
else kv_shape
)
permute_shape = not self.use_mla
xfer_buffers[layer_name] = torch.empty(
kv_shape, dtype=kv_dtype, device="cpu"
)
if permute_shape:
xfer_buffers[layer_name] = xfer_buffers[layer_name].permute(
inv_order
)
except MemoryError as e:
logger.error("NIXLConnectorWorker gets %s.", e)
raise
self.host_xfer_buffers = xfer_buffers
def set_host_xfer_buffer_ops(self, copy_operation: CopyBlocksOp):
"""Assign copy (d2h, h2d) operations when host buffer is used."""
# Set a no-op if the host buffer is not cpu.
if self.kv_buffer_device != "cpu":
return
# Set a no-op if self.device_type is 'cpu'.
if self.device_type == "cpu":
return
assert self.use_host_buffer
self.copy_blocks = copy_operation
def _log_failure(
    self,
    failure_type: str,
    req_id: str | None,
    msg: str = "",
    error: Exception | None = None,
    meta: ReqMeta | None = None,
    **extra_context,
):
    """Log transfer failure with structured context for easier debugging."""
    # Insertion order is deliberate: the dict is rendered into the log line.
    context: dict[str, Any] = {}
    context["failure_type"] = failure_type
    context["request_id"] = req_id
    context["engine_id"] = self.engine_id

    if meta is None and req_id is not None:
        # Fall back to the metadata tracked for in-flight transfers.
        meta = self._recving_metadata.get(req_id)
    if meta and meta.remote:
        remote = meta.remote
        context["remote_engine_id"] = remote.engine_id
        context["remote_request_id"] = remote.request_id
        context["remote_host"] = remote.host
        context["remote_port"] = remote.port
        context["num_local_blocks"] = len(meta.local_block_ids)
        context["num_remote_blocks"] = len(remote.block_ids)
        context["local_block_ids_sample"] = meta.local_block_ids[:10]

    context.update(extra_context)

    if msg:
        failure_type = f"{failure_type}. {msg}"

    logger.error(
        "NIXL transfer failure: %s | Context: %s",
        failure_type,
        context,
        exc_info=error is not None,
        stacklevel=2,
    )
def _background_nixl_handshake(
    self, req_id: str, remote_engine_id: EngineId, meta: ReqMeta
):
    """Kick off (or join) an async handshake with a remote engine.

    Called with self._handshake_lock held by the caller's context; reuses
    an in-flight handshake future per remote engine so concurrent requests
    to the same remote perform only one handshake.
    """
    # Do NIXL handshake in background and add to _ready_requests when done.
    fut = self._handshake_futures.get(remote_engine_id)
    if fut is None:
        # First request for this remote: start the handshake.
        assert meta.remote is not None
        fut = self._handshake_initiation_executor.submit(
            self._nixl_handshake,
            meta.remote.host,
            meta.remote.port,
            meta.tp_size,
            remote_engine_id,
        )
        self._handshake_futures[remote_engine_id] = fut

        # eid is bound as a default arg to avoid late-binding closure bugs.
        def done_callback(f: Future[dict[int, str]], eid=remote_engine_id):
            with self._handshake_lock:
                del self._handshake_futures[eid]
                try:
                    self._remote_agents[eid] = f.result()
                except Exception as e:
                    self._log_failure(
                        failure_type="handshake_setup_failed",
                        req_id=None,
                        error=e,
                        remote_engine_id=eid,
                    )

        fut.add_done_callback(done_callback)

    # check handshake success before proceeding with request
    def request_ready(f: Future[Any], entry=(req_id, meta)):
        try:
            # check if handshake succeeded
            f.result()
            self._ready_requests.put(entry)
        except Exception as e:
            # handshake failed - mark blocks as invalid
            self._log_failure(
                failure_type="handshake_failed",
                req_id=req_id,
                error=e,
                meta=meta,
            )
            if req_meta := self._recving_metadata.get(req_id):
                self._invalid_block_ids.update(req_meta.local_block_ids)
            self._failed_recv_reqs.add(req_id)

    fut.add_done_callback(request_ready)
def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]):
    """Register the KV Cache data in nixl.

    Builds the TP/KV topology, computes the handshake compatibility hash,
    registers each unique cache region with NIXL, prepares the local
    transfer descriptor list, and publishes this worker's handshake
    payload for remote instances to fetch.
    """
    self.kv_topo = TpKVTopology(
        tp_rank=self.tp_rank,
        engine_id=self.engine_id,
        remote_tp_size=self._tp_size,  # shared state
        remote_block_size=self._block_size,  # shared state
        is_mla=self.use_mla,
        total_num_kv_heads=self.model_config.get_total_num_kv_heads(),
        attn_backend=self.attn_backend,
        tensor_shape=next(iter(kv_caches.values())).shape,
    )
    self.compat_hash = compute_nixl_compatibility_hash(
        self.vllm_config, self.backend_name, self.kv_topo.cross_layers_blocks
    )
    if self.use_host_buffer:
        # Register the CPU staging buffers with NIXL, not device memory.
        self.initialize_host_xfer_buffer(kv_caches=kv_caches)
        assert len(self.host_xfer_buffers) == len(kv_caches), (
            f"host_buffer: {len(self.host_xfer_buffers)}, "
            f"kv_caches: {len(kv_caches)}"
        )
        xfer_buffers = self.host_xfer_buffers
    else:
        xfer_buffers = kv_caches
        assert not self.host_xfer_buffers, (
            "host_xfer_buffer should not be initialized when "
            f"kv_buffer_device is {self.kv_buffer_device}"
        )

    logger.info(
        "Registering KV_Caches. use_mla: %s, kv_buffer_device: %s, "
        "use_host_buffer: %s",
        self.use_mla,
        self.kv_buffer_device,
        self.use_host_buffer,
    )

    caches_data = []
    # With hybrid allocator, layers can share a kv cache tensor
    seen_base_addresses = []

    # Note(tms): I modified this from the original region setup code.
    # K and V are now in different regions. Advantage is that we can
    # elegantly support MLA and any cases where the K and V tensors
    # are non-contiguous (it's not locally guaranteed that they will be)
    # Disadvantage is that the encoded NixlAgentMetadata is now larger
    # (roughly 8KB vs 5KB).
    # Conversely for FlashInfer, K and V are registered in the same region
    # to better exploit the memory layout (ie num_blocks is the first dim).
    tensor_size_bytes = None
    # Enable different block lengths for different layers when MLA is used.
    self.block_len_per_layer = list[int]()
    self.slot_size_per_layer = list[int]()  # HD bytes in kv terms
    for layer_name, cache_or_caches in xfer_buffers.items():
        cache_list = (
            cache_or_caches if self.kv_topo.split_k_and_v else [cache_or_caches]
        )
        for cache in cache_list:
            base_addr = cache.data_ptr()
            if base_addr in seen_base_addresses:
                # Layers sharing a tensor are registered only once.
                continue
            logger.debug(
                "Registering layer %s with cache shape: %s", layer_name, cache.shape
            )
            kernel_block_size = cache.shape[self.kv_topo.block_size_position]
            if self.block_size != kernel_block_size:
                logger.info_once(
                    "User-specified logical block size (%s) does not match"
                    " physical kernel block size (%s). Using the latter. ",
                    self.block_size,
                    kernel_block_size,
                )
                # Track how many kernel blocks form one logical block so
                # logical block ids can be expanded later.
                self._physical_blocks_per_logical_kv_block = (
                    self.block_size // kernel_block_size
                )
                self.block_size = kernel_block_size
                self._block_size[self.engine_id] = kernel_block_size

            seen_base_addresses.append(base_addr)
            curr_tensor_size_bytes = cache.numel() * cache.element_size()

            if tensor_size_bytes is None:
                # First region seen fixes num_blocks for all others.
                tensor_size_bytes = curr_tensor_size_bytes
                self.num_blocks = cache.shape[0]

            assert cache.shape[0] == self.num_blocks, (
                "All kv cache tensors must have the same number of blocks"
            )

            self.block_len_per_layer.append(
                curr_tensor_size_bytes // self.num_blocks
            )
            self.slot_size_per_layer.append(
                self.block_len_per_layer[-1] // self.block_size
            )

            if not self.use_mla:
                # Different kv cache shape is not supported by HeteroTP
                assert tensor_size_bytes == curr_tensor_size_bytes, (
                    "All kv cache tensors must have the same size"
                )
            # Need to make sure the device ID is non-negative for NIXL,
            # Torch uses -1 to indicate CPU tensors.
            self.device_id = max(cache.get_device(), 0)

            caches_data.append(
                (base_addr, curr_tensor_size_bytes, self.device_id, "")
            )

    logger.debug(
        "Different block lengths collected: %s", set(self.block_len_per_layer)
    )
    assert len(self.block_len_per_layer) == len(seen_base_addresses)
    assert self.num_blocks != 0

    self.kv_caches_base_addr[self.engine_id][self.tp_rank] = seen_base_addresses
    self.num_regions = len(caches_data)
    self.num_layers = len(xfer_buffers.keys())

    descs = self.nixl_wrapper.get_reg_descs(caches_data, self.nixl_memory_type)
    logger.debug("Registering descs: %s", caches_data)
    self.nixl_wrapper.register_memory(descs, backends=self.nixl_backends)
    logger.debug("Done registering descs")
    self._registered_descs.append(descs)

    self.device_kv_caches = kv_caches
    self.dst_num_blocks[self.engine_id] = self.num_blocks
    if self.kv_topo.is_kv_layout_blocks_first:
        # Joint-KV layout: each registered block holds K then V, so a
        # per-kv slot is half the registered slot size.
        for i in range(len(self.slot_size_per_layer)):
            assert self.slot_size_per_layer[i] % 2 == 0
            self.slot_size_per_layer[i] //= 2

        # NOTE (NickLucche) When FlashInfer is used, memory is registered
        # with joint KV for each block. This minimizes the overhead in
        # registerMem allowing faster descs queries. In order to be able to
        # split on kv_heads dim as required by heterogeneous TP, one must
        # be able to index K/V separately. Hence we double the number
        # of 'virtual' regions here and halve `block_len` below.
        self.num_regions *= 2

    # Register local/src descr for NIXL xfer.
    self.seen_base_addresses = seen_base_addresses
    self.src_xfer_handles_by_block_size[self.block_size], self.src_blocks_data = (
        self.register_local_xfer_handler(self.block_size)
    )

    # TODO(mgoin): Hybrid memory allocator is currently disabled for
    # models with local attention (Llama 4). Can remove this once enabled.
    if self.model_config.hf_config.model_type == "llama4":
        from transformers import Llama4TextConfig

        assert isinstance(self.model_config.hf_text_config, Llama4TextConfig)
        llama4_config = self.model_config.hf_text_config
        no_rope_layers = llama4_config.no_rope_layers
        chunk_size = llama4_config.attention_chunk_size
        chunk_block_size = math.ceil(chunk_size / self.block_size)
        for layer_idx in range(self.num_layers):
            # no_rope_layers[layer_idx] == 0 means NoPE (global)
            # Any other value means RoPE (local chunked)
            is_local_attention = no_rope_layers[layer_idx] != 0
            block_window = chunk_block_size if is_local_attention else None
            self.block_window_per_layer.append(block_window)
        logger.debug(
            "Llama 4 block window per layer mapping: %s",
            self.block_window_per_layer,
        )
        assert len(self.block_window_per_layer) == self.num_layers

    # After KV Caches registered, listen for new connections.
    agent_metadata = NixlAgentMetadata(
        engine_id=self.engine_id,
        agent_metadata=self.nixl_wrapper.get_agent_metadata(),
        device_id=self.device_id,
        kv_caches_base_addr=self.kv_caches_base_addr[self.engine_id][self.tp_rank],
        num_blocks=self.num_blocks,
        block_lens=self.block_len_per_layer,
        kv_cache_layout=self.kv_cache_layout
        if not self.use_host_buffer
        else self.host_buffer_kv_cache_layout,
        block_size=self.block_size,
    )
    # Wrap metadata in payload with hash for defensive decoding
    assert self.compat_hash is not None
    encoder = msgspec.msgpack.Encoder()
    self.xfer_handshake_metadata = NixlHandshakePayload(
        compatibility_hash=self.compat_hash,
        agent_metadata_bytes=encoder.encode(agent_metadata),
    )
def register_local_xfer_handler(
    self,
    block_size: int,
) -> tuple[int, list[tuple[int, int, int]]]:
    """Prepare a local NIXL transfer descriptor list for ``block_size``.

    Called at init time with the local block size. When a remote uses a
    smaller block size, it is called again with the remote size so that
    local regions are sliced into transfer units matching the remote's
    block length.

    Returns:
        The prepared dlist handle and the raw (addr, len, device_id)
        descriptor tuples.
    """
    assert self.kv_topo is not None
    ratio = self.block_size // block_size
    # Each local block is viewed as `ratio` smaller virtual blocks.
    virtual_num_blocks = self.num_blocks * ratio
    blocks_data: list[tuple[int, int, int]] = []
    for layer_idx, base_addr in enumerate(self.seen_base_addresses):
        kv_block_len = (
            self.get_backend_aware_kv_block_len(layer_idx=layer_idx) // ratio
        )
        layer_block_len = self.block_len_per_layer[layer_idx] // ratio
        k_addrs = [
            base_addr + block_id * layer_block_len
            for block_id in range(virtual_num_blocks)
        ]
        blocks_data.extend((addr, kv_block_len, self.device_id) for addr in k_addrs)
        if self.kv_topo.is_kv_layout_blocks_first:
            # Joint-KV layouts: append V descriptors after all K
            # descriptors of this layer, preserving the descs ordering
            # required for contiguous-head selection across TP ranks.
            blocks_data.extend(
                (addr + kv_block_len, kv_block_len, self.device_id)
                for addr in k_addrs
            )
    logger.debug(
        "Created %s blocks for src engine %s and rank %s on device id %s",
        len(blocks_data),
        self.engine_id,
        self.tp_rank,
        self.device_id,
    )

    descs = self.nixl_wrapper.get_xfer_descs(blocks_data, self.nixl_memory_type)
    # NIXL_INIT_AGENT to be used for preparations of local descs.
    return self.nixl_wrapper.prep_xfer_dlist("NIXL_INIT_AGENT", descs), blocks_data
def add_remote_agent(
    self,
    nixl_agent_meta: NixlAgentMetadata,
    remote_tp_rank: int = 0,
    remote_tp_size: int = 1,
) -> str:
    """
    Add the remote NIXL agent and prepare the descriptors for reading cache
    blocks from remote.

    In particular, handle both homogeneous and heterogeneous TP. The former
    requires local rank_i to read from remote rank_i.
    The latter, in the case of D.world_size < P.world_size, requires that a
    local (D) TP worker reads from multiple remote (P) TP workers.
    Conversely, assuming D.world_size > P.world_size, two or more local TP
    workers will read from a single remote TP worker.

    Here's an example for the last case described above (non-MLA):

    rank_offset     p_remote_tp_rank
    (kv split no)
    --------------------------------
        0                 0      Worker0  ---- 1st half of KV ----> Worker0 [ KV Cache ]
                                                                   /
        1                 0      Worker1  ---- 2nd half of KV -----/

        0                 1      Worker2  ---- 1st half of KV ----> Worker1 [ KV Cache ]
                                                                   /
        1                 1      Worker3  ---- 2nd half of KV -----/


                          Decoder TP workers                      Prefix TP workers
                            (world_size=4)                          (world_size=2)
                                                tp_ratio = 4 // 2 = 2

    Considering the KV Caches, if P-Worker_i has cache size [2, num_blocksP, kv_heads, block_size, head_dim]
    then D-Worker_j has [2, num_blocksD, kv_heads//tp_ratio, block_size, head_dim]. Mind the "HND" layout format.
    Assuming num_blocksD >= num_blocksP, D-Worker0 reads from P-Worker0 by preparing the kv_heads//tp_ratio
    first heads from all the slots of all the blocks. D-Worker1 will do the same, but reading the second split
    along the kv_heads dimension, and so forth until "tp_ratio" D TP workers have pulled from P-Worker0.

    Note that the above will also hold true for the homogeneous TP case, where tp_ratio evaluates to 1.

    Regarding MLA case, the cache is replicated across TP workers so the rank_offset will just always be 0
    so that the whole cache is shared by "tp_ratio" D TP workers.
    """  # noqa: E501
    engine_id = nixl_agent_meta.engine_id
    # TODO re-evaluate refreshing for scaling/recovery
    if remote_tp_rank in self._remote_agents.get(engine_id, {}):
        # Idempotent: a repeated handshake with the same rank is a no-op.
        logger.debug(
            "Remote agent with engine_id %s and rank"
            "%s already exchanged metadata, skip handshake.",
            engine_id,
            remote_tp_rank,
        )
        return self._remote_agents[engine_id][remote_tp_rank]

    ### Register remote agent metadata
    if engine_id not in self._tp_size:
        self._tp_size[engine_id] = remote_tp_size
    if engine_id not in self._block_size:
        self._block_size[engine_id] = nixl_agent_meta.block_size
    remote_agent_name = self.nixl_wrapper.add_remote_agent(
        nixl_agent_meta.agent_metadata
    )

    # Create dst descs and xfer side handles. TP workers have same #blocks
    # so we only register once per engine_id.
    # Example:
    # block_size_ratio > 1:
    #   remote:      | 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|
    #   local origin:|    0      |     1     |  8  |    12   |
    #   local mapped:| 0| 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|
    assert self.kv_topo is not None
    block_size_ratio = self.kv_topo.block_size_ratio_from_engine_id(engine_id)
    if engine_id not in self.dst_num_blocks:
        self.dst_num_blocks[engine_id] = nixl_agent_meta.num_blocks
    # Keep track of remote agent kv caches base addresses.
    self.kv_caches_base_addr[engine_id][remote_tp_rank] = (
        nixl_agent_meta.kv_caches_base_addr
    )
    # Raises on any invariant violation before descriptors are built.
    self._validate_remote_agent_handshake(nixl_agent_meta, remote_tp_size)

    # This is 1 when P and D `--tensor-parallel-size` match. Otherwise,
    # this is the ratio between the two sizes.
    tp_ratio = self.kv_topo.tp_ratio_from_engine_id(engine_id)

    # Handle tp_size>num_kv_heads: replicate KV cache.
    indexes_into_remote = (
        not self.kv_topo.replicates_kv_cache(engine_id) and tp_ratio > 0
    )
    logger.debug(
        "Registering remote agent (%s, rank %s) memory regions with tp_ratio %s",
        engine_id,
        remote_tp_rank,
        tp_ratio,
    )

    ### (Optional) Register local agent memory regions. MLA is not split.
    if (
        tp_ratio < 0
        and not self.use_mla
        and tp_ratio not in self.src_xfer_handles_by_tp_ratio
    ):
        # Remote tp_size > local tp_size: read from multiple remote ranks.
        # Logically "split" own regions into |tp_ratio| chunks. Mind that
        # we only do this once per remote tp_size (replica-friendly).
        self.src_xfer_handles_by_tp_ratio[tp_ratio] = []
        for i in range(-tp_ratio):
            blocks_data = []
            for memory_region in self.src_blocks_data:
                addr, local_block_len, own_tp_rank = memory_region
                # Computing block len layer by layer allows for different
                # block sizes to be used.
                remote_block_len = local_block_len // (-tp_ratio)
                # i-th head-chunk of the local region.
                addr = addr + i * remote_block_len
                blocks_data.append((addr, remote_block_len, own_tp_rank))
            descs = self.nixl_wrapper.get_xfer_descs(
                blocks_data, self.nixl_memory_type
            )
            handle = self.nixl_wrapper.prep_xfer_dlist("NIXL_INIT_AGENT", descs)
            self.src_xfer_handles_by_tp_ratio[tp_ratio].append(handle)

    ### Register remote agent memory regions
    blocks_data = []
    # With homogeneous TP, D pulls the whole kv cache from corresponding
    # rank. With heterogeneous TP, prepare the descriptors by splitting the
    # P KV cache along kv_head dim, of D worker's kv_head size (D>P).
    # Eg. PTP1 DTP2 => P0 KV:[block0-KV_0 | block0-KV_1..].
    # Register all remote blocks, but only the corresponding kv heads.
    for i, base_addr in enumerate(nixl_agent_meta.kv_caches_base_addr):
        # Read our whole local region size from remote.
        local_block_len = self.get_backend_aware_kv_block_len(layer_idx=i)
        remote_kv_block_len = local_block_len // block_size_ratio
        if block_size_ratio > 1:
            # using remote kv_block_len as transfer unit
            local_block_len = remote_kv_block_len
        if tp_ratio < 0 and not self.use_mla:
            # Remote tp is bigger: read a chunk of local region from remote
            local_block_len = local_block_len // (-tp_ratio)
        # Byte offset selecting this rank's head-chunk inside each remote
        # block (0 when the remote cache is replicated).
        rank_offset = (
            self.tp_rank % tp_ratio * remote_kv_block_len
            if indexes_into_remote
            else 0
        )
        for block_id in range(nixl_agent_meta.num_blocks):
            block_offset = block_id * nixl_agent_meta.block_lens[i]
            # For each block, grab the heads chunk belonging to rank_i
            # of size remote_nheads // tp_ratio, which correspond to
            # self.block_len == remote_block_len//tp_ratio bytes.
            addr = base_addr + block_offset + rank_offset
            # (addr, len, device id)
            blocks_data.append((addr, local_block_len, nixl_agent_meta.device_id))

        if self.kv_topo.is_kv_layout_blocks_first:
            # With FlashInfer index V separately to allow head splitting.
            for block_id in range(nixl_agent_meta.num_blocks):
                block_offset = block_id * nixl_agent_meta.block_lens[i]
                addr = base_addr + block_offset + rank_offset
                v_addr = addr + nixl_agent_meta.block_lens[i] // 2
                blocks_data.append(
                    (v_addr, local_block_len, nixl_agent_meta.device_id)
                )

    logger.debug(
        "Created %s blocks for dst engine %s with remote rank %s and local rank %s",
        len(blocks_data),
        engine_id,
        remote_tp_rank,
        self.tp_rank,
    )

    # Register with NIXL.
    descs = self.nixl_wrapper.get_xfer_descs(blocks_data, self.nixl_memory_type)
    self.dst_xfer_side_handles[engine_id][remote_tp_rank] = (
        self.nixl_wrapper.prep_xfer_dlist(remote_agent_name, descs)
    )

    if block_size_ratio > 1:
        # when prefill with smaller block_size, we need to init a
        # new handler with same block_len to match
        self.src_xfer_handles_by_block_size[nixl_agent_meta.block_size] = (
            self.register_local_xfer_handler(nixl_agent_meta.block_size)[0]
        )

    return remote_agent_name
def _validate_remote_agent_handshake(
    self, nixl_agent_meta: NixlAgentMetadata, remote_tp_size: int
):
    """
    Validate the remote agent handshake metadata ensuring the
    invariants hold true.

    Checks TP-size consistency, KV cache layout compatibility, block-length
    relations implied by tp_ratio/block_size_ratio, and block counts.
    Raises AssertionError (or RuntimeError for layout mismatch) otherwise.
    """
    remote_engine_id = nixl_agent_meta.engine_id
    assert self._tp_size[remote_engine_id] == remote_tp_size
    assert self.kv_topo is not None
    # tp_ratio > 0 when local TP >= remote TP, negative otherwise.
    tp_ratio = self.kv_topo.tp_ratio_from_engine_id(remote_engine_id)
    block_size_ratio = self.kv_topo.block_size_ratio_from_engine_id(
        remote_engine_id
    )
    # Num kv_heads > tp_size and P TP > D TP case, not supported
    assert not (tp_ratio < 0 and self.kv_topo.is_kv_replicated(remote_engine_id))

    # Compare against the layout actually registered with NIXL (the host
    # buffer layout when staging through CPU).
    kv_cache_layout = (
        self.kv_cache_layout
        if not self.use_host_buffer
        else self.host_buffer_kv_cache_layout
    )
    if not self.use_mla and nixl_agent_meta.kv_cache_layout != kv_cache_layout:
        if (
            self.kv_transfer_config.enable_permute_local_kv
            and nixl_agent_meta.kv_cache_layout == "HND"
        ):
            logger.info(
                "Remote is HND and local is NHD, enabled additional permute "
                "on local device KV."
            )
            self.enable_permute_local_kv = True
        else:
            raise RuntimeError(
                "Heterogeneous TP expects same kv_cache_layout. "
                "Or enable experimental feature to use HND to NHD support by "
                "setting 'enable_permute_local_kv'=True in --kv-transfer-config."
            )

    # Block len can only vary across layers when using MLA.
    remote_block_len = nixl_agent_meta.block_lens[0]
    if self.use_mla or self.kv_topo.is_kv_replicated(remote_engine_id):
        # With replicated KV cache, only the number of blocks can differ.
        for i in range(len(self.block_len_per_layer)):
            assert (
                self.block_len_per_layer[i] // block_size_ratio
                == nixl_agent_meta.block_lens[i]
            ), "KV cache sizes must match between P and D when replicated"
    else:
        # When MLA is not used, this is a list of the same block length
        for block_len in nixl_agent_meta.block_lens:
            assert block_len == remote_block_len, (
                "All remote layers must have the same block size"
            )
        if tp_ratio > 0:
            # Remote tp is smaller: remote block_len size is bigger
            assert (
                remote_block_len
                == (self.block_len_per_layer[0] * tp_ratio) // block_size_ratio
            ), (
                "Remote P worker KV layer cache must be of shape [2, N, "
                "local_kv_heads*tp_ratio, page_size, head_dim] and same dtype."
            )  # noqa: E501
        else:
            assert block_size_ratio == 1, (
                "Different local/remote block sizes are not supported when"
                " P TP > D TP."
            )
            # Remote tp is bigger: remote block_len size is smaller
            assert remote_block_len == self.block_len_per_layer[0] // (-tp_ratio), (
                "Remote P worker KV layer cache must be of shape [2, N, "
                "local_kv_heads/tp_ratio, page_size, head_dim] and same dtype."
            )  # noqa: E501
    # TP workers that handhshake with same remote have same #blocks.
    assert self.dst_num_blocks[remote_engine_id] == nixl_agent_meta.num_blocks
    # Same number of regions/~layers.
    assert len(nixl_agent_meta.kv_caches_base_addr) == len(self.block_len_per_layer)
def sync_recved_kv_to_device(self, req_id: str, meta: ReqMeta):
"""copy recved kv from host buffer to device."""
assert self.use_host_buffer
assert self.copy_blocks is not None
local_block_ids = meta.local_physical_block_ids
self.copy_blocks(
self.host_xfer_buffers,
self.device_kv_caches,
local_block_ids,
local_block_ids,
"h2d",
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"synced recved kv of request[%s] to device kv buffer,"
"local_block_ids: %s. ",
req_id,
",".join(map(str, local_block_ids)),
)
def save_kv_to_host(self, metadata: NixlConnectorMetadata):
"""copy kv from device to host buffer."""
assert self.use_host_buffer
assert self.copy_blocks is not None
for req_id, meta in metadata.reqs_to_save.items():
meta.local_physical_block_ids = self._logical_to_kernel_block_ids(
meta.local_block_ids
)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"save_load_kv for request[%s] to host xfer buffer."
"local_block_ids: %s. ",
req_id,
",".join(map(str, meta.local_physical_block_ids)),
)
# blocking
self.copy_blocks(
self.device_kv_caches,
self.host_xfer_buffers,
meta.local_physical_block_ids,
meta.local_physical_block_ids,
"d2h",
)
    def post_process_device_kv_on_receive(
        self,
        block_size_ratio: int,
        block_ids_list: list[list[int]],
    ):
        """
        Post process device kv cache after receiving from remote.

        3 types of post processing supported:

        * kv_cache_postprocess_layout => convert from HND to NHD
        * kv_cache_postprocess_blksize => convert from small block size
          to large block size
        * kv_cache_postprocess_blksize_and_layout => convert from small
          block size to large block size and convert from HND to NHD

        Args:
            block_size_ratio: block-size ratio between the caches involved;
                must be >= 1 (only nP < nD is supported, see assert below).
            block_ids_list: per-request lists of local physical block ids
                that were just written and now need post-processing.
        """
        if len(self.device_kv_caches) == 0:
            return
        assert block_size_ratio >= 1, "Only nP < nD supported currently."
        assert self.kv_topo is not None
        # Log which of the three post-processing combinations will run.
        if self.enable_permute_local_kv and block_size_ratio > 1:
            logger.debug(
                "Post-processing device kv cache on receive by converting "
                "block_size with %sx bigger and permuting layout from HND"
                " to NHD.",
                block_size_ratio,
            )
        elif self.enable_permute_local_kv:
            logger.debug(
                "Post-processing device kv cache on receive by permuting layout"
                "from HND to NHD."
            )
        else:
            logger.debug(
                "Post-processing device kv cache on receive by converting "
                "block_size with %sx bigger.",
                block_size_ratio,
            )
        split_k_and_v = self.kv_topo.split_k_and_v
        for block_ids in block_ids_list:
            indices = torch.tensor(block_ids, device=self.device_type, dtype=torch.long)
            # Depending on the backend, K and V are either separate tensors
            # (split_k_and_v) or packed in a single cache tensor.
            for _, cache_or_caches in self.device_kv_caches.items():
                cache_list = cache_or_caches if split_k_and_v else [cache_or_caches]
                for cache in cache_list:
                    if self.enable_permute_local_kv and block_size_ratio > 1:
                        kv_postprocess_blksize_and_layout_on_receive(
                            cache, indices, block_size_ratio
                        )
                    elif self.enable_permute_local_kv:
                        kv_postprocess_layout_on_receive(cache, indices)
                    else:
                        kv_postprocess_blksize_on_receive(
                            cache, indices, block_size_ratio
                        )
    def get_finished(self) -> tuple[set[str], set[str]]:
        """
        Get requests that are done sending or recving on this specific worker.
        The scheduler process (via the MultiprocExecutor) will use this output
        to track which workers are done.

        Returns:
            Tuple of (done_sending, done_recving) request-id sets.
        """
        assert self.kv_topo is not None
        done_sending = self._get_new_notifs()
        done_recving = self._pop_done_transfers(self._recving_transfers)
        # add requests that skipped transfer to done_recving
        done_recving.update(self._failed_recv_reqs)
        self._failed_recv_reqs.clear()
        if len(done_sending) > 0 or len(done_recving) > 0:
            logger.debug(
                "Rank %s, get_finished: %s requests done sending "
                "and %s requests done recving",
                self.tp_rank,
                len(done_sending),
                len(done_recving),
            )

        # Group finished receives by block-size ratio so each group is
        # post-processed in a single pass.
        block_ids_for_blocksize_post_process = defaultdict(list)
        for req_id in done_recving:
            # clean up metadata for completed requests
            meta = self._recving_metadata.pop(req_id, None)
            assert meta is not None, f"{req_id} not found in recving_metadata list"
            assert meta.remote is not None
            if self.use_host_buffer:
                self.sync_recved_kv_to_device(req_id, meta)
            # post processing for heteroblocksize
            block_size_ratio = self.kv_topo.block_size_ratio_from_engine_id(
                meta.remote.engine_id
            )
            # Post-processing is skipped entirely for MLA caches.
            if not self.use_mla and (
                block_size_ratio > 1 or self.enable_permute_local_kv
            ):
                block_ids_for_blocksize_post_process[block_size_ratio].append(
                    meta.local_physical_block_ids
                )
        for (
            block_size_ratio,
            block_ids_list,
        ) in block_ids_for_blocksize_post_process.items():
            self.post_process_device_kv_on_receive(block_size_ratio, block_ids_list)

        # Handle timeout to avoid stranding blocks on remote.
        now = time.perf_counter()
        while self._reqs_to_send:
            req_id, expires = next(iter(self._reqs_to_send.items()))
            # Sorted dict, oldest requests are put first so we can exit early.
            if now < expires:
                break
            count = self.consumer_notification_counts_by_req.pop(req_id, 0)
            self.xfer_stats.record_kv_expired_req()
            logger.warning(
                "Releasing expired KV blocks for request %s which were "
                "retrieved by %d decode worker(s) within %d seconds.",
                req_id,
                count,
                envs.VLLM_NIXL_ABORT_REQUEST_TIMEOUT,
            )
            self._reqs_to_process.remove(req_id)
            del self._reqs_to_send[req_id]
            # Expired requests are reported as done so their blocks get freed.
            done_sending.add(req_id)
        return done_sending, done_recving
    def _get_new_notifs(self) -> set[str]:
        """
        Get req_ids which got a remote xfer message. When multiple consumers
        are reading from the same producer (heterogeneous TP scenario), wait
        for all consumers to be done pulling.

        Notification payload format is "<req_id>:<consumer_tp_size>"
        (see the ``notif_id`` construction in ``_read_blocks``).
        """
        assert self.kv_topo is not None
        notified_req_ids: set[str] = set()
        for notifs in self.nixl_wrapper.get_new_notifs().values():
            for notif in notifs:
                # rsplit: req_id itself may contain ':' characters.
                req_id, tp_size = notif.decode("utf-8").rsplit(":", 1)
                if (
                    req_id not in self._reqs_to_send
                    and req_id not in self._reqs_to_process
                ):
                    # Unknown request: it may have already expired and been
                    # cleaned up on this side.
                    logger.error(
                        "Potentially invalid KV blocks for "
                        "unrecognized request %s were retrieved by "
                        "a decode worker. They may have expired.",
                        req_id,
                    )
                    continue
                # NOTE: `tp_ratio` is the opposite when swapping local<>remote
                n_consumers = int(tp_size)
                tp_ratio = self.kv_topo.tp_ratio(n_consumers)
                # Number of reads *per producer* to wait for.
                # When remote D TP > local P TP we expect `tp_ratio` reads.
                consumers_per_producer = (
                    -tp_ratio if n_consumers > self.world_size else 1
                )
                self.consumer_notification_counts_by_req[req_id] += 1
                # Wait all consumers (D) to be done reading before freeing.
                if (
                    self.consumer_notification_counts_by_req[req_id]
                    == consumers_per_producer
                ):
                    notified_req_ids.add(req_id)
                    del self.consumer_notification_counts_by_req[req_id]
                    self._reqs_to_process.remove(req_id)
                    self._reqs_to_send.pop(req_id, None)
        return notified_req_ids
    def _pop_done_transfers(self, transfers: dict[str, list[int]]) -> set[str]:
        """
        Pop completed xfers by checking for DONE state.
        Args:
            transfers: dict of req_id -> list[running_xfer]
        Returns:
            set of req_ids that have all done xfers
        """
        done_req_ids: set[str] = set()
        # Iterate over a snapshot since entries may be deleted in the loop.
        for req_id, handles in list(transfers.items()):
            in_progress = []
            for handle in handles:
                try:
                    xfer_state = self.nixl_wrapper.check_xfer_state(handle)
                    if xfer_state == "DONE":
                        # Get telemetry from NIXL
                        res = self.nixl_wrapper.get_xfer_telemetry(handle)
                        self.xfer_stats.record_transfer(res)
                        self.nixl_wrapper.release_xfer_handle(handle)
                    elif xfer_state == "PROC":
                        # Transfer still running; poll again next step().
                        in_progress.append(handle)
                        continue
                    else:
                        # Any other state is treated as a failure.
                        self._log_failure(
                            failure_type="transfer_failed",
                            msg="Marking blocks as invalid",
                            req_id=req_id,
                            xfer_state=xfer_state,
                        )
                        self._handle_failed_transfer(req_id, handle)
                except Exception as e:
                    # NOTE(review): if release_xfer_handle itself raised in the
                    # DONE branch, _handle_failed_transfer releases the same
                    # handle again — confirm NIXL tolerates a double release.
                    self._log_failure(
                        failure_type="transfer_exception",
                        msg="Marking blocks as invalid",
                        req_id=req_id,
                        error=e,
                    )
                    self._handle_failed_transfer(req_id, handle)
            if not in_progress:
                # Only report request as completed when all transfers are done.
                done_req_ids.add(req_id)
                del transfers[req_id]
            else:
                transfers[req_id] = in_progress
        return done_req_ids
def _handle_failed_transfer(self, req_id: str, handle: int):
"""
Handle a failed transfer by marking all (logical) blocks as invalid and
recording the failure.
Args:
req_id: The request ID.
handle: The transfer handle.
"""
# Use .get() here as the metadata cleanup is handled by get_finished()
if meta := self._recving_metadata.get(req_id):
self._invalid_block_ids.update(meta.local_block_ids)
self.nixl_wrapper.release_xfer_handle(handle)
self.xfer_stats.record_failed_transfer()
    def start_load_kv(self, metadata: NixlConnectorMetadata):
        """
        Start loading by triggering non-blocking nixl_xfer.
        We check for these trnxs to complete in each step().
        """
        for req_id, meta in metadata.reqs_to_recv.items():
            # Translate logical block ids to kernel physical ids on both the
            # local and remote side before building descriptors.
            meta.local_physical_block_ids = self._logical_to_kernel_block_ids(
                meta.local_block_ids
            )
            assert meta.remote is not None
            meta.remote.block_ids = self._logical_to_kernel_block_ids(
                meta.remote.block_ids
            )
            remote_engine_id = meta.remote.engine_id
            logger.debug(
                "start_load_kv for request %s from remote engine %s. "
                "Num local_block_ids: %s. Num remote_block_ids: %s. ",
                req_id,
                remote_engine_id,
                len(meta.local_physical_block_ids),
                len(meta.remote.block_ids),
            )
            # always store metadata for failure recovery
            self._recving_metadata[req_id] = meta
            if remote_engine_id not in self._remote_agents:
                # Initiate handshake with remote engine to exchange metadata.
                with self._handshake_lock:
                    # Re-check under the lock: the handshake may have completed
                    # while we were waiting to acquire it.
                    if remote_engine_id not in self._remote_agents:
                        self._background_nixl_handshake(req_id, remote_engine_id, meta)
                        continue

            # Handshake already completed, start async read xfer.
            self._read_blocks_for_req(req_id, meta)

        # Start transfers for requests whose handshakes have now finished.
        while not self._ready_requests.empty():
            self._read_blocks_for_req(*self._ready_requests.get_nowait())

        # Keep around the requests that have been part of a batch. This is
        # needed because async scheduling pushes the misalignment between the
        # moment in which requests expiration is set (P side) and the moment in
        # which blocks are read from D. As P can now more easily lag behind D
        # while processing the next batch, we make sure to only set an
        # expiration for requests that have not been read from D yet.
        for req_id in metadata.reqs_in_batch:
            self._reqs_to_process.add(req_id)

        # Remove all requests that are not to be processed (eg aborted).
        for req_id in metadata.reqs_not_processed:
            self._reqs_to_process.discard(req_id)
            # We should never get an abort after setting an expiry timer
            assert req_id not in self._reqs_to_send

        # Add to requests that are waiting to be read and track expiration.
        for req_id, expiration_time in metadata.reqs_to_send.items():
            if req_id in self._reqs_to_process:
                self._reqs_to_send[req_id] = expiration_time
    def _read_blocks_for_req(self, req_id: str, meta: ReqMeta):
        """Issue the READ transfer(s) needed to pull this request's KV
        blocks from its remote engine, one call per target remote rank."""
        assert meta.remote is not None and self.kv_topo is not None
        remote_ranks = self.kv_topo.get_target_remote_ranks_from_engine_id(
            meta.remote.engine_id
        )
        tp_ratio = self.kv_topo.tp_ratio_from_engine_id(meta.remote.engine_id)
        # D may have to perform multiple reads from different remote ranks.
        for i, remote_rank in enumerate(remote_ranks):
            if self.use_mla and tp_ratio < 0 and i > 0:
                # MLA opt: when P TP > D TP, only a single read is executed for
                # the first remote rank (cache is duplicated)..
                break
            remote_block_size = self.kv_topo.remote_block_size[meta.remote.engine_id]
            logger.debug(
                "Remote agent %s available, calling _read_blocks"
                " on remote rank %s with remote block size %s for req %s",
                meta.remote.engine_id,
                remote_rank,
                remote_block_size,
                req_id,
            )
            # Get side handles.
            if tp_ratio < 0 and not self.use_mla:
                assert remote_block_size == self.block_size
                # Remote tp_size > local tp_size: we must perform multiple
                # reads. Get the memory chunk onto which we will write to.
                local_xfer_side_handle = self.src_xfer_handles_by_tp_ratio[tp_ratio][i]
            else:
                # Single read from remote, we write to the whole memory region.
                # Also handle remote block size different from local block size.
                local_xfer_side_handle = self.src_xfer_handles_by_block_size[
                    remote_block_size
                ]
            # Destination handle: remote_engine_id -> remote_rank -> handle.
            remote_xfer_side_handle = self.dst_xfer_side_handles[meta.remote.engine_id][
                remote_rank
            ]
            self._read_blocks(
                request_id=req_id,
                dst_engine_id=meta.remote.engine_id,
                remote_request_id=meta.remote.request_id,
                local_block_ids=meta.local_physical_block_ids,
                remote_block_ids=meta.remote.block_ids,
                remote_rank=remote_rank,
                local_xfer_side_handle=local_xfer_side_handle,
                remote_xfer_side_handle=remote_xfer_side_handle,
            )
        if self.use_mla and tp_ratio < 0:
            # ..but we still need to notify the other remote ranks that we
            # have the blocks we need so they can update the request state.
            notif_id = f"{req_id}:{self.world_size}".encode()
            remote_agents = self._remote_agents[meta.remote.engine_id]
            for rank_to_notify, agent in remote_agents.items():
                if rank_to_notify != remote_rank:
                    self.nixl_wrapper.send_notif(agent, notif_msg=notif_id)
    def _read_blocks(
        self,
        local_block_ids: list[int],
        remote_block_ids: list[int],
        dst_engine_id: str,
        request_id: str,
        remote_request_id: str,
        remote_rank: int,
        local_xfer_side_handle: int,
        remote_xfer_side_handle: int,
    ):
        """
        Post a READ point-to-point xfer request from a single local worker to
        a single remote worker.

        On transfer-setup failure the request's logical blocks are marked
        invalid and the request is queued on ``_failed_recv_reqs``.
        """
        assert self.kv_topo is not None
        block_size_ratio = self.kv_topo.block_size_ratio_from_engine_id(dst_engine_id)
        if block_size_ratio > 1:
            local_block_ids = self.get_mapped_blocks(
                np.asarray(local_block_ids), block_size_ratio
            )
            if len(local_block_ids) > len(remote_block_ids):
                # NOTE:
                # get_mapped_blocks will always expand block_ids for n times.
                # ex:
                # prefill block_ids with block_size as 4:
                #   [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                # Local decode block_ids with block_size as 16: [1, 2, 3]
                # expand decode block_ids with get_mapped_blocks from [1, 2, 3]
                # to [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
                # Then we clip local to align with prefill
                #   [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] to
                #   [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                local_block_ids = local_block_ids[: len(remote_block_ids)]

        # NOTE(rob): having the staging blocks be on the READER side is
        # not going to work well (since we will have to call rearrange tensors).
        # after we detect the txn is complete (which means we cannot make the
        # read trxn async easily). If we want to make "READ" happen cleanly,
        # then we will need to have the staging blocks on the remote side.
        # NOTE(rob): according to nvidia the staging blocks are used to
        # saturate IB with heterogeneous TP sizes. We should remove the staging
        # blocks until we are ready.

        # Number of D TP workers that will read from dst P. Propagate info
        # on notification so that dst worker can wait before freeing blocks.
        notif_id = f"{remote_request_id}:{self.world_size}".encode()

        # Full prefix cache hit: do not need to read remote blocks,
        # just notify P worker that we have the blocks we need.
        num_local_blocks = len(local_block_ids)
        if num_local_blocks == 0:
            agent_name = self._remote_agents[dst_engine_id][remote_rank]
            try:
                self.nixl_wrapper.send_notif(agent_name, notif_msg=notif_id)
            except Exception as e:
                # Best-effort: on notification failure the P side will free
                # its blocks once the expiry timeout fires.
                self._log_failure(
                    failure_type="notification_failed",
                    msg="P worker blocks will be freed after timeout. "
                    "This may indicate network issues.",
                    req_id=request_id,
                    error=e,
                    dst_engine_id=dst_engine_id,
                    remote_rank=remote_rank,
                    remote_agent_name=agent_name,
                )
                self.xfer_stats.record_failed_notification()
            return

        # Partial prefix cache hit: just read uncomputed blocks.
        num_remote_blocks = len(remote_block_ids)
        assert num_local_blocks <= num_remote_blocks
        if num_local_blocks < num_remote_blocks:
            remote_block_ids = remote_block_ids[-num_local_blocks:]

        # NOTE (nicolo) With homogeneous TP, each TP worker loads KV from
        # corresponding rank. With heterogeneous TP, fixing D>P, the D tp
        # workers will issue xfers to parts of the P worker remote kv caches.

        # Get descs ids.
        local_block_descs_ids: np.ndarray
        remote_block_descs_ids: np.ndarray
        if not self.block_window_per_layer:
            # Default case: assume global attention
            remote_block_descs_ids = self._get_block_descs_ids(
                dst_engine_id,
                remote_block_ids,
            )
            local_block_descs_ids = self._get_block_descs_ids(
                self.engine_id,
                local_block_ids,
                block_size_ratio=block_size_ratio,
            )
        else:
            # TODO(mgoin): remove this once we have hybrid memory allocator
            # Optimization for models with local attention (Llama 4)
            local_descs_list = []
            remote_descs_list = []
            for layer_idx, block_window in enumerate(self.block_window_per_layer):
                # For each layer:
                if block_window is None:
                    # If not chunked, we just use the
                    # full block lists (global attention)
                    layer_local_block_ids = local_block_ids
                    layer_remote_block_ids = remote_block_ids
                else:
                    # If chunked, get the last block_window blocks
                    layer_local_block_ids = local_block_ids[-block_window:]
                    layer_remote_block_ids = remote_block_ids[-block_window:]

                # Get descs ids for the layer.
                layer_local_desc_ids = self._get_block_descs_ids(
                    self.engine_id,
                    layer_local_block_ids,
                    layer_idx,
                    block_size_ratio=block_size_ratio,
                )
                layer_remote_desc_ids = self._get_block_descs_ids(
                    dst_engine_id,
                    layer_remote_block_ids,
                    layer_idx,
                )
                local_descs_list.append(layer_local_desc_ids)
                remote_descs_list.append(layer_remote_desc_ids)
            local_block_descs_ids = np.concatenate(local_descs_list)
            remote_block_descs_ids = np.concatenate(remote_descs_list)

        assert len(local_block_descs_ids) == len(remote_block_descs_ids)

        # Prepare transfer with Nixl.
        handle = None
        try:
            handle = self.nixl_wrapper.make_prepped_xfer(
                "READ",
                local_xfer_side_handle,
                local_block_descs_ids,
                remote_xfer_side_handle,
                remote_block_descs_ids,
                notif_msg=notif_id,
            )

            # Begin async xfer.
            self.nixl_wrapper.transfer(handle)

            # Use handle to check completion in future step().
            self._recving_transfers[request_id].append(handle)
        except Exception as e:
            # mark all (logical) blocks for this request as invalid
            self._log_failure(
                failure_type="transfer_setup_failed",
                req_id=request_id,
                msg="Marking blocks as invalid",
                error=e,
                dst_engine_id=dst_engine_id,
                remote_rank=remote_rank,
            )
            if meta := self._recving_metadata.get(request_id):
                self._invalid_block_ids.update(meta.local_block_ids)
            self.xfer_stats.record_failed_transfer()
            if handle is not None:
                self.nixl_wrapper.release_xfer_handle(handle)
            self._failed_recv_reqs.add(request_id)
def get_mapped_blocks(self, block_ids, block_size_ratio):
"""
Calculates the new set of block IDs by mapping every element
in the (potentially sparse) input array.
Example: block_ids=[0, 2], block_size_ratio=2
get_mapped_blocks 0 1 [2 3] 4 5
# remote is |h0-b0|h1-b0||h0-b1|h1-b1||h0-b1|h1-b1||
# local is |h0-b0......||h1-b0......||h2-b0........
local_block_ids 0 [1] 2
"""
if block_ids.size == 0:
return np.array([], dtype=np.int64)
start_ids = block_ids * block_size_ratio
offsets = np.arange(block_size_ratio)
mapped_2d = start_ids[:, None] + offsets[None, :]
return mapped_2d.flatten().astype(np.int64)
def _get_block_descs_ids(
self,
engine_id: str,
block_ids: list[int],
layer_idx: int | None = None,
block_size_ratio: float | None = None,
) -> np.ndarray:
"""
Get the descs ids for a set of block ids.
If layer_idx is provided, we use the region_ids for the given layer.
Otherwise, we use all regions.
"""
if layer_idx is None:
region_ids = np.arange(self.num_regions)
else:
assert layer_idx < self.num_layers
if self.num_layers < self.num_regions:
# If we have more regions than layers, we assume that
# the regions are organized as [K0, V0, K1, V1, ...]
# and we select K_i and V_i
assert 2 * self.num_layers == self.num_regions
region_ids = np.arange(2 * layer_idx, 2 * layer_idx + 2)
else:
# Otherwise, we assume we have MLA and select i-th layer
assert self.num_layers == self.num_regions
region_ids = np.arange(layer_idx, layer_idx + 1)
num_blocks = self.dst_num_blocks[engine_id]
if block_size_ratio is not None:
num_blocks = int(num_blocks * block_size_ratio)
# Compute the desc ids for each block.
region_ids = region_ids[:, None]
block_ids = np.array(block_ids)[None, :]
descs_ids = region_ids * num_blocks + block_ids
return descs_ids.flatten()
def _logical_to_kernel_block_ids(self, block_ids: list[int]) -> list[int]:
"""
Convert logical block ids to kernel physical block ids.
This is required when the logical block size (the one set by the user)
does not match the one required by the attn backend.
"""
if self._physical_blocks_per_logical_kv_block == 1:
# Noop when physical and logical block sizes are the same
return block_ids
block_ids_np = np.array(block_ids)
block_arange = np.arange(0, self._physical_blocks_per_logical_kv_block).reshape(
1, -1
)
return BlockTable.map_to_kernel_blocks(
block_ids_np, self._physical_blocks_per_logical_kv_block, block_arange
).tolist()
def get_backend_aware_kv_block_len(self, layer_idx: int) -> int:
"""
Get the block length for one K/V element (K and V have the same size).
For FA and other backends, this is equal to the length of the whole
block, as K and V are in separate regions.
For FlashInfer, this is half the length of the whole block, as K and V
share the same region.
"""
assert self.kv_topo is not None
if self.kv_topo.is_kv_layout_blocks_first:
# For indexing only half (either just the K or V part).
block_len = self.block_len_per_layer[layer_idx] // 2
else:
block_len = self.block_len_per_layer[layer_idx]
return block_len
def get_kv_connector_stats(self) -> KVConnectorStats | None:
"""
Get the KV transfer stats for the connector.
"""
# Clear stats for next iteration
if not self.xfer_stats.is_empty():
return self.xfer_stats.clone_and_reset()
return None
def get_block_ids_with_load_errors(self) -> set[int]:
"""
Return and clear the set of block IDs that failed to load.
This is called by the scheduler to identify blocks that need
to be retried after a NIXL transfer failure.
"""
result = self._invalid_block_ids
self._invalid_block_ids = set()
return result
    def __del__(self):
        # Best-effort cleanup on garbage collection. shutdown() guards
        # against partially-initialized instances and clears each container
        # after releasing, so repeated calls are safe.
        self.shutdown()
    def shutdown(self):
        """Shutdown the connector worker.

        Releases all NIXL transfer and dlist handles, detaches remote
        agents, and deregisters memory. Each container is cleared after its
        handles are released, so calling this more than once is a no-op.
        """
        if not hasattr(self, "_handshake_initiation_executor"):
            # error happens during init, no need to shutdown
            return
        self._handshake_initiation_executor.shutdown(wait=False)
        # In-flight receive transfers.
        for handles in self._recving_transfers.values():
            for handle in handles:
                self.nixl_wrapper.release_xfer_handle(handle)
        self._recving_transfers.clear()
        # Local-side descriptor lists, keyed by remote block size.
        for handle in self.src_xfer_handles_by_block_size.values():
            self.nixl_wrapper.release_dlist_handle(handle)
        self.src_xfer_handles_by_block_size.clear()
        # Local-side descriptor lists, keyed by TP ratio.
        for handles in self.src_xfer_handles_by_tp_ratio.values():
            for handle in handles:
                self.nixl_wrapper.release_dlist_handle(handle)
        self.src_xfer_handles_by_tp_ratio.clear()
        # Remote-side descriptor lists (per engine, per rank).
        for dst_xfer_side_handles in self.dst_xfer_side_handles.values():
            for dst_xfer_side_handle in dst_xfer_side_handles.values():
                self.nixl_wrapper.release_dlist_handle(dst_xfer_side_handle)
        self.dst_xfer_side_handles.clear()
        # Disconnect from every known remote agent.
        for remote_agents in self._remote_agents.values():
            for agent_name in remote_agents.values():
                self.nixl_wrapper.remove_remote_agent(agent_name)
        self._remote_agents.clear()
        # Deregister all memory regions registered with NIXL.
        for desc in self._registered_descs:
            self.nixl_wrapper.deregister_memory(desc)
        self._registered_descs.clear()
@contextlib.contextmanager
def zmq_ctx(socket_type: Any, addr: str) -> Iterator[zmq.Socket]:
    """Yield a ZMQ socket at ``addr`` (ROUTER binds, REQ connects),
    destroying its context on exit."""
    if socket_type not in (zmq.ROUTER, zmq.REQ):
        raise ValueError(f"Unexpected socket type: {socket_type}")

    ctx: zmq.Context | None = None
    try:
        ctx = zmq.Context()  # type: ignore[attr-defined]
        # Only the ROUTER side binds; a REQ socket connects instead.
        should_bind = socket_type == zmq.ROUTER
        yield make_zmq_socket(
            ctx=ctx, path=addr, socket_type=socket_type, bind=should_bind
        )
    finally:
        # Tear down even on error paths; linger=0 drops pending messages.
        if ctx is not None:
            ctx.destroy(linger=0)
@dataclass
class NixlKVConnectorStats(KVConnectorStats):
    """Container for transfer performance metrics.

    All values live in ``self.data``, a plain dict of lists, so the whole
    object stays serializable for cross-process aggregation.
    """

    def __post_init__(self):
        if not self.data:
            # Empty container init, no data is passed in.
            self.reset()

    def reset(self):
        # Must be serializable
        self.data: dict[str, list[float | int]] = {
            "transfer_duration": [],
            "post_duration": [],
            "bytes_transferred": [],
            "num_descriptors": [],
            "num_failed_transfers": [],
            "num_failed_notifications": [],
            "num_kv_expired_reqs": [],
        }

    def record_transfer(self, res: nixlXferTelemetry):
        # Keep metrics units consistent with rest of the code: time us->s
        self.data["transfer_duration"].append(res.xferDuration / 1e6)
        self.data["post_duration"].append(res.postDuration / 1e6)
        self.data["bytes_transferred"].append(res.totalBytes)
        self.data["num_descriptors"].append(res.descCount)

    def record_failed_transfer(self):
        """Record a failed NIXL transfer operation."""
        self.data["num_failed_transfers"].append(1)

    def record_failed_notification(self):
        """Record a failed NIXL notification (send_notif)."""
        self.data["num_failed_notifications"].append(1)

    def record_kv_expired_req(self):
        """Record a request that had its KV blocks expire."""
        self.data["num_kv_expired_reqs"].append(1)

    def clone_and_reset(self) -> "NixlKVConnectorStats":
        # Shallow copy is sufficient: reset() rebinds self.data to brand-new
        # lists, leaving the clone's lists untouched.
        old = copy.copy(self)
        self.reset()
        return old

    def is_empty(self) -> bool:
        # Do not discard metrics update that are entirely failures related.
        return (
            self.num_successful_transfers == 0
            and len(self.data["num_failed_transfers"]) == 0
            and len(self.data["num_failed_notifications"]) == 0
            and len(self.data["num_kv_expired_reqs"]) == 0
        )

    def aggregate(self, other: KVConnectorStats) -> KVConnectorStats:
        # Merge another container into this one, in place, list by list.
        if not other.is_empty():
            for k, v in other.data.items():
                accumulator = self.data[k]
                assert isinstance(accumulator, list)
                accumulator.extend(v)
        return self

    def reduce(self) -> dict[str, int | float]:
        # Compute compact representative stats suitable for CLI logging
        if self.num_successful_transfers == 0:
            # CLI logging only reports successful transfers stats. If all requests in
            # the interval were unsuccessful, Prom will report failures stats instead.
            return {
                "Num successful transfers": 0,
                "Avg xfer time (ms)": 0,
                "P90 xfer time (ms)": 0,
                "Avg post time (ms)": 0,
                "P90 post time (ms)": 0,
                "Avg MB per transfer": 0,
                "Throughput (MB/s)": 0,
                "Avg number of descriptors": 0,
            }
        xfer_time = np.asarray(self.data["transfer_duration"])
        post_time = np.asarray(self.data["post_duration"])
        # Convert to MB for CLI logging.
        mb = np.asarray(self.data["bytes_transferred"]) / 2**20
        descs = np.asarray(self.data["num_descriptors"], dtype=np.uint32)
        n = len(descs)
        assert n == self.num_successful_transfers
        total_mb = mb.sum()
        avg_mb = total_mb / n
        total_time_seconds = xfer_time.sum()
        throughput_mb_s = total_mb / total_time_seconds
        return {
            "Num successful transfers": n,
            "Avg xfer time (ms)": round(xfer_time.mean() * 1e3, 3),
            "P90 xfer time (ms)": round(np.percentile(xfer_time, 90).item() * 1e3, 3),
            "Avg post time (ms)": round(post_time.mean() * 1e3, 3),
            "P90 post time (ms)": round(np.percentile(post_time, 90).item() * 1e3, 3),
            "Avg MB per transfer": round(avg_mb, 3),
            "Throughput (MB/s)": round(throughput_mb_s, 3),
            "Avg number of descriptors": round(descs.mean(), 1),
        }

    @property
    def num_successful_transfers(self) -> int:
        # One duration entry is appended per successful transfer.
        return len(self.data["transfer_duration"])
class NixlPromMetrics(KVConnectorPromMetrics):
    """Prometheus histograms and counters for NIXL KV cache transfers,
    materialized per engine via ``make_per_engine``."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        metric_types: dict[type[PromMetric], type[PromMetricT]],
        labelnames: list[str],
        per_engine_labelvalues: dict[int, list[object]],
    ):
        super().__init__(vllm_config, metric_types, labelnames, per_engine_labelvalues)

        # Latency buckets in seconds, shared by both time histograms.
        buckets = [
            0.001,
            0.005,
            0.01,
            0.025,
            0.05,
            0.075,
            0.1,
            0.2,
            0.3,
            0.5,
            0.75,
            1.0,
            5.0,
        ]
        # NOTE(review): the xfer-time histogram drops the smallest bucket
        # (buckets[1:]) — presumably transfers never finish under 1ms;
        # confirm the intent.
        nixl_histogram_xfer_time = self._histogram_cls(
            name="vllm:nixl_xfer_time_seconds",
            documentation="Histogram of transfer duration for NIXL KV Cache transfers.",
            buckets=buckets[1:],
            labelnames=labelnames,
        )
        self.nixl_histogram_xfer_time = self.make_per_engine(nixl_histogram_xfer_time)
        nixl_histogram_post_time = self._histogram_cls(
            name="vllm:nixl_post_time_seconds",
            documentation="Histogram of transfer post time for NIXL KV"
            " Cache transfers.",
            buckets=buckets,
            labelnames=labelnames,
        )
        self.nixl_histogram_post_time = self.make_per_engine(nixl_histogram_post_time)
        # uniform 2kb to 16gb range
        buckets = [2 ** (10 + i) for i in range(1, 25, 2)]
        nixl_histogram_bytes_transferred = self._histogram_cls(
            name="vllm:nixl_bytes_transferred",
            documentation="Histogram of bytes transferred per NIXL KV Cache transfers.",
            buckets=buckets,
            labelnames=labelnames,
        )
        self.nixl_histogram_bytes_transferred = self.make_per_engine(
            nixl_histogram_bytes_transferred
        )
        # Descriptor-count buckets (roughly logarithmic).
        buckets = [
            10,
            20,
            30,
            50,
            75,
            100,
            200,
            400,
            1000,
            2000,
            4000,
            10000,
            20000,
            50000,
        ]
        nixl_histogram_num_descriptors = self._histogram_cls(
            name="vllm:nixl_num_descriptors",
            documentation="Histogram of number of descriptors per NIXL"
            " KV Cache transfers.",
            buckets=buckets,
            labelnames=labelnames,
        )
        self.nixl_histogram_num_descriptors = self.make_per_engine(
            nixl_histogram_num_descriptors
        )
        counter_nixl_num_failed_transfers = self._counter_cls(
            name="vllm:nixl_num_failed_transfers",
            documentation="Number of failed NIXL KV Cache transfers.",
            labelnames=labelnames,
        )
        self.counter_nixl_num_failed_transfers = self.make_per_engine(
            counter_nixl_num_failed_transfers
        )
        counter_nixl_num_failed_notifications = self._counter_cls(
            name="vllm:nixl_num_failed_notifications",
            documentation="Number of failed NIXL KV Cache notifications.",
            labelnames=labelnames,
        )
        self.counter_nixl_num_failed_notifications = self.make_per_engine(
            counter_nixl_num_failed_notifications
        )
        counter_nixl_num_kv_expired_reqs = self._counter_cls(
            name="vllm:nixl_num_kv_expired_reqs",
            documentation="Number of requests that had their KV expire. "
            "NOTE: This metric is tracked on the P instance.",
            labelnames=labelnames,
        )
        self.counter_nixl_num_kv_expired_reqs = self.make_per_engine(
            counter_nixl_num_kv_expired_reqs
        )

    def observe(self, transfer_stats_data: dict[str, Any], engine_idx: int = 0):
        """Push one interval of raw NixlKVConnectorStats data (``.data``
        dict of lists) into the per-engine Prometheus objects."""
        # Histograms: one observation per recorded transfer.
        for prom_obj, list_item_key in zip(
            [
                self.nixl_histogram_xfer_time,
                self.nixl_histogram_post_time,
                self.nixl_histogram_bytes_transferred,
                self.nixl_histogram_num_descriptors,
            ],
            [
                "transfer_duration",
                "post_duration",
                "bytes_transferred",
                "num_descriptors",
            ],
        ):
            for list_item in transfer_stats_data[list_item_key]:
                prom_obj[engine_idx].observe(list_item)
        # Counters: each recorded failure/expiry entry increments by its value.
        for counter_obj, counter_item_key in zip(
            [
                self.counter_nixl_num_failed_transfers,
                self.counter_nixl_num_failed_notifications,
                self.counter_nixl_num_kv_expired_reqs,
            ],
            ["num_failed_transfers", "num_failed_notifications", "num_kv_expired_reqs"],
        ):
            for list_item in transfer_stats_data[counter_item_key]:
                counter_obj[engine_idx].inc(list_item)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py",
"license": "Apache License 2.0",
"lines": 2482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/entrypoints/openai/test_chat_completion.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import openai # use the official client for correctness check
import pytest
import pytest_asyncio
from tests.utils import RemoteOpenAIServer
# any model with a chat template defined in tokenizer_config should work here
MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct"
@pytest.fixture(scope="module")
def default_server_args():
    """Command-line args for the module-scoped vLLM server."""
    return [
        # keep the server cheap for CI: short context, modest batch, eager mode
        "--max-model-len",
        "2048",
        "--max-num-seqs",
        "128",
        "--enforce-eager",
    ]
@pytest.fixture(scope="module")
def server(default_server_args):
    """Launch a single OpenAI-compatible vLLM server for the whole module."""
    with RemoteOpenAIServer(MODEL_NAME, default_server_args) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client pointed at the module-scoped server."""
    async with server.get_async_client() as async_client:
        yield async_client
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_invalid_json_schema(client: openai.AsyncOpenAI, model_name: str) -> None:
    """A structured-output request with a malformed JSON schema
    ("foo": "bar" is not a valid property definition) must be rejected."""
    invalid_json_schema = {
        "$defs": {
            "CarType": {
                "enum": ["sedan", "SUV", "Truck", "Coupe"],
                "title": "CarType",
                "type": "string",
            }
        },
        "properties": {
            "brand": {"title": "Brand", "type": "string"},
            "model": {"title": "Model", "type": "string"},
            "car_type": {"$ref": "#/$defs/CarType"},
            # Invalid on purpose: a property value must be a schema object.
            "foo": "bar",
        },
        "required": ["brand", "model", "car_type"],
        "title": "CarDescription",
        "type": "object",
    }
    prompt = (
        "Generate a JSON with the brand, model and car_type of"
        "the most iconic car from the 90's"
    )
    with pytest.raises((openai.BadRequestError, openai.APIError)):
        await client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            extra_body={"structured_outputs": {"json": invalid_json_schema}},
        )
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_invalid_regex(client: openai.AsyncOpenAI, model_name: str):
    """A structured-output request with an invalid regex (unterminated
    character class) must be rejected."""
    prompt = (
        "Generate an email address for Alan Turing, who works in Enigma."
        "End in .com and new line. Example result:"
        "alan.turing@enigma.com\n"
    )
    with pytest.raises((openai.BadRequestError, openai.APIError)):
        await client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            extra_body={"structured_outputs": {"regex": r"[.*"}, "stop": ["\n"]},
        )
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_invalid_grammar(client: openai.AsyncOpenAI, model_name: str) -> None:
    """A syntactically broken grammar must be rejected by the server."""
    # "select_statementinvalidsyntax" references an undefined rule, making the
    # grammar invalid; the server should refuse the request.
    invalid_simplified_sql_grammar = """
        root ::= select_statementinvalidsyntax
        select_statement ::= "SELECT " column " from " table " where " condition
        column ::= "col_1 " | "col_2 "
        table ::= "table_1 " | "table_2 "
        condition ::= column "= " number
        number ::= "1 " | "2 "
    """
    prompt = (
        "Generate an SQL query to show the 'username' and 'email'"
        "from the 'users' table."
    )
    with pytest.raises((openai.BadRequestError, openai.APIError)):
        await client.chat.completions.create(
            model=model_name,
            messages=[
                {
                    "role": "user",
                    "content": prompt,
                }
            ],
            extra_body={
                "structured_outputs": {"grammar": invalid_simplified_sql_grammar}
            },
        )
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_empty_grammar(client: openai.AsyncOpenAI, model_name: str) -> None:
    """An empty grammar string is invalid input and must raise an error."""
    prompt = "Say hello"
    request_messages = [{"role": "user", "content": prompt}]
    structured_body = {"structured_outputs": {"grammar": ""}}
    with pytest.raises((openai.BadRequestError, openai.APIError)):
        await client.chat.completions.create(
            model=model_name,
            messages=request_messages,
            extra_body=structured_body,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/entrypoints/openai/test_chat_completion.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/bert_with_rope.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
import torch
from torch import nn
from transformers import PretrainedConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import (
divide,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
tensor_model_parallel_all_reduce,
)
from vllm.model_executor.layers.activation import get_act_and_mul_fn, get_act_fn
from vllm.model_executor.layers.attention import (
EncoderOnlyAttention,
)
from vllm.model_executor.layers.fused_moe import activation_without_mul, fused_topk
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.pooler import DispatchPooler
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
WeightsMapper,
maybe_prefix,
)
from vllm.model_executor.utils import set_weight_attrs
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from .bert import BertPooler
from .interfaces import SupportsCrossEncoding, SupportsQuant
from .interfaces_base import default_pooling_type
class BertWithRopeEmbedding(nn.Module):
    """Token (+ optional token-type) embeddings with LayerNorm.

    Positional information is NOT added here; this module requires the config
    to declare a rotary position embedding, which is applied later in the
    attention layers.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        # RoPE is applied in attention, so reject configs that expect learned
        # absolute position embeddings in this module.
        if config.position_embedding_type not in ["rope", "rotary"]:
            raise ValueError(
                "Only 'rotary'('rope') position_embedding_type" + " is supported"
            )
        self.word_embeddings = VocabParallelEmbedding(
            config.vocab_size, config.hidden_size
        )
        # Some checkpoints (e.g. RoBERTa-style) have no token-type vocabulary.
        if config.type_vocab_size > 0:
            self.token_type_embeddings = VocabParallelEmbedding(
                config.type_vocab_size, config.hidden_size
            )
        else:
            self.token_type_embeddings = None
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        token_type_ids: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Embed token ids (plus token types when available) and normalize."""
        input_shape = input_ids.size()
        inputs_embeds = self.word_embeddings(input_ids)
        embeddings = inputs_embeds
        if self.token_type_embeddings is not None:
            # Missing token_type_ids default to all zeros (segment 0).
            if token_type_ids is None:
                token_type_ids = torch.zeros(
                    input_shape, dtype=torch.long, device=inputs_embeds.device
                )
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            embeddings += token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        return embeddings
class BertWithRopeAttention(nn.Module):
    """Encoder-only self-attention with rotary position embeddings.

    Heads are split across tensor-parallel ranks; Q/K/V are produced by one
    fused column-parallel projection and the output by a row-parallel one.
    """

    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        bias: bool = True,
        rotary_kwargs: dict | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_attention_heads
        # Heads must divide evenly across TP ranks.
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        # BERT-style models use full multi-head attention: kv heads == q heads.
        self.total_num_kv_heads = self.total_num_heads
        self.head_dim = self.hidden_size // self.total_num_heads
        assert self.head_dim * self.total_num_heads == self.hidden_size
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        # Per-rank flattened sizes used to split the fused QKV output.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Standard scaled-dot-product attention scale: 1/sqrt(head_dim).
        self.scaling = self.head_dim**-0.5
        self.qkv_proj = QKVParallelLinear(
            hidden_size=self.hidden_size,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_kv_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.rotary_emb = get_rope(**rotary_kwargs)
        self.attn = EncoderOnlyAttention(
            num_heads=self.num_heads,
            head_size=self.head_dim,
            scale=self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        # NOTE: the weight prefix is ".dense" (HF naming) even though the
        # attribute is called out_proj.
        self.out_proj = RowParallelLinear(
            input_size=hidden_size,
            output_size=hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.dense",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Fused QKV projection, RoPE on q/k, attention, output projection."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.out_proj(attn_output)
        return output
class BertWithRopeGatedMLP(nn.Module):
    """Gated feed-forward block (SwiGLU/GeGLU style) for BertWithRope."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        bias: bool = True,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        # Fused activation consuming the concatenated [gate, up] halves.
        self.act_fn = get_act_and_mul_fn(hidden_act)
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            input_size=intermediate_size,
            output_size=hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Gate/up projection, fused gated activation, then down projection."""
        projected, _ = self.gate_up_proj(hidden_states)
        activated = self.act_fn(projected)
        output, _ = self.down_proj(activated)
        return output
class BertWithRopeMLP(nn.Module):
def __init__(
self,
hidden_size: int,
intermediate_size: int,
hidden_act: str,
bias: bool = True,
quant_config: QuantizationConfig | None = None,
prefix: str = "",
):
super().__init__()
self.act_fn = get_act_fn(hidden_act)
self.up_proj = ColumnParallelLinear(
input_size=hidden_size,
output_size=intermediate_size,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.up_proj",
)
self.down_proj = RowParallelLinear(
input_size=intermediate_size,
output_size=hidden_size,
bias=bias,
quant_config=quant_config,
prefix=f"{prefix}.down_proj",
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states, _ = self.up_proj(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states, _ = self.down_proj(hidden_states)
return hidden_states
class NomicMoE(nn.Module):
    """Mixture-of-experts FFN used by Nomic-BERT MoE layers.

    Expert weights are stored as dense 3-D parameters (sharded on the
    intermediate dimension across TP ranks) and dispatched through
    ``fused_topk`` + ``outplace_fused_experts`` rather than the usual
    FusedMoE layer (see FIXME in ``forward``).
    """

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        params_dtype: torch.dtype | None = None,
        tp_size: int | None = None,
    ):
        super().__init__()
        self.tp_size = tp_size or get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        self.total_intermediate_size = intermediate_size
        # Per-rank shard of the intermediate dimension.
        self.intermediate_size = divide(intermediate_size, self.tp_size)
        # The fused-experts kernel expects an "ungated" activation name.
        self.hidden_act = activation_without_mul(hidden_act)
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype
        # Router is replicated on every rank (no sharding on the logits).
        self.router = ReplicatedLinear(
            self.hidden_size, self.num_total_experts, bias=False
        )
        # w1: (experts, intermediate_shard, hidden); w2 is the transpose
        # layout (experts, hidden, intermediate_shard).
        self.w1 = nn.Parameter(
            torch.empty(
                self.num_total_experts,
                self.intermediate_size,
                self.hidden_size,
                device=current_platform.device_type,
                dtype=self.params_dtype,
            )
        )
        self.w2 = nn.Parameter(
            torch.empty(
                self.num_total_experts,
                self.hidden_size,
                self.intermediate_size,
                device=current_platform.device_type,
                dtype=self.params_dtype,
            )
        )
        self.bias = nn.Parameter(torch.zeros(self.hidden_size))
        set_weight_attrs(
            self.w1,
            {
                "weight_loader": self.weight_loader,
            },
        )
        set_weight_attrs(
            self.w2,
            {
                "weight_loader": self.weight_loader,
            },
        )

    def weight_loader(
        self,
        param: nn.Parameter,
        loaded_weight: torch.Tensor,
        weight_name: str,
    ):
        """Load a fused expert weight, slicing out this rank's TP shard."""
        # NOTE: Nomic-MoE has fused experts weights with shape
        # (num_experts * intermediate_size, hidden_size)
        tp_rank = get_tensor_model_parallel_rank()
        param_data = param.data
        shard_size = self.intermediate_size
        shard = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size)
        if weight_name.endswith("w1"):
            loaded_weight = loaded_weight.reshape(
                self.num_total_experts,
                self.total_intermediate_size,
                self.hidden_size,
            )[:, shard]
        if weight_name.endswith("w2"):
            # w2 checkpoints share w1's layout; transpose to match the
            # (experts, hidden, intermediate) parameter shape.
            loaded_weight = loaded_weight.reshape(
                self.num_total_experts,
                self.total_intermediate_size,
                self.hidden_size,
            )[:, shard].transpose(1, 2)
        param_data.copy_(loaded_weight)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Route each token to top-k experts and combine their outputs."""
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, self.hidden_size)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.router(hidden_states)
        # FIXME(Isotr0py): This implementation is too tricky,
        # we should use FusedMoE instead in the future
        # after supporting ungated activation for it.
        topk_weights, topk_ids, _ = fused_topk(
            hidden_states, router_logits, self.top_k, renormalize=False
        )
        final_hidden_states = torch.ops.vllm.outplace_fused_experts(
            hidden_states=hidden_states,
            w1=self.w1,
            w2=self.w2,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            activation=self.hidden_act,
        )
        # Partial sums from each TP rank are combined here.
        if self.tp_size > 1:
            final_hidden_states = tensor_model_parallel_all_reduce(final_hidden_states)
        return final_hidden_states.view(num_tokens, hidden_size) + self.bias
class BertWithRopeBlock(nn.Module):
    """One encoder layer: RoPE attention + FFN, post-LayerNorm residuals.

    The FFN is chosen per-layer: NomicMoE when ``moe`` is set, a gated MLP
    for silu/geglu activations, otherwise a plain MLP.
    """

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        moe: bool = False,
        bias: bool = True,
        rotary_kwargs: dict | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.attn = BertWithRopeAttention(
            hidden_size=config.hidden_size,
            num_attention_heads=config.num_attention_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            bias=bias,
            rotary_kwargs=rotary_kwargs,
            prefix=f"{prefix}.attention",
        )
        if moe:
            self.mlp = NomicMoE(
                num_experts=config.num_experts,
                top_k=config.moe_top_k,
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
            )
        else:
            # Gated variants fuse gate and up projections; everything else
            # uses the plain two-layer MLP.
            if config.hidden_act in ["silu", "geglu"]:
                self.mlp = BertWithRopeGatedMLP(
                    hidden_size=config.hidden_size,
                    intermediate_size=config.intermediate_size,
                    hidden_act=config.hidden_act,
                    bias=bias,
                    quant_config=quant_config,
                    prefix=f"{prefix}.mlp",
                )
            else:
                self.mlp = BertWithRopeMLP(
                    hidden_size=config.hidden_size,
                    intermediate_size=config.intermediate_size,
                    hidden_act=config.hidden_act,
                    bias=bias,
                    quant_config=quant_config,
                    prefix=f"{prefix}.mlp",
                )
        self.attn_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor):
        """Post-LN residual: LN(x + attn(x)) then LN(y + mlp(y))."""
        attn_output = self.attn(positions, hidden_states)
        hidden_states = self.attn_ln(hidden_states + attn_output)
        mlp_out = self.mlp(hidden_states)
        hidden_states = self.mlp_ln(hidden_states + mlp_out)
        return hidden_states
class BertWithRopeEncoder(nn.Module):
    """Sequential stack of BertWithRopeBlock layers."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        bias: bool = True,
        rotary_kwargs: dict | None = None,
        prefix: str = "",
    ):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        # When moe_every_n_layers is set, layers whose index is 1 modulo n
        # use the MoE feed-forward block instead of a dense MLP.
        every_n = getattr(config, "moe_every_n_layers", 0)
        blocks = []
        for layer_idx in range(config.num_hidden_layers):
            is_moe_layer = every_n > 0 and (layer_idx % every_n == 1)
            blocks.append(
                BertWithRopeBlock(
                    config=config,
                    cache_config=cache_config,
                    quant_config=quant_config,
                    bias=bias,
                    moe=is_moe_layer,
                    rotary_kwargs=rotary_kwargs,
                    prefix=f"{prefix}.layer.{layer_idx}",
                )
            )
        self.layers = nn.ModuleList(blocks)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Thread the hidden states through every block in order."""
        current = hidden_states
        for block in self.layers:
            current = block(positions, current)
        return current
@support_torch_compile
@default_pooling_type(seq_pooling_type="CLS")
class BertWithRope(nn.Module, SupportsQuant):
    """BERT-style encoder backbone that uses rotary position embeddings.

    Concrete checkpoint families (Nomic, GTE, Jina) subclass this and only
    override the HF->vLLM weight-name mapper and, where needed, weight
    preprocessing.
    """

    # Strip the "model." prefix used by some checkpoints.
    hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""})

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        add_pooling_layer: bool = False,
    ):
        super().__init__()
        self.vllm_config = vllm_config
        self.add_pooling_layer = add_pooling_layer
        self.config = vllm_config.model_config.hf_config
        self.embeddings = BertWithRopeEmbedding(self.config)
        self.encoder = BertWithRopeEncoder(
            vllm_config=vllm_config,
            bias=getattr(self.config, "bias", True),
            rotary_kwargs=self.config.rotary_kwargs,
            prefix=f"{prefix}.encoder",
        )
        if add_pooling_layer:
            self.pooler = BertPooler(vllm_config.model_config)
        else:
            self.pooler = None

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids without running the encoder."""
        return self.embeddings(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Embed (unless precomputed embeddings are given) and encode."""
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        else:
            hidden_states = self.embeddings(
                input_ids=input_ids, token_type_ids=token_type_ids
            )
        return self.encoder(positions, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, handling stacked gate/up projections.

        Returns the set of parameter names that were actually loaded.
        """
        weights = self.hf_to_vllm_mapper.apply(weights)
        # Gated MLPs store gate and up as one fused vLLM parameter; map the
        # separate checkpoint tensors onto shards of it.
        if self.config.hidden_act in ["silu", "geglu"]:
            stacked_params_mapping = [
                # (param_name, shard_name, shard_id)
                ("gate_up_proj", "gate_proj", 0),
                ("gate_up_proj", "up_proj", 1),
            ]
        else:
            stacked_params_mapping = []
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if not self.add_pooling_layer and "pooler" in name:
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Fallthrough: not a stacked parameter.
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                if name.endswith((".w1", ".w2")):
                    # Nomic-MoE has fused experts weights
                    weight_loader(param, loaded_weight, name)
                else:
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class NomicBertModel(BertWithRope):
    """BertWithRope with Nomic-BERT checkpoint weight names.

    Only the HF->vLLM name mapping differs from the base class.
    """

    # for https://huggingface.co/nomic-ai/nomic-bert-2048
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={
            "emb_ln": "embeddings.LayerNorm",
            "attn.Wqkv": "attn.qkv_proj",
            "norm1": "attn_ln",
            "mlp.fc1.": "mlp.up_proj.",
            "mlp.fc11": "mlp.up_proj",
            "mlp.fc12": "mlp.gate_proj",
            "mlp.fc2": "mlp.down_proj",
            "norm2": "mlp_ln",
            # MoE mapping
            "experts.mlp.": "",
            "experts.": "",
            "router.layer": "router",
        }
    )
class GteNewModel(BertWithRope):
    """BertWithRope variant for Alibaba GTE "new-impl" checkpoints."""

    # for https://huggingface.co/Alibaba-NLP/new-impl
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={
            "new.": "",
            "layer": "layers",
            "attention.qkv_proj": "attn.qkv_proj",
            "attention.o_proj": "attn.out_proj",
        }
    )

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "", **kwargs):
        super().__init__(vllm_config=vllm_config, prefix=prefix, **kwargs)
        # GteNewModel only gate_up_proj does not have bias.
        # Hack method learned from vllm/model_executor/models/glm.py
        for layer in self.encoder.layers:
            layer.mlp.gate_up_proj.bias = None
            layer.mlp.gate_up_proj.skip_bias_add = True

    def split_up_gate_proj(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Split the checkpoint's fused up_gate tensor into up and gate halves."""
        n = "mlp.up_gate_proj"
        for name, weight in weights:
            if n in name:
                # Checkpoint stores [up; gate] concatenated on dim 0.
                up, gate = weight.chunk(2, dim=0)
                yield name.replace(n, "mlp.up_proj"), up
                yield name.replace(n, "mlp.gate_proj"), gate
            else:
                yield name, weight

    def ignore_unnecessary_layers(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Drop classifier-head weights, which this backbone does not use."""
        for name, weight in weights:
            if name.startswith("classifier"):
                continue
            yield name, weight

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Filter and split checkpoint weights, then defer to the base loader."""
        weights = self.ignore_unnecessary_layers(weights)
        weights = self.split_up_gate_proj(weights)
        return super().load_weights(weights)
class SnowflakeGteNewModel(GteNewModel):
    """GteNewModel with Snowflake Arctic naming (no "new." prefix to strip)."""

    # for Snowflake/snowflake-arctic-embed-m-v2.0
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={
            "layer": "layers",
            "attention.qkv_proj": "attn.qkv_proj",
            "attention.o_proj": "attn.out_proj",
        }
    )
class JinaRobertaModel(BertWithRope):
    """BertWithRope variant for Jina embeddings v3 (RoBERTa + task LoRAs)."""

    # for https://huggingface.co/jinaai/jina-embeddings-v3
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={
            "emb_ln": "embeddings.LayerNorm",
            "mixer.Wqkv": "attn.qkv_proj",
            "mixer.out_proj": "attn.out_proj",
            "norm1": "attn_ln",
            "mlp.fc1.": "mlp.up_proj.",
            "mlp.fc2": "mlp.down_proj",
            "norm2": "mlp_ln",
        }
    )

    @torch.inference_mode()
    def jina_merge_lora_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Fold one LoRA adapter into the base weights and drop the LoRA tensors.

        Index ``i = -1`` selects the last adapter in the stacked LoRA tensors
        (the "text-matching" task). Returns a plain (name, tensor) list.
        """
        # use for jina-embeddings-v3
        # Merge Lora weights into a single weight tensor.
        # This is a temporary solution until we have a better way to handle
        scaling = self.config.lora_alpha / self.config.lora_rank
        device = self.vllm_config.device_config.device
        weights = {name: weight for name, weight in weights}
        o = ".original"
        a = ".0.lora_A"
        b = ".0.lora_B"
        # text-matching
        i = -1
        for name in list(weights.keys()):
            if o in name:
                dtype = weights[name].dtype
                shape = weights[name].shape
                weight_name = name[: -len(o)]
                # NOTE(review): embeddings intentionally swap which stacked
                # tensor plays the A/B role compared to the linear layers —
                # presumably due to the embedding weight orientation; verify
                # against the checkpoint layout if touching this.
                if "embeddings" in weight_name:
                    B = weights[weight_name + a][i].to(device).float()
                    A = weights[weight_name + b][i].to(device).float()
                else:
                    B = weights[weight_name + b][i].to(device).float()
                    A = weights[weight_name + a][i].to(device).float()
                weight = (
                    weights[weight_name + o].to(device)
                    + torch.matmul(B, A).view(shape) * scaling
                )
                weight = weight.cpu().to(dtype)
                weights[weight_name.replace(".parametrizations", "")] = weight
                del (
                    weights[weight_name + o],
                    weights[weight_name + a],
                    weights[weight_name + b],
                )
        return [(name, weight) for name, weight in weights.items()]

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Merge LoRA adapters into base weights, then defer to the base loader."""
        weights = self.jina_merge_lora_weights(weights)
        return super().load_weights(weights)
@default_pooling_type(seq_pooling_type="CLS")
class GteNewForSequenceClassification(nn.Module, SupportsCrossEncoding):
    """GTE backbone plus a linear classifier for sequence classification."""

    is_pooling_model = True

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        # Attribute must be named "new" to match checkpoint weight prefixes.
        self.new = GteNewModel(
            vllm_config=vllm_config, prefix=prefix, add_pooling_layer=True
        )
        self.classifier = ReplicatedLinear(
            config.hidden_size,
            config.num_labels,
            bias=True,
            quant_config=quant_config,
            params_dtype=vllm_config.model_config.head_dtype,
            prefix=maybe_prefix(prefix, "classifier"),
            return_bias=False,
        )
        pooler_config = vllm_config.model_config.pooler_config
        assert pooler_config is not None
        self.pooler = DispatchPooler.for_seq_cls(
            pooler_config,
            pooling=self.new.pooler,
            classifier=self.classifier,
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Delegate weight loading to AutoWeightsLoader over this module tree."""
        loader = AutoWeightsLoader(self)
        loaded_params = loader.load_weights(weights)
        return loaded_params

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids via the backbone without encoding."""
        return self.new.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the backbone; pooling/classification happens in self.pooler."""
        return self.new(
            input_ids=input_ids,
            positions=positions,
            inputs_embeds=inputs_embeds,
            intermediate_tensors=intermediate_tensors,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/bert_with_rope.py",
"license": "Apache License 2.0",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
from torch.nn.parameter import Parameter
from vllm.model_executor.layers.quantization.compressed_tensors.schemes import (
CompressedTensorsScheme,
)
from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import (
apply_fp4_marlin_linear,
prepare_fp4_layer_for_marlin,
)
from vllm.model_executor.parameter import (
GroupQuantScaleParameter,
ModelWeightParameter,
PerTensorScaleParameter,
)
__all__ = ["CompressedTensorsW4A16Fp4"]
class CompressedTensorsW4A16Fp4(CompressedTensorsScheme):
    """Weight-only NVFP4 scheme (FP4 weights, 16-bit activations).

    Weights are stored as packed FP4 with per-group FP8 scales plus one
    global FP32 scale, then repacked for the Marlin kernel after loading.
    """

    def __init__(self):
        # NVFP4 uses a fixed quantization group size of 16.
        self.group_size = 16

    @classmethod
    def get_min_capability(cls) -> int:
        # don't restrict as emulations
        return 75

    def create_weights(
        self,
        layer: torch.nn.Module,
        output_partition_sizes: list[int],
        input_size_per_partition: int,
        params_dtype: torch.dtype,
        weight_loader: Callable,
        **kwargs,
    ):
        """Register packed weight, global scale, and per-group scale params."""
        output_size_per_partition = sum(output_partition_sizes)
        layer.logical_widths = output_partition_sizes
        layer.input_size_per_partition = input_size_per_partition
        layer.output_size_per_partition = output_size_per_partition
        # Weight
        # Two FP4 values per uint8 byte, hence the // 2 on the input dim.
        weight = ModelWeightParameter(
            data=torch.empty(
                sum(output_partition_sizes),
                input_size_per_partition // 2,
                dtype=torch.uint8,
            ),
            input_dim=1,
            output_dim=0,
            weight_loader=weight_loader,
        )
        layer.register_parameter("weight_packed", weight)
        # Global Weight Scale
        # One scalar per logical output partition; collapsed to a single
        # value in process_weights_after_loading.
        weight_global_scale = PerTensorScaleParameter(
            data=torch.empty(len(output_partition_sizes), dtype=torch.float32),
            weight_loader=weight_loader,
        )
        layer.register_parameter("weight_global_scale", weight_global_scale)
        # Per Group Weight Scale
        weight_scale = GroupQuantScaleParameter(
            data=torch.empty(
                sum(output_partition_sizes),
                input_size_per_partition // self.group_size,
                dtype=torch.float8_e4m3fn,
            ),
            input_dim=1,
            output_dim=0,
            weight_loader=weight_loader,
        )
        layer.register_parameter("weight_scale", weight_scale)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        """Repack loaded tensors into the layout the Marlin kernel expects."""
        # Process parameters for marlin repacking
        # Rename weight_packed to weight that marlin expects
        layer.weight = Parameter(layer.weight_packed.data, requires_grad=False)
        del layer.weight_packed
        # ct stores the inverse of what is expected by the marlin kernel
        layer.weight_global_scale = Parameter(
            1.0 / layer.weight_global_scale.max().to(torch.float32), requires_grad=False
        )
        prepare_fp4_layer_for_marlin(layer)

    def apply_weights(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Compute x @ W^T (+ bias) through the FP4 Marlin linear kernel."""
        return apply_fp4_marlin_linear(
            input=x,
            weight=layer.weight,
            weight_scale=layer.weight_scale,
            weight_global_scale=layer.weight_global_scale,
            workspace=layer.workspace,
            size_n=layer.output_size_per_partition,
            size_k=layer.input_size_per_partition,
            bias=bias,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a16_nvfp4.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm.scalar_type import scalar_types
__all__ = [
    "break_fp4_bytes",
    "dequantize_to_dtype",
    "ref_nvfp4_quant",
]
# Largest representable FP4 (E2M1) magnitude — matches the last LUT entry.
FLOAT4_E2M1_MAX = scalar_types.float4_e2m1f.max()
# Lookup table from the 3-bit E2M1 magnitude code to its float value.
kE2M1ToFloat = torch.tensor(
    [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32
)
def break_fp4_bytes(a, dtype):
    """Unpack a uint8 tensor of FP4 (E2M1) pairs into real values.

    Each byte holds two 4-bit codes, low nibble first; the output has twice
    as many columns as the input and is cast to ``dtype``.
    """
    assert a.dtype == torch.uint8
    rows, cols = a.shape
    flat = a.flatten()
    # Low nibble decodes first, then the high nibble of the same byte.
    nibbles = torch.stack((flat & 0x0F, (flat & 0xF0) >> 4), dim=1).flatten()
    # Bit 3 is the sign; bits 0-2 index the E2M1 magnitude table.
    negative = (nibbles & 0x08).to(torch.bool)
    magnitude_idx = (nibbles & 0x07).to(torch.long)
    lut = torch.tensor(
        [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0],
        dtype=torch.float32,
        device=a.device,
    )
    decoded = lut[magnitude_idx] * torch.where(negative, -1.0, 1.0)
    return decoded.reshape(rows, cols * 2).to(dtype=dtype)
def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size):
    """Undo the 128-row x 4-group tile swizzle applied to FP4 block scales.

    Returns the scales in row-major layout, cropped to the logical extent
    (``m`` rows; the column slice by ``k`` keeps all scale columns since
    there are only k // block_size of them).
    """
    rows_per_tile = 128
    cols_per_tile = block_size * 4
    num_m_tiles = (m + rows_per_tile - 1) // rows_per_tile
    num_k_tiles = (k + cols_per_tile - 1) // cols_per_tile
    tiled = torch.reshape(a_sf_swizzled, (1, num_m_tiles, num_k_tiles, 32, 4, 4))
    unswizzled = torch.permute(tiled, (0, 1, 4, 3, 2, 5))
    full = unswizzled.reshape(
        num_m_tiles * rows_per_tile, num_k_tiles * cols_per_tile // block_size
    )
    return full[0:m, 0:k]
def dequantize_to_dtype(
    tensor_fp4, tensor_sf, global_scale, dtype, device, block_size=16
):
    """Dequantize the fp4 tensor back to high precision."""
    # Two fp4 values are packed into one uint8.
    assert tensor_fp4.dtype == torch.uint8
    rows, packed_cols = tensor_fp4.shape
    cols = packed_cols * 2
    # Decode the packed nibbles and group by quantization block.
    decoded = break_fp4_bytes(tensor_fp4, torch.float32)
    decoded = decoded.reshape(rows, cols // block_size, block_size)
    # Block scales arrive swizzled as FP8-E4M3: linearize them, then undo
    # the global scale before applying per-block.
    scales = tensor_sf.view(torch.float8_e4m3fn)
    scales = convert_swizzled_to_linear(scales, rows, cols, block_size)
    scales = scales.to(torch.float32) / global_scale
    result = (decoded * scales.unsqueeze(-1)).reshape(rows, cols)
    return result.to(dtype)
def get_reciprocal(x):
    """Return 1/x elementwise, mapping zeros to 0.0 instead of inf.

    Accepts a tensor or a plain int/float; anything else raises TypeError.
    """
    if isinstance(x, torch.Tensor):
        zero = torch.tensor(0.0, dtype=x.dtype)
        return torch.where(x == 0, zero, 1.0 / x)
    if isinstance(x, (float, int)):
        return 1.0 / x if x != 0 else 0.0
    raise TypeError("Input must be a float, int, or a torch.Tensor.")
def cast_to_fp4(x):
    """Quantize each element to the nearest FP4 (E2M1) magnitude, keeping sign.

    Bucket edges reproduce the reference tie-breaking exactly:
    0.25 -> 0.0, 0.75 -> 1.0, 1.25 -> 1.0, 1.75 -> 2.0, 2.5 -> 2.0,
    3.5 -> 4.0, 5.0 -> 4.0. The input tensor is not modified.
    """
    sign = torch.sign(x)
    mag = torch.abs(x)
    # Start from 0.0 (covers [0, 0.25]) and overwrite each bucket in turn;
    # the open/closed bounds match the original sequential masking.
    out = torch.zeros_like(mag)
    out = torch.where((mag > 0.25) & (mag < 0.75), torch.full_like(mag, 0.5), out)
    out = torch.where((mag >= 0.75) & (mag <= 1.25), torch.full_like(mag, 1.0), out)
    out = torch.where((mag > 1.25) & (mag < 1.75), torch.full_like(mag, 1.5), out)
    out = torch.where((mag >= 1.75) & (mag <= 2.5), torch.full_like(mag, 2.0), out)
    out = torch.where((mag > 2.5) & (mag < 3.5), torch.full_like(mag, 3.0), out)
    out = torch.where((mag >= 3.5) & (mag <= 5.0), torch.full_like(mag, 4.0), out)
    out = torch.where(mag > 5.0, torch.full_like(mag, 6.0), out)
    return out * sign
def ref_nvfp4_quant(x, global_scale, block_size):
    """Reference NVFP4 quantization of a 2-D tensor.

    Splits each row into groups of ``block_size``, derives one FP8-E4M3
    block scale per group from the group's absolute maximum, and returns
    (quantized values, block scales) — both as float32.
    """
    assert global_scale.dtype == torch.float32
    assert x.ndim == 2
    m, n = x.shape
    x = torch.reshape(x, (m, n // block_size, block_size))
    vec_max = torch.max(torch.abs(x), dim=-1, keepdim=True)[0].to(torch.float32)
    scale = global_scale * (vec_max * get_reciprocal(FLOAT4_E2M1_MAX))
    # Round-trip through FP8-E4M3 so the scale matches what hardware stores;
    # clamp keeps it within the E4M3 representable range.
    scale = torch.clamp(scale, max=448, min=-448)
    scale = scale.to(torch.float8_e4m3fn).to(torch.float32)
    output_scale = get_reciprocal(scale * get_reciprocal(global_scale))
    scaled_x = x.to(torch.float32) * output_scale
    # Clamp to the FP4 max magnitude before snapping to the E2M1 grid.
    clipped_x = torch.clamp(scaled_x, -6.0, 6.0).reshape(m, n)
    # both outputs are float32
    return cast_to_fp4(clipped_x), scale.squeeze(-1)
def run_nvfp4_emulations(
    x: torch.Tensor,
    input_global_scale: torch.Tensor,
    weight: torch.Tensor,
    weight_scale_swizzled: torch.Tensor,
    weight_global_scale: torch.Tensor,
):
    """Emulate an NVFP4 GEMM in high precision.

    Fake-quantizes the activation through the NVFP4 reference path,
    dequantizes the packed FP4 weight, then performs a plain matmul in the
    activation's original dtype.
    """
    group_size = 16
    num_rows, num_cols = x.shape
    output_dtype = x.dtype
    # Quantize the input to FP4 with interleaved block scales, then
    # immediately dequantize so the matmul can run in output_dtype.
    x_fp4, x_blockscale = ref_nvfp4_quant(x, input_global_scale, group_size)
    x_fp4 = x_fp4.reshape(num_rows, num_cols // group_size, group_size)
    x_blockscale = x_blockscale.unsqueeze(-1) / input_global_scale
    x_dq = (x_fp4 * x_blockscale).reshape(num_rows, num_cols).to(output_dtype)
    del x_fp4, x_blockscale
    # Dequantize the packed FP4 weight into the activation dtype.
    w_dq = dequantize_to_dtype(
        weight.data.view(torch.uint8),
        weight_scale_swizzled.data,
        weight_global_scale,
        output_dtype,
        x.device,
        group_size,
    )
    result = torch.matmul(x_dq, w_dq.t())
    del w_dq, x_dq
    return result
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm._custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization.utils.marlin_utils import (
USE_FP32_REDUCE_DEFAULT,
get_marlin_input_dtype,
marlin_make_workspace_new,
marlin_permute_bias,
marlin_permute_scales,
marlin_quant_input,
should_use_atomic_add_reduce,
)
from vllm.platforms import current_platform
from vllm.scalar_type import scalar_types
# Marlin's FP4 path only supports a quantization group size of 16.
FP4_MARLIN_SUPPORTED_GROUP_SIZES = [16]
logger = init_logger(__name__)
def is_fp4_marlin_supported():
    """Whether the current device (SM >= 7.5) can run the FP4 Marlin kernel."""
    min_capability = 75
    return current_platform.has_device_capability(min_capability)
def nvfp4_marlin_process_scales(marlin_scales):
    """Convert NVFP4 block scales into Marlin's fast-dequant FP8 layout.

    Reorders the scales into the interleaving the kernel expects and
    re-encodes them from FP8-S1E4M3 into a sign-free FP8-S0E5M3 encoding
    (see inline comments). Assumes all scales are non-negative.
    """
    if not (marlin_scales >= 0).all():
        logger.warning_once(
            "NVFP4 Marlin assumes the scales to be >=0, but has encountered "
            "negative scales. Accuracy will likely be degraded. This is "
            "because it changes the scales from FP8-S1E4M3 to a special "
            "FP8-S0E5M3 format to speedup the dequantization."
        )
    # convert to half first, we would convert to fp8 later
    marlin_scales = marlin_scales.to(torch.half)
    # fit the layout of fp8 dequantization
    marlin_scales = marlin_scales.view(-1, 4)[:, [0, 2, 1, 3]].view(
        marlin_scales.size(0), -1
    )
    # We assume that weight_scale (FP8-S1E4M3) is always greater
    # than or equal to 0. So we can convert
    # (weight_scale * (2 ** 7) to a special FP8-S0E5M3 format.
    # After multiplying by 2 ** 7, the top bit of FP8-S0E5M3 would always be 1
    # when weight_scale > 0. This allows us to have an exponent bias
    # closer to zero after dequantization.
    marlin_scales = (marlin_scales * (2**7)).view(torch.int16) << 1
    marlin_scales = marlin_scales.view(torch.float8_e4m3fn)
    # Keep the odd bytes: after the left shift these hold the S0E5M3 encoding.
    marlin_scales = marlin_scales[:, 1::2].contiguous()
    return marlin_scales
def mxfp4_marlin_process_scales(marlin_scales, input_dtype=None):
    """Convert MXFP4 block scales (E8M0) into Marlin's expected layout.

    For 16-bit activations the scales are interleaved for the fp8-dequant
    path; for FP8-E4M3 activations the raw E8M0 exponents are biased by 6
    (the fp4->fp8 exponent-bias difference noted below).
    """
    # fit the layout of fp8 dequantization
    if input_dtype is None or input_dtype.itemsize == 2:
        marlin_scales = marlin_scales.view(-1, 4)[:, [0, 2, 1, 3]].view(
            marlin_scales.size(0), -1
        )
    marlin_scales = marlin_scales.to(torch.float8_e8m0fnu)
    if input_dtype == torch.float8_e4m3fn:
        marlin_scales = marlin_scales.view(torch.uint8)
        # 249 + 6 = 255 is the largest valid E8M0 byte after re-biasing.
        assert marlin_scales.max() <= 249
        # exponent_bias (fp4->fp8) = 2 ** 3 - 2 ** 1 = 6
        marlin_scales = marlin_scales + 6
        marlin_scales = marlin_scales.view(torch.float8_e8m0fnu)
    return marlin_scales
def nvfp4_marlin_process_global_scale(global_scale):
    """Fold the fp4->activation-dtype exponent-bias gap into the global scale.

    Only fp16 and bf16 are accepted. The additional 2**-7 factor undoes the
    2**7 applied to the group scales in nvfp4_marlin_process_scales.
    """
    assert global_scale.dtype in [torch.half, torch.bfloat16]
    fp4_exponent = 2
    # fp16 has 5 exponent bits, bf16 has 8.
    target_exponent = 5 if global_scale.dtype == torch.half else 8
    # exponent_bias_fp16 = 2 ** 4 - 2 ** 1 = 14
    # exponent_bias_bf16 = 2 ** 7 - 2 ** 1 = 126
    exponent_bias = 2 ** (target_exponent - 1) - 2 ** (fp4_exponent - 1)
    return global_scale * (2.0 ** (exponent_bias - 7))
def apply_fp4_marlin_linear(
    input: torch.Tensor,
    weight: torch.Tensor,
    weight_scale: torch.Tensor,
    weight_global_scale: torch.Tensor | None,
    workspace: torch.Tensor,
    size_n: int,
    size_k: int,
    bias: torch.Tensor | None = None,
    input_dtype: torch.dtype | None = None,
    use_fp32_reduce: bool = USE_FP32_REDUCE_DEFAULT,
) -> torch.Tensor:
    """Run a weight-only FP4 GEMM through the Marlin kernel.

    Used on GPUs without native FP4 compute. A non-None
    weight_global_scale marks the weights as NVFP4; otherwise MXFP4.
    Raises RuntimeError for unsupported 8-bit activation combinations.
    """
    x2d = input.reshape(-1, input.shape[-1])
    out_shape = input.shape[:-1] + (size_n,)
    use_atomic_add = should_use_atomic_add_reduce(
        m=x2d.size(0), n=size_n, k=size_k, device=input.device, dtype=input.dtype
    )
    is_nvfp4 = weight_global_scale is not None
    gemm_input = x2d
    a_scales = None
    if input_dtype is not None and input_dtype.itemsize == 1:
        # 8-bit activations: only FP8-E4M3 with MXFP4 weights is allowed.
        if is_nvfp4:
            raise RuntimeError("NVFP4 weight + INT8/FP8 activation is not supported.")
        if input_dtype != torch.float8_e4m3fn:
            raise RuntimeError("MXFP4 weight + INT8 activation is not supported.")
        gemm_input, a_scales = marlin_quant_input(gemm_input, torch.float8_e4m3fn)
    output = ops.marlin_gemm(
        a=gemm_input,
        c=None,
        b_q_weight=weight,
        b_bias=bias,
        b_scales=weight_scale,
        a_scales=a_scales,
        global_scale=weight_global_scale,
        b_zeros=None,
        g_idx=None,
        perm=None,
        workspace=workspace,
        b_q_type=scalar_types.float4_e2m1f,
        size_m=x2d.size(0),
        size_n=size_n,
        size_k=size_k,
        use_atomic_add=use_atomic_add,
        use_fp32_reduce=use_fp32_reduce,
    )
    return output.reshape(out_shape)
def prepare_fp4_layer_for_marlin(
    layer: torch.nn.Module, input_dtype: torch.dtype | None = None
) -> None:
    """Convert a dense FP4-quantized linear layer in place to the Marlin
    weight-only layout (used when the GPU lacks native FP4 compute).

    Repacks ``layer.weight``, permutes/re-encodes ``layer.weight_scale``
    (plus ``layer.weight_global_scale`` for NVFP4), permutes ``layer.bias``
    if present, and allocates ``layer.workspace``.
    """
    logger.warning_once(
        "Your GPU does not have native support for FP4 computation but "
        "FP4 quantization is being used. Weight-only FP4 compression will "
        "be used leveraging the Marlin kernel. This may degrade "
        "performance for compute-heavy workloads."
    )
    # NVFP4 checkpoints carry a per-tensor global scale; MXFP4 ones do not.
    is_nvfp4 = hasattr(layer, "weight_global_scale")
    if input_dtype is not None and input_dtype.itemsize == 1:
        if is_nvfp4:
            raise RuntimeError("NVFP4 weight + INT8/FP8 activation is not supported.")
        elif input_dtype != torch.float8_e4m3fn:
            raise RuntimeError("MXFP4 weight + INT8 activation is not supported.")
    # NVFP4 uses 16-element groups, MXFP4 uses 32-element groups.
    group_size = 16 if is_nvfp4 else 32
    part_size_n = layer.output_size_per_partition
    part_size_k = layer.input_size_per_partition
    param_dtype = layer.params_dtype
    # Two FP4 values are packed per byte, hence k // 2 in the last dim.
    assert layer.weight.shape == (part_size_n, part_size_k // 2)
    device = layer.weight.device
    # WORKSPACE
    layer.workspace = marlin_make_workspace_new(device)
    # WEIGHT
    # Repack weights to marlin format
    perm = torch.empty(0, dtype=torch.int, device=device)  # empty perm tensor
    qweight = layer.weight.view(torch.int32).T.contiguous()
    is_a_8bit = input_dtype is not None and input_dtype.itemsize == 1
    marlin_qweight = ops.gptq_marlin_repack(
        b_q_weight=qweight,
        perm=perm,
        size_k=part_size_k,
        size_n=part_size_n,
        num_bits=4,
        is_a_8bit=is_a_8bit,
    )
    layer.weight = torch.nn.Parameter(marlin_qweight, requires_grad=False)
    # WEIGHT SCALES
    # Permute scales
    weight_scale = layer.weight_scale.T.contiguous()
    if not is_nvfp4:
        # MXFP4 stores E8M0 exponent bytes; reinterpret before the cast.
        weight_scale = weight_scale.view(torch.float8_e8m0fnu)
    weight_scale = weight_scale.to(param_dtype)
    weight_scale = marlin_permute_scales(
        s=weight_scale,
        size_k=part_size_k,
        size_n=part_size_n,
        group_size=group_size,
        is_a_8bit=is_a_8bit,
    )
    if is_nvfp4:
        weight_scale = nvfp4_marlin_process_scales(weight_scale)
        layer.weight_scale = torch.nn.Parameter(weight_scale, requires_grad=False)
        weight_global_scale = layer.weight_global_scale.to(param_dtype)
        weight_global_scale = nvfp4_marlin_process_global_scale(weight_global_scale)
        layer.weight_global_scale = torch.nn.Parameter(
            weight_global_scale, requires_grad=False
        )
    else:
        weight_scale = mxfp4_marlin_process_scales(
            weight_scale, input_dtype=input_dtype
        )
        layer.weight_scale = torch.nn.Parameter(weight_scale, requires_grad=False)
    if hasattr(layer, "bias") and layer.bias is not None:
        assert layer.bias.shape == (part_size_n,)
        bias = marlin_permute_bias(layer.bias)
        layer.bias = torch.nn.Parameter(bias, requires_grad=False)
    return
def prepare_nvfp4_moe_layer_for_marlin(
    layer: torch.nn.Module,
    w13: torch.Tensor,
    w13_scale: torch.Tensor,
    w13_scale_2: torch.Tensor,
    w2: torch.Tensor,
    w2_scale: torch.Tensor,
    w2_scale_2: torch.Tensor,
    is_act_and_mul: bool,
) -> tuple[
    torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor
]:
    """Repack NVFP4 MoE expert weights, group scales and global scales into
    the layout required by the Marlin weight-only kernel.

    Args:
        layer: MoE module providing num_experts / hidden_size /
            intermediate_size_per_partition / params_dtype; its
            ``workspace`` attribute is (re)assigned here.
        w13, w2: packed FP4 expert weights (two values per byte).
        w13_scale, w2_scale: per-group scales (group size 16).
        w13_scale_2, w2_scale_2: per-tensor global scales.
        is_act_and_mul: True when w13 holds two stacked shards, doubling
            its output dimension.

    Returns:
        The repacked (w13, w13_scale, w13_scale_2, w2, w2_scale,
        w2_scale_2) tensors.

    Raises:
        RuntimeError: for any 8-bit activation dtype (unsupported with
            NVFP4 weights).
    """
    logger.warning_once(
        "Your GPU does not have native support for FP4 computation but "
        "FP4 quantization is being used. Weight-only FP4 compression will "
        "be used leveraging the Marlin kernel. This may degrade "
        "performance for compute-heavy workloads."
    )
    input_dtype = get_marlin_input_dtype(prefix="")
    if input_dtype is not None and input_dtype.itemsize == 1:
        raise RuntimeError("NVFP4 weight + INT8/FP8 activation is not supported.")
    GROUP_SIZE = 16
    E = layer.num_experts
    K = layer.hidden_size
    N = layer.intermediate_size_per_partition
    device = w13.device
    param_dtype = layer.params_dtype
    # Always False here (8-bit activations raise above); kept for parity
    # with the other prepare_* helpers.
    is_a_8bit = input_dtype is not None and input_dtype.itemsize == 1
    # WORKSPACE
    layer.workspace = marlin_make_workspace_new(device, 4)
    perm = torch.empty(0, dtype=torch.int, device=device)
    # WEIGHT
    # Repack weights to marlin format
    def repack_weight(weight: torch.Tensor, name: str) -> torch.Tensor:
        # Repack each expert's packed-FP4 weight via gptq_marlin_repack.
        tensor_list = []
        num_shards = 2 if is_act_and_mul else 1
        if "w13" in name:
            size_n, size_k = N * num_shards, K
        else:
            size_n, size_k = K, N
        assert weight.shape == (E, size_n, size_k // 2)
        for i in range(E):
            qweight = weight[i].view(torch.int32).T.contiguous()
            marlin_qweight = ops.gptq_marlin_repack(
                b_q_weight=qweight,
                perm=perm,
                size_k=size_k,
                size_n=size_n,
                num_bits=4,
                is_a_8bit=is_a_8bit,
            )
            tensor_list.append(marlin_qweight)
        return torch.cat([x.unsqueeze(0) for x in tensor_list], 0)
    w13 = repack_weight(w13, "w13")
    w2 = repack_weight(w2, "w2")
    # WEIGHT SCALES
    # Permute scales (helper renamed from "premute_scales" to fix the typo).
    def permute_scales(
        scales: torch.Tensor, g_scales: torch.Tensor, name: str
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # Permute per-expert group scales into the Marlin layout and
        # re-encode both the group and global scales for the kernel.
        scales = scales.to(param_dtype)
        g_scales = g_scales.to(param_dtype)
        tensor_list = []
        num_shards = 2 if is_act_and_mul else 1
        if "w13" in name:
            size_n, size_k = N * num_shards, K
        else:
            size_n, size_k = K, N
        for i in range(E):
            scale = scales[i].T
            marlin_scales = marlin_permute_scales(
                s=scale,
                size_k=size_k,
                size_n=size_n,
                group_size=GROUP_SIZE,
                is_a_8bit=is_a_8bit,
            )
            marlin_scales = nvfp4_marlin_process_scales(marlin_scales)
            tensor_list.append(marlin_scales)
        scales = torch.cat([x.unsqueeze(0) for x in tensor_list], 0)
        g_scales = nvfp4_marlin_process_global_scale(g_scales)
        return scales, g_scales
    w13_scale, w13_scale_2 = permute_scales(w13_scale, w13_scale_2, "w13")
    w2_scale, w2_scale_2 = permute_scales(w2_scale, w2_scale_2, "w2")
    return w13, w13_scale, w13_scale_2, w2, w2_scale, w2_scale_2
def prepare_moe_fp4_layer_for_marlin(
    layer: torch.nn.Module, input_dtype: torch.dtype | None = None
) -> None:
    """Convert an FP4-quantized fused-MoE layer in place to the Marlin
    weight-only layout (w13/w2 weights, scales, optional biases).

    NVFP4 layers (detected via the ``w13_weight_scale_2`` attribute) use
    group size 16 plus a per-tensor global scale; MXFP4 layers use group
    size 32 with E8M0 group scales only.
    """
    logger.warning_once(
        "Your GPU does not have native support for FP4 computation but "
        "FP4 quantization is being used. Weight-only FP4 compression will "
        "be used leveraging the Marlin kernel. This may degrade "
        "performance for compute-heavy workloads."
    )
    is_nvfp4 = hasattr(layer, "w13_weight_scale_2")
    if input_dtype is not None and input_dtype.itemsize == 1:
        if is_nvfp4:
            raise RuntimeError("NVFP4 weight + INT8/FP8 activation is not supported.")
        elif input_dtype != torch.float8_e4m3fn:
            raise RuntimeError("MXFP4 weight + INT8 activation is not supported.")
    group_size = 16 if is_nvfp4 else 32
    e = layer.num_experts
    k = layer.hidden_size
    n = layer.intermediate_size_per_partition
    # WORKSPACE
    device = layer.w13_weight.device
    param_dtype = layer.params_dtype
    layer.workspace = marlin_make_workspace_new(device, 4)
    perm = torch.empty(0, dtype=torch.int, device=device)
    is_a_8bit = input_dtype is not None and input_dtype.itemsize == 1
    # WEIGHT
    # Repack weights to marlin format
    for name in ["w13_weight", "w2_weight"]:
        weight = getattr(layer, name)
        tensor_list = []
        # w13 spans 2*n output rows (see the shape assert); w2 is (k, n).
        if "w13" in name:
            size_n, size_k = n * 2, k
        else:
            size_n, size_k = k, n
        assert weight.shape == (e, size_n, size_k // 2)
        for i in range(e):
            qweight = weight[i].view(torch.int32).T.contiguous()
            marlin_qweight = ops.gptq_marlin_repack(
                b_q_weight=qweight,
                perm=perm,
                size_k=size_k,
                size_n=size_n,
                num_bits=4,
                is_a_8bit=is_a_8bit,
            )
            tensor_list.append(marlin_qweight)
        weight = torch.cat([x.unsqueeze(0) for x in tensor_list], 0)
        weight = torch.nn.Parameter(weight, requires_grad=False)
        setattr(layer, name, weight)
    # WEIGHT SCALES
    # Permute scales
    for name in ["w13", "w2"]:
        scales = getattr(layer, name + "_weight_scale")
        if not is_nvfp4:
            # MXFP4 stores E8M0 exponent bytes; reinterpret before the cast.
            scales = scales.view(torch.float8_e8m0fnu)
        scales = scales.to(param_dtype)
        if is_nvfp4:
            global_scale = getattr(layer, name + "_weight_scale_2").to(param_dtype)
        tensor_list = []
        if "w13" in name:
            size_n, size_k = n * 2, k
        else:
            size_n, size_k = k, n
        for i in range(e):
            scale = scales[i].T
            marlin_scales = marlin_permute_scales(
                s=scale,
                size_k=size_k,
                size_n=size_n,
                group_size=group_size,
                is_a_8bit=is_a_8bit,
            )
            if is_nvfp4:
                marlin_scales = nvfp4_marlin_process_scales(marlin_scales)
            else:
                marlin_scales = mxfp4_marlin_process_scales(
                    marlin_scales, input_dtype=input_dtype
                )
            tensor_list.append(marlin_scales)
        scales = torch.cat([x.unsqueeze(0) for x in tensor_list], 0)
        scales = torch.nn.Parameter(scales, requires_grad=False)
        setattr(layer, name + "_weight_scale", scales)
        if is_nvfp4:
            global_scale = nvfp4_marlin_process_global_scale(global_scale)
            global_scale = torch.nn.Parameter(global_scale, requires_grad=False)
            setattr(layer, name + "_weight_scale_2", global_scale)
    # BIAS
    # Permute bias
    for name in ["w13_bias", "w2_bias"]:
        if not hasattr(layer, name):
            continue
        bias = getattr(layer, name).to(param_dtype)
        tensor_list = []
        for i in range(e):
            expert_bias = bias[i]
            tensor_list.append(marlin_permute_bias(expert_bias))
        bias = torch.cat([x.unsqueeze(0) for x in tensor_list], 0)
        bias = torch.nn.Parameter(bias, requires_grad=False)
        setattr(layer, name, bias)
def rand_marlin_weight_nvfp4_like(weight, group_size, input_dtype=None):
    """Create a random NVFP4 Marlin weight matching ``weight``'s shape.

    Returns (weight_ref.T, marlin_qweight, marlin_scales, global_scale),
    where weight_ref is the dequantized reference for correctness checks.
    """
    is_a_8bit = input_dtype is not None and input_dtype.itemsize == 1
    assert not is_a_8bit, "NVFP4 weight + INT8/FP8 activation is not supported."
    assert group_size > 0
    size_n, size_k = weight.shape
    device = weight.device
    # Per-group scales derived from the reference weight; 6 is the FP4
    # (E2M1) maximum magnitude.
    scales = weight.view(size_n, -1, group_size).abs().max(-1)[0] / 6
    # 448 is the FP8-E4M3 max, so the normalized scales fit the FP8 range.
    global_scale = scales.max() / 448
    scales = (scales / global_scale).to(torch.float8_e4m3fn)
    # Random packed FP4 payload: two 4-bit codes per byte.
    fp4_weight = torch.randint(
        0, 256, (size_n, size_k // 2), dtype=torch.uint8, device=weight.device
    )
    # Decode the high nibble: keep the sign bit and shift the exponent /
    # mantissa bits into FP8-E4M3 position; *2**6 compensates the fp4->fp8
    # exponent-bias difference.
    fp4_weight_part_1 = (fp4_weight & 0b10000000) | ((fp4_weight & 0b01110000) >> 2)
    fp4_weight_part_1 = fp4_weight_part_1.view(torch.float8_e4m3fn)
    fp4_weight_part_1 = fp4_weight_part_1.to(weight.dtype) * (2**6)
    # Decode the low nibble the same way after shifting it up.
    fp4_weight2 = fp4_weight << 4
    fp4_weight_part_2 = (fp4_weight2 & 0b10000000) | ((fp4_weight2 & 0b01110000) >> 2)
    fp4_weight_part_2 = fp4_weight_part_2.view(torch.float8_e4m3fn)
    fp4_weight_part_2 = fp4_weight_part_2.to(weight.dtype) * (2**6)
    # Interleave low/high nibble values back into a (size_n, size_k) tensor.
    weight_ref = torch.cat(
        [fp4_weight_part_2.unsqueeze(2), fp4_weight_part_1.unsqueeze(2)], 2
    ).view(size_n, size_k)
    weight_ref = (
        weight_ref
        * global_scale.to(weight.dtype)
        * scales.repeat_interleave(group_size, 1).to(weight.dtype)
    )
    marlin_qweight = ops.gptq_marlin_repack(
        b_q_weight=fp4_weight.view(torch.int32).T.contiguous(),
        perm=torch.empty(0, dtype=torch.int, device=device),
        size_k=size_k,
        size_n=size_n,
        num_bits=4,
        is_a_8bit=is_a_8bit,
    )
    marlin_scales = marlin_permute_scales(
        s=scales.T.to(weight.dtype),
        size_k=size_k,
        size_n=size_n,
        group_size=group_size,
        is_a_8bit=is_a_8bit,
    )
    marlin_scales = nvfp4_marlin_process_scales(marlin_scales)
    global_scale = nvfp4_marlin_process_global_scale(global_scale)
    return weight_ref.T, marlin_qweight, marlin_scales, global_scale
def rand_marlin_weight_mxfp4_like(weight, group_size, input_dtype=None):
    """Create a random MXFP4 Marlin weight matching ``weight``'s shape.

    Returns (weight_ref.T, marlin_qweight, marlin_scales), where weight_ref
    is the dequantized reference for correctness checks.
    """
    is_a_8bit = input_dtype is not None and input_dtype.itemsize == 1
    if is_a_8bit:
        assert input_dtype == torch.float8_e4m3fn, (
            "MXFP4 weight + INT8 activation is not supported."
        )
    assert group_size > 0
    size_n, size_k = weight.shape
    device = weight.device
    # Random E8M0 exponents in a narrow band (raw codes 110..119) to keep
    # the dequantized magnitudes moderate.
    scales = torch.randint(
        110,
        120,
        (size_n, size_k // group_size),
        dtype=torch.uint8,
        device=weight.device,
    )
    scales = scales.view(torch.float8_e8m0fnu)
    # Random packed FP4 payload: two 4-bit codes per byte.
    fp4_weight = torch.randint(
        0, 256, (size_n, size_k // 2), dtype=torch.uint8, device=weight.device
    )
    # Decode the high nibble into FP8-E4M3 bit position; *2**6 compensates
    # the fp4->fp8 exponent-bias difference.
    fp4_weight_part_1 = (fp4_weight & 0b10000000) | ((fp4_weight & 0b01110000) >> 2)
    fp4_weight_part_1 = fp4_weight_part_1.view(torch.float8_e4m3fn)
    fp4_weight_part_1 = fp4_weight_part_1.to(weight.dtype) * (2**6)
    # Decode the low nibble the same way after shifting it up.
    fp4_weight2 = fp4_weight << 4
    fp4_weight_part_2 = (fp4_weight2 & 0b10000000) | ((fp4_weight2 & 0b01110000) >> 2)
    fp4_weight_part_2 = fp4_weight_part_2.view(torch.float8_e4m3fn)
    fp4_weight_part_2 = fp4_weight_part_2.to(weight.dtype) * (2**6)
    # Interleave low/high nibble values back into a (size_n, size_k) tensor.
    weight_ref = torch.cat(
        [fp4_weight_part_2.unsqueeze(2), fp4_weight_part_1.unsqueeze(2)], 2
    ).view(size_n, size_k)
    weight_ref = weight_ref * scales.repeat_interleave(group_size, 1).to(weight.dtype)
    perm = torch.empty(0, dtype=torch.int, device=device)
    fp4_weight = fp4_weight.view(torch.int32).T.contiguous()
    marlin_qweight = ops.gptq_marlin_repack(
        b_q_weight=fp4_weight,
        perm=perm,
        size_k=size_k,
        size_n=size_n,
        num_bits=4,
        is_a_8bit=is_a_8bit,
    )
    marlin_scales = marlin_permute_scales(
        s=scales.T.to(weight.dtype),
        size_k=size_k,
        size_n=size_n,
        group_size=group_size,
        is_a_8bit=is_a_8bit,
    )
    marlin_scales = mxfp4_marlin_process_scales(marlin_scales, input_dtype=input_dtype)
    return weight_ref.T, marlin_qweight, marlin_scales.to(torch.float8_e8m0fnu)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/utils/marlin_utils_fp4.py",
"license": "Apache License 2.0",
"lines": 464,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_nvfp4_moe.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from tests.kernels.moe.utils import make_dummy_moe_config, make_test_weights
from tests.kernels.quantization.nvfp4_utils import (
FLOAT4_E2M1_MAX,
FLOAT8_E4M3_MAX,
dequantize_nvfp4_to_dtype,
)
from tests.kernels.utils import torch_moe
from vllm import _custom_ops as ops
from vllm.config import ParallelConfig, VllmConfig, set_current_vllm_config
from vllm.model_executor.layers.fused_moe import fused_topk
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.model_executor.layers.fused_moe.config import nvfp4_moe_quant_config
from vllm.model_executor.layers.fused_moe.cutlass_moe import (
CutlassExpertsFp4,
)
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
MoEPrepareAndFinalizeNoEP,
)
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_random_seed
# These tests require compute capability 10.0+; skip the whole module
# otherwise.
if not current_platform.has_device_capability(100):
    pytest.skip(
        "Nvfp4 Requires compute capability of 10 or above.", allow_module_level=True
    )
# (m, n, k) problem sizes exercised by the MoE tests below.
MNK_FACTORS = [
    (2, 1024, 1024),
    (2, 1024, 1536),
    (2, 3072, 1024),
    (64, 1024, 1024),
    (64, 3072, 1024),
    (64, 2048, 1536),
    (224, 1024, 1024),
    (224, 1024, 1536),
]
@pytest.mark.parametrize("m,n,k", MNK_FACTORS)
@pytest.mark.parametrize("e", [40, 64, 256])
@pytest.mark.parametrize("topk", [1, 6, 8])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@torch.inference_mode()
def test_cutlass_fp4_moe_no_graph(
    m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, workspace_init
):
    """Compare the CUTLASS NVFP4 MoE kernel against a torch reference
    built by dequantizing the same NVFP4 weights (no CUDA graphs)."""
    set_random_seed(7)
    with set_current_vllm_config(
        VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1))
    ):
        quant_blocksize = 16
        a = torch.randn((m, k), device="cuda", dtype=dtype) / 10
        (_, w1_q, w1_blockscale, w1_gs), (_, w2_q, w2_blockscale, w2_gs) = (
            make_test_weights(
                e,
                n,
                k,
                in_dtype=dtype,
                quant_dtype="nvfp4",
                block_shape=None,  # use quant_blocksize?
                per_out_ch_quant=False,
            )
        )
        score = torch.randn((m, e), device="cuda", dtype=dtype)
        topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False)
        # Activation global scales are fixed to 1 so only the weight scales
        # affect the comparison.
        a1_gs = torch.ones((e,), device="cuda", dtype=torch.float32)
        a2_gs = torch.ones((e,), device="cuda", dtype=torch.float32)
        assert w1_gs is not None
        assert w2_gs is not None
        assert w1_blockscale is not None
        assert w2_blockscale is not None
        quant_config = nvfp4_moe_quant_config(
            g1_alphas=(1 / w1_gs),
            g2_alphas=(1 / w2_gs),
            a1_gscale=a1_gs,
            a2_gscale=a2_gs,
            w1_scale=w1_blockscale,
            w2_scale=w2_blockscale,
        )
        kernel = mk.FusedMoEModularKernel(
            MoEPrepareAndFinalizeNoEP(),
            CutlassExpertsFp4(
                moe_config=make_dummy_moe_config(),
                quant_config=quant_config,
            ),
            inplace=False,
        )
        cutlass_output = kernel(
            hidden_states=a,
            w1=w1_q,
            w2=w2_q,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
        )
        # Reference check:
        a_global_scale = (
            (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a.flatten(), dim=-1)
        ).to(torch.float32)
        a_fp4, a_scale_interleaved = ops.scaled_fp4_quant(a, a_global_scale)
        # Round-trip the activations through NVFP4 so the reference sees the
        # same quantization error as the kernel input.
        a_in_dtype = dequantize_nvfp4_to_dtype(
            a_fp4,
            a_scale_interleaved,
            a_global_scale,
            dtype=a.dtype,
            device=a.device,
            block_size=quant_blocksize,
        )
        w1_d = torch.empty((e, 2 * n, k), device="cuda", dtype=dtype)
        w2_d = torch.empty((e, k, n), device="cuda", dtype=dtype)
        for idx in range(0, e):
            w1_d[idx] = dequantize_nvfp4_to_dtype(
                w1_q[idx],
                w1_blockscale[idx],
                w1_gs[idx],
                dtype=dtype,
                device=w1_q.device,
                block_size=quant_blocksize,
            )
            w2_d[idx] = dequantize_nvfp4_to_dtype(
                w2_q[idx],
                w2_blockscale[idx],
                w2_gs[idx],
                dtype=dtype,
                device=w2_q.device,
                block_size=quant_blocksize,
            )
        torch_output = torch_moe(a_in_dtype, w1_d, w2_d, score, topk)
        torch.testing.assert_close(torch_output, cutlass_output, atol=1e-1, rtol=1e-1)
# step3.5-flash uses swiglustep activation (clipped SwiGLU with limit=7.0)
# for MoE layers 43-44. This tests the non-fused activation fallback path
# in run_cutlass_moe_fp4 (apply_moe_activation + separate fp4 quantization).
# Model dims: e=288, topk=8, n=1280 (moe_intermediate_size), k=4096 (hidden)
# (m, n, k) problem sizes for the swiglustep test; n/k mirror the model dims.
SWIGLUSTEP_MNK_FACTORS = [
    (2, 1280, 4096),
    (64, 1280, 4096),
    (224, 1280, 4096),
]
@pytest.mark.parametrize("m,n,k", SWIGLUSTEP_MNK_FACTORS)
@pytest.mark.parametrize("e", [64, 288])
@pytest.mark.parametrize("topk", [1, 8])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@torch.inference_mode()
def test_cutlass_fp4_moe_swiglustep(
    m: int, n: int, k: int, e: int, topk: int, dtype: torch.dtype, workspace_init
):
    """Compare the CUTLASS NVFP4 MoE kernel with the SWIGLUSTEP activation
    against a dequantized torch reference using the same activation."""
    set_random_seed(7)
    with set_current_vllm_config(
        VllmConfig(parallel_config=ParallelConfig(pipeline_parallel_size=1))
    ):
        quant_blocksize = 16
        a = torch.randn((m, k), device="cuda", dtype=dtype) / 10
        (_, w1_q, w1_blockscale, w1_gs), (_, w2_q, w2_blockscale, w2_gs) = (
            make_test_weights(
                e,
                n,
                k,
                in_dtype=dtype,
                quant_dtype="nvfp4",
                block_shape=None,
                per_out_ch_quant=False,
            )
        )
        score = torch.randn((m, e), device="cuda", dtype=dtype)
        topk_weights, topk_ids, _ = fused_topk(a, score, topk, renormalize=False)
        a1_gs = torch.ones((e,), device="cuda", dtype=torch.float32)
        a2_gs = torch.ones((e,), device="cuda", dtype=torch.float32)
        assert w1_gs is not None
        assert w2_gs is not None
        assert w1_blockscale is not None
        assert w2_blockscale is not None
        quant_config = nvfp4_moe_quant_config(
            g1_alphas=(1 / w1_gs),
            g2_alphas=(1 / w2_gs),
            a1_gscale=a1_gs,
            a2_gscale=a2_gs,
            w1_scale=w1_blockscale,
            w2_scale=w2_blockscale,
        )
        kernel = mk.FusedMoEModularKernel(
            MoEPrepareAndFinalizeNoEP(),
            CutlassExpertsFp4(
                moe_config=make_dummy_moe_config(),
                quant_config=quant_config,
            ),
            inplace=False,
        )
        cutlass_output = kernel(
            hidden_states=a,
            w1=w1_q,
            w2=w2_q,
            topk_weights=topk_weights,
            topk_ids=topk_ids,
            activation=MoEActivation.SWIGLUSTEP,
        )
        # Reference: dequantize everything and run torch_moe with swiglustep
        a_global_scale = (
            (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / torch.amax(a.flatten(), dim=-1)
        ).to(torch.float32)
        a_fp4, a_scale_interleaved = ops.scaled_fp4_quant(a, a_global_scale)
        a_in_dtype = dequantize_nvfp4_to_dtype(
            a_fp4,
            a_scale_interleaved,
            a_global_scale,
            dtype=a.dtype,
            device=a.device,
            block_size=quant_blocksize,
        )
        w1_d = torch.empty((e, 2 * n, k), device="cuda", dtype=dtype)
        w2_d = torch.empty((e, k, n), device="cuda", dtype=dtype)
        for idx in range(0, e):
            w1_d[idx] = dequantize_nvfp4_to_dtype(
                w1_q[idx],
                w1_blockscale[idx],
                w1_gs[idx],
                dtype=dtype,
                device=w1_q.device,
                block_size=quant_blocksize,
            )
            w2_d[idx] = dequantize_nvfp4_to_dtype(
                w2_q[idx],
                w2_blockscale[idx],
                w2_gs[idx],
                dtype=dtype,
                device=w2_q.device,
                block_size=quant_blocksize,
            )
        torch_output = torch_moe(
            a_in_dtype,
            w1_d,
            w2_d,
            score,
            topk,
            activation=MoEActivation.SWIGLUSTEP,
        )
        torch.testing.assert_close(torch_output, cutlass_output, atol=1e-1, rtol=1e-1)
if __name__ == "__main__":
    # Debugging entry point for running one configuration outside pytest.
    # The test takes scalar m/n/k (separate parameters, not a tuple) plus
    # the `workspace_init` fixture value, which is passed as None here.
    # The previous call passed (2, 1024, 1024) as a single tuple for `m`
    # and omitted topk/dtype/workspace_init, which raised a TypeError.
    test_cutlass_fp4_moe_no_graph(2, 1024, 1024, 40, 1, torch.half, None)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_nvfp4_moe.py",
"license": "Apache License 2.0",
"lines": 238,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/quantization/nvfp4_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from vllm._custom_ops import scaled_fp4_quant
from vllm.scalar_type import scalar_types
# Maximum representable magnitudes of FP4 (E2M1) and FP8 (E4M3).
FLOAT4_E2M1_MAX = scalar_types.float4_e2m1f.max()
FLOAT8_E4M3_MAX = torch.finfo(torch.float8_e4m3fn).max
# Lookup table: FP4 E2M1 magnitude bits (0..7) -> float value.
kE2M1ToFloat = torch.tensor(
    [0.0, 0.5, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0], dtype=torch.float32
)
def convert_swizzled_to_linear(a_sf_swizzled: torch.Tensor, m, k, block_size):
    """Convert scale factors from the swizzled 128-row tile layout back to a
    linear (row-major) layout, cropped to the first m rows and k columns."""
    rows_per_tile = 128
    m_tiles = (m + rows_per_tile - 1) // rows_per_tile
    cols_per_tile = block_size * 4
    k_tiles = (k + cols_per_tile - 1) // cols_per_tile
    # Expose the (32, 4, 4) intra-tile structure, then invert the swizzle.
    tiled = a_sf_swizzled.reshape(1, m_tiles, k_tiles, 32, 4, 4)
    linear = tiled.permute(0, 1, 4, 3, 2, 5).reshape(
        m_tiles * rows_per_tile, k_tiles * cols_per_tile // block_size
    )
    return linear[:m, :k]
def convert_swizzled_8x4_layout_to_linear(
    a_sf_swizzled: torch.Tensor, m, k, block_size
):
    """Convert scale factors from the swizzled 8x4 tile layout back to a
    linear (row-major) layout, cropped to the first m rows and k columns."""
    rows_per_tile = 8
    m_tiles = (m + rows_per_tile - 1) // rows_per_tile
    cols_per_tile = block_size * 4
    k_tiles = (k + cols_per_tile - 1) // cols_per_tile
    # Expose the (8, 4) intra-tile structure, then invert the swizzle.
    tiled = a_sf_swizzled.reshape(1, m_tiles, k_tiles, 8, 4)
    linear = tiled.permute(0, 1, 3, 2, 4).reshape(
        m_tiles * rows_per_tile, k_tiles * cols_per_tile // block_size
    )
    return linear[:m, :k]
def dequantize_nvfp4_to_dtype(
    tensor_fp4,
    tensor_sf,
    global_scale,
    dtype,
    device,
    block_size=16,
    is_sf_128x4_layout=True,
):
    """Dequantize a packed NVFP4 tensor back to a high-precision dtype.

    tensor_fp4 packs two FP4 values per uint8; tensor_sf holds per-block
    scale factors in either the 128x4 (default) or 8x4 swizzled layout.
    """
    assert tensor_fp4.dtype == torch.uint8
    m, packed_k = tensor_fp4.shape
    k = packed_k * 2
    # Unpack the nibbles into floats, grouped by scale block.
    values = break_fp4_bytes(tensor_fp4, dtype).reshape(m, k // block_size, block_size)
    sf = tensor_sf.view(torch.float8_e4m3fn)
    unswizzle = (
        convert_swizzled_to_linear
        if is_sf_128x4_layout
        else convert_swizzled_8x4_layout_to_linear
    )
    sf = unswizzle(sf, m, k, block_size)
    # Block scales are stored relative to the global scale.
    block_scale = sf.to(torch.float32) / global_scale
    scaled = (values * block_scale.unsqueeze(-1)).reshape(m, k)
    return scaled.to(dtype=dtype)
def break_fp4_bytes(a, dtype):
    """Unpack two FP4 (E2M1) values from every uint8 and decode to floats.

    The low nibble precedes the high nibble, matching the packing order
    used by the quantization kernels; output shape is (m, 2 * n).
    """
    assert a.dtype == torch.uint8
    m, n = a.shape
    flat = a.flatten()
    # Interleave low/high nibbles so each byte expands to two codes.
    nibbles = torch.stack((flat & 0x0F, (flat & 0xF0) >> 4), dim=1).flatten()
    # Bit 3 is the sign; bits 0-2 index the E2M1 magnitude table.
    sign = torch.where((nibbles & 0x08).to(torch.bool), -1.0, 1.0)
    magnitudes = kE2M1ToFloat.to(device=a.device)[(nibbles & 0x07).to(torch.long)]
    return (magnitudes * sign).reshape(m, n * 2).to(dtype=dtype)
def get_nvfp4_global_scale(a: torch.Tensor):
    """Global scale mapping a's absolute maximum onto the FP8*FP4 range."""
    amax = torch.abs(a).max().to(torch.float32)
    return (FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX) / amax
def quant_nvfp4_tensor(a: torch.Tensor):
    """Quantize a to NVFP4: returns (packed fp4, block scales, global scale)."""
    global_scale = get_nvfp4_global_scale(a)
    quantized, block_scales = scaled_fp4_quant(a, global_scale)
    return quantized, block_scales, global_scale
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/quantization/nvfp4_utils.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/attention/backends/mla/rocm_aiter_mla.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
from typing import ClassVar
import torch
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config import VllmConfig
from vllm.model_executor.layers.attention.mla_attention import (
MLACommonBackend,
MLACommonDecodeMetadata,
MLACommonImpl,
MLACommonMetadata,
MLACommonMetadataBuilder,
QueryLenSupport,
)
from vllm.v1.attention.backend import AttentionCGSupport, AttentionLayer, MultipleOf
from vllm.v1.kv_cache_interface import AttentionSpec
class AiterMLABackend(MLACommonBackend):
    """MLA attention backend backed by ROCm AITER kernels."""
    @staticmethod
    def get_name() -> str:
        return "ROCM_AITER_MLA"
    @staticmethod
    def get_supported_kernel_block_sizes() -> list[int | MultipleOf]:
        # The AITER MLA decode path operates on single-token pages.
        return [1]
    @staticmethod
    def get_impl_cls() -> type["AiterMLAImpl"]:
        return AiterMLAImpl
    @staticmethod
    def get_builder_cls() -> type["AiterMLAMetadataBuilder"]:
        return AiterMLAMetadataBuilder
@dataclass
class AiterMLADecodeMetadata(MLACommonDecodeMetadata):
    """Decode-phase metadata consumed by the AITER MLA decode kernel."""
    # The indptr of the paged kv cache, shape: [batch_size + 1]
    paged_kv_indptr: torch.Tensor | None = None
    # The page indices of the paged kv cache
    paged_kv_indices: torch.Tensor | None = None
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size]
    paged_kv_last_page_len: torch.Tensor | None = None
    # The query indptr, shape : [num_decode + 1]
    qo_indptr: torch.Tensor | None = None
    # The dtype of MLA out tensor
    attn_out_dtype: torch.dtype = torch.bfloat16
    # The max query output length: int
    max_qo_len: int | None = None
class AiterMLAMetadata(MLACommonMetadata[AiterMLADecodeMetadata]):
    """MLA metadata specialized with the AITER decode metadata type."""
    pass
class AiterMLAMetadataBuilder(MLACommonMetadataBuilder[AiterMLAMetadata]):
    """Builds AiterMLAMetadata, keeping persistent paged-KV index buffers
    so decode metadata tensors can be reused under full CUDA graphs."""
    # TODO(luka, lucas): audit this as part of:
    # https://github.com/vllm-project/vllm/issues/22945
    _cudagraph_support: ClassVar[AttentionCGSupport] = AttentionCGSupport.UNIFORM_BATCH
    query_len_support: ClassVar[QueryLenSupport] = QueryLenSupport.UNIFORM
    def __init__(
        self,
        kv_cache_spec: AttentionSpec,
        layer_names: list[str],
        vllm_config: VllmConfig,
        device: torch.device,
    ):
        """Allocate persistent buffers sized for the worst case
        (max_num_seqs requests of max_model_len tokens each)."""
        super().__init__(
            kv_cache_spec, layer_names, vllm_config, device, AiterMLAMetadata
        )
        self.compilation_config = vllm_config.compilation_config
        self.decode_attn_out_dtype = vllm_config.model_config.dtype
        # kernel block size is always 1.
        max_num_pages_per_req = vllm_config.model_config.max_model_len
        max_num_reqs = vllm_config.scheduler_config.max_num_seqs
        max_num_pages = max_num_reqs * max_num_pages_per_req
        # Preparing persistent buffers
        # TODO: we can disambiguate between decode and mixed-prefill decode here
        # so we can only use the persistent buffer if a cudagraph is actually
        # being used.
        # paged_kv_last_page_len is always 1s (kernel block size is always 1),
        # so we create it once and reuse slices in both eager and cudagraph modes.
        self.paged_kv_last_page_len = torch.ones(
            max_num_reqs, dtype=torch.int32, device=device
        )
        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
            # Persistent staging buffers, only needed for full-cudagraph mode.
            self.paged_kv_indptr = torch.zeros(
                max_num_reqs + 1, dtype=torch.int32, device=device
            )
            self.paged_kv_indices = torch.zeros(
                max_num_pages, dtype=torch.int32, device=device
            )
            self.qo_indptr = torch.zeros(
                max_num_reqs + 1, dtype=torch.int32, device=device
            )
    def _build_decode(
        self,
        block_table_tensor: torch.Tensor,
        seq_lens_device: torch.Tensor,
        max_seq_len: int,
        query_start_loc_cpu: torch.Tensor,
        query_start_loc_device: torch.Tensor,
        num_decode_tokens: int,
        dcp_tot_seq_lens_device: torch.Tensor | None,
    ) -> AiterMLADecodeMetadata:
        """Flatten the block table into paged-KV indices/indptr and, under
        full CUDA graphs, stage the results into the persistent buffers."""
        # kernel block size is always 1, although the kv block size is not 1.
        device = self.device
        num_reqs = seq_lens_device.size(0)
        # Keep only the first seq_len block entries of each request's row.
        mask = torch.arange(
            block_table_tensor.size(1), dtype=block_table_tensor.dtype, device=device
        ).unsqueeze(0) < seq_lens_device.unsqueeze(1)
        paged_kv_indices = block_table_tensor[mask]
        # kernel block size is always 1, so each page has exactly 1 token.
        # last_page_len is always 1 - just slice the pre-initialized buffer.
        paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]
        paged_kv_indptr = torch.cat(
            [
                torch.zeros(1, dtype=seq_lens_device.dtype, device=device),
                seq_lens_device.cumsum(dim=0, dtype=torch.int32),
            ]
        )
        qo_len = query_start_loc_cpu[1:] - query_start_loc_cpu[:-1]
        max_qo_len = qo_len.max().item()
        if self.compilation_config.cudagraph_mode.has_full_cudagraphs():
            num_actual_pages = paged_kv_indices.size(0)
            self.paged_kv_indices[:num_actual_pages].copy_(
                paged_kv_indices, non_blocking=True
            )
            # Unused tail entries are filled with -1.
            self.paged_kv_indices[num_actual_pages:].fill_(-1)
            paged_kv_indices = self.paged_kv_indices[:num_actual_pages]
            self.paged_kv_indptr[: 1 + num_reqs].copy_(
                paged_kv_indptr, non_blocking=True
            )
            # Repeat the final offset so the padded tail spans zero pages.
            self.paged_kv_indptr[1 + num_reqs :].fill_(paged_kv_indptr[-1])
            paged_kv_indptr = self.paged_kv_indptr[: 1 + num_reqs]
            # paged_kv_last_page_len already uses the pre-initialized buffer slice
            # (set above), so no copy needed - buffer is always 1s.
            self.qo_indptr[: 1 + num_reqs].copy_(
                query_start_loc_device, non_blocking=True
            )
            self.qo_indptr[1 + num_reqs :] = query_start_loc_device[-1]
            qo_indptr = self.qo_indptr[: 1 + num_reqs]
        else:
            qo_indptr = torch.arange(
                0, num_reqs + 1, step=1, dtype=torch.int32, device=device
            )
        attn_metadata = AiterMLADecodeMetadata(
            block_table=block_table_tensor,
            seq_lens=seq_lens_device,
            paged_kv_indptr=paged_kv_indptr,
            paged_kv_indices=paged_kv_indices,
            paged_kv_last_page_len=paged_kv_last_page_len,
            qo_indptr=qo_indptr,
            dcp_tot_seq_lens=dcp_tot_seq_lens_device,
            max_qo_len=max_qo_len,
            attn_out_dtype=self.decode_attn_out_dtype,
        )
        return attn_metadata
class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]):
    """MLA attention implementation that dispatches to ROCm AITER kernels."""
    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: list[float] | None,
        sliding_window: int | None,
        kv_cache_dtype: str,
        logits_soft_cap: float | None,
        attn_type: str,
        kv_sharing_target_layer_name: str | None,
        # MLA Specific Arguments
        **mla_args,
    ) -> None:
        """Validate AITER constraints (16 or 128 heads; no alibi, sliding
        window, or soft cap) and bind the aiter varlen attention function."""
        super().__init__(
            num_heads,
            head_size,
            scale,
            num_kv_heads,
            alibi_slopes,
            sliding_window,
            kv_cache_dtype,
            logits_soft_cap,
            attn_type,
            kv_sharing_target_layer_name,
            **mla_args,
        )
        assert num_heads == 16 or num_heads == 128, (
            f"Aiter MLA only supports 16 or 128 number of heads.\n"
            f"Provided {num_heads} number of heads.\n"
            "Try adjusting tensor_parallel_size value."
        )
        unsupported_features = [alibi_slopes, sliding_window, logits_soft_cap]
        if any(unsupported_features):
            raise NotImplementedError(
                "Aiter MLA does not support one of the following: "
                "alibi_slopes, sliding_window, logits_soft_cap"
            )
        # Imported lazily so this module can load without aiter installed.
        from aiter import flash_attn_varlen_func
        self.flash_attn_varlen_func = flash_attn_varlen_func
    def _flash_attn_varlen_diff_headdims(
        self, q, k, v, return_softmax_lse=False, softmax_scale=None, **kwargs
    ):
        # Thin adapter: maps the common `return_softmax_lse` kwarg onto
        # aiter's `return_lse` parameter.
        output = self.flash_attn_varlen_func(  # type: ignore[call-arg]
            q=q,
            k=k,
            v=v,
            softmax_scale=softmax_scale,
            return_lse=return_softmax_lse,
            **kwargs,
        )
        return output
    def forward_mqa(
        self,
        q: torch.Tensor | tuple[torch.Tensor, torch.Tensor],
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: AiterMLAMetadata,
        layer: AttentionLayer,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Decode-side attention via the AITER MLA decode kernel.

        Returns (output, None); no LSE tensor is produced on this path.
        """
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None
        assert attn_metadata.decode.max_qo_len is not None
        # q may arrive as (nope, rope) parts; fuse them along the last dim.
        if type(q) is tuple:
            q = torch.cat(q, dim=-1)
        assert isinstance(q, torch.Tensor)
        B = q.shape[0]
        # Output buffer filled in place by the kernel.
        o = torch.zeros(
            B,
            self.num_heads,
            self.kv_lora_rank,
            dtype=attn_metadata.decode.attn_out_dtype,
            device=q.device,
        )
        kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)
        rocm_aiter_ops.mla_decode_fwd(
            q,
            kv_buffer,
            o,
            self.scale,
            attn_metadata.decode.qo_indptr,
            attn_metadata.decode.max_qo_len,
            attn_metadata.decode.paged_kv_indptr,
            attn_metadata.decode.paged_kv_indices,
            attn_metadata.decode.paged_kv_last_page_len,
            q_scale=layer._q_scale,
            kv_scale=layer._k_scale,
        )
        return o, None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/mla/rocm_aiter_mla.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/chat_templates/registry.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from pathlib import Path
from typing import TypeAlias
from vllm.logger import init_logger
logger = init_logger(__file__)
CHAT_TEMPLATES_DIR = Path(__file__).parent
ChatTemplatePath: TypeAlias = Path | Callable[[str], Path | None]
def _get_qwen_chat_template_fallback(tokenizer_name_or_path: str) -> Path | None:
    """Pick a fallback chat template for Qwen-family checkpoints.

    ``*-Chat`` checkpoints get the ChatML template; everything else gets
    the basic completion-style template.
    """
    template_name = (
        "template_chatml.jinja"
        if tokenizer_name_or_path.endswith("-Chat")
        else "template_basic.jinja"
    )
    return CHAT_TEMPLATES_DIR / template_name
def _get_minicpmv_chat_template_fallback(tokenizer_name_or_path: str) -> Path | None:
    """Pick a fallback chat template for MiniCPM-V checkpoints."""
    # MiniCPM-V-4.5 ships a dedicated template; other versions use ChatML.
    is_v45 = "4.5" in tokenizer_name_or_path or "4_5" in tokenizer_name_or_path
    template_name = "template_minicpmv45.jinja" if is_v45 else "template_chatml.jinja"
    return CHAT_TEMPLATES_DIR / template_name
# Fallback chat templates keyed by HF `model_type`. A value is either a
# concrete template path or a callable that resolves one from the tokenizer
# name (and may return None when no fallback applies).
_MODEL_TYPE_TO_CHAT_TEMPLATE_FALLBACK: dict[str, ChatTemplatePath] = {
    "blip-2": CHAT_TEMPLATES_DIR / "template_blip2.jinja",
    "chameleon": CHAT_TEMPLATES_DIR / "template_basic.jinja",
    "clip": CHAT_TEMPLATES_DIR / "template_basic.jinja",
    "deepseek_ocr": CHAT_TEMPLATES_DIR / "template_deepseek_ocr.jinja",
    "deepseek_ocr2": CHAT_TEMPLATES_DIR / "template_deepseek_ocr.jinja",
    "deepseek_vl_v2": CHAT_TEMPLATES_DIR / "template_deepseek_vl2.jinja",
    "fuyu": CHAT_TEMPLATES_DIR / "template_fuyu.jinja",
    "minicpmv": _get_minicpmv_chat_template_fallback,
    "paligemma": CHAT_TEMPLATES_DIR / "template_basic.jinja",
    "qwen": _get_qwen_chat_template_fallback,
    "siglip": CHAT_TEMPLATES_DIR / "template_basic.jinja",
    "siglip2": CHAT_TEMPLATES_DIR / "template_basic.jinja",
}
def register_chat_template_fallback_path(
    model_type: str,
    chat_template: ChatTemplatePath,
) -> None:
    """Register (or overwrite) the fallback chat template for *model_type*."""
    already_registered = model_type in _MODEL_TYPE_TO_CHAT_TEMPLATE_FALLBACK
    if already_registered:
        # Overwriting is allowed, but make it visible in the logs.
        logger.warning(
            "Model type %s already has a chat template registered. "
            "It will be overwritten by the new chat template %s.",
            model_type,
            chat_template,
        )
    _MODEL_TYPE_TO_CHAT_TEMPLATE_FALLBACK[model_type] = chat_template
def get_chat_template_fallback_path(
    model_type: str,
    tokenizer_name_or_path: str,
) -> Path | None:
    """Resolve the fallback chat template path for *model_type*, if any."""
    entry = _MODEL_TYPE_TO_CHAT_TEMPLATE_FALLBACK.get(model_type)
    # Callable entries pick the template from the tokenizer name and may
    # themselves return None.
    if callable(entry):
        return entry(tokenizer_name_or_path)
    return entry
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/chat_templates/registry.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/logging_utils/dump_input.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import contextlib
import enum
import json
import torch
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.metrics.stats import SchedulerStats
from vllm.version import __version__ as VLLM_VERSION
logger = init_logger(__name__)
def prepare_object_to_dump(obj) -> str:
    """Recursively render *obj* as a compact, best-effort debug string.

    Tensors are summarized by metadata only (shape/device/dtype) so the
    dump never exposes tensor contents; unknown objects fall back to JSON
    serialization and finally ``repr``.
    """
    if isinstance(obj, str):
        return f"'{obj}'"  # Double quotes
    elif isinstance(obj, dict):
        # BUG FIX: this used a set comprehension, which randomized the
        # order of entries and silently dropped duplicate "k: v" strings.
        # A generator keeps the dict's insertion order intact.
        dict_str = ", ".join(
            f"{str(k)}: {prepare_object_to_dump(v)}" for k, v in obj.items()
        )
        return f"{{{dict_str}}}"
    elif isinstance(obj, list):
        return f"[{', '.join([prepare_object_to_dump(v) for v in obj])}]"
    elif isinstance(obj, set):
        return f"[{', '.join([prepare_object_to_dump(v) for v in list(obj)])}]"
    elif isinstance(obj, tuple):
        # Tuples are intentionally rendered with list brackets.
        return f"[{', '.join([prepare_object_to_dump(v) for v in obj])}]"
    elif isinstance(obj, enum.Enum):
        return repr(obj)
    elif isinstance(obj, torch.Tensor):
        # We only print the 'draft' of the tensor to not expose sensitive data
        # and to get some metadata in case of CUDA runtime crashed
        return f"Tensor(shape={obj.shape}, device={obj.device},dtype={obj.dtype})"
    elif hasattr(obj, "anon_repr"):
        return obj.anon_repr()
    elif hasattr(obj, "__dict__"):
        # Generic object: dump its attribute dict as Name(k=v, ...).
        items = obj.__dict__.items()
        dict_str = ", ".join(
            [f"{str(k)}={prepare_object_to_dump(v)}" for k, v in items]
        )
        return f"{type(obj).__name__}({dict_str})"
    else:
        # Hacky way to make sure we can serialize the object in JSON format
        try:
            return json.dumps(obj)
        except (TypeError, OverflowError):
            return repr(obj)
def dump_engine_exception(
    config: VllmConfig,
    scheduler_output: SchedulerOutput,
    scheduler_stats: SchedulerStats | None,
):
    """Best-effort crash dump; any error raised while logging is swallowed."""
    # NOTE: ensure we can log extra info without risking raises
    # unexpected errors during logging
    try:
        _dump_engine_exception(config, scheduler_output, scheduler_stats)
    except Exception:
        pass
def _dump_engine_exception(
    config: VllmConfig,
    scheduler_output: SchedulerOutput,
    scheduler_stats: SchedulerStats | None,
):
    """Log the engine config plus the scheduler state being executed."""
    logger.error(
        "Dumping input data for V1 LLM engine (v%s) with config: %s, ",
        VLLM_VERSION,
        config,
    )
    try:
        logger.error(
            "Dumping scheduler output for model execution: %s",
            prepare_object_to_dump(scheduler_output),
        )
        if scheduler_stats:
            logger.error("Dumping scheduler stats: %s", scheduler_stats)
    except Exception:
        # The dump itself must never crash the caller.
        logger.exception("Error preparing object to dump")
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/logging_utils/dump_input.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/quantization/test_mxfp4.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# flake8: noqa
"""Tests Quark mxfp4 models against ground truth generation"""
import pytest
from vllm import LLM, SamplingParams
# Quark mxfp4-quantized checkpoint under test.
MODELS = ["amd/Llama-2-7b-chat-hf-wmxfp4-amxfp4-kvfp8-scale-uint8"]
# Ground-truth greedy (temperature=0, 20-token) completions, one per
# example prompt, recorded for the model above.
EXPECTED_STRS_MAP = {
    "amd/Llama-2-7b-chat-hf-wmxfp4-amxfp4-kvfp8-scale-uint8": [
        "\n### Key Features\n\n* **High-throughput Inference**: vLL",
        "\nArtificial intelligence (AI) has evolved significantly since its inception in the 1",
        "Artificial intelligence (AI) and human intelligence (HI) are two distinct concepts that have been",
        "A neural network is a machine learning model inspired by the structure of the human brain. It consists of",
        "\nTitle: The Dreaming Robot\n\nAs the sun set on the bustling metropol",
        "\nThe COVID-19 pandemic has had a profound impact on global economic structures and business",
        "The Mona Lisa painting, created by Leonardo da Vinci in the early 16th",
        " everybody knows this proverbial saying, but did you know that it's not entirely accurate?",
    ]
}
@pytest.mark.skip(reason="Model to be released in the future")
@pytest.mark.quant_model
@pytest.mark.parametrize("model_name", MODELS)
def test_models(example_prompts, model_name) -> None:
    """Compare greedy generations against the recorded ground truth."""
    params = SamplingParams(max_tokens=20, temperature=0)
    llm = LLM(
        model=model_name,
        kv_cache_dtype="fp8",
        quantization="quark",
    )
    generations = llm.generate(example_prompts, params)
    expected_outputs = EXPECTED_STRS_MAP[model_name]
    for i, generation in enumerate(generations):
        output_str = generation.outputs[0].text
        expected_str = expected_outputs[i]
        assert expected_str == output_str, (
            f"Expected: {expected_str!r}\nvLLM: {output_str!r}"
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/quantization/test_mxfp4.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/quantization/utils/mxfp4_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from typing import Any
import torch
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.activation import MoEActivation
from vllm.platforms import current_platform
from vllm.triton_utils import triton
from vllm.utils.import_utils import has_triton_kernels
from vllm.utils.torch_utils import direct_register_custom_op, is_torch_equal_or_newer
logger = init_logger(__name__)
def _swizzle_mxfp4(quant_tensor, scale, num_warps):
    """weight swizzle for mxfp4 moe, used for OAI mxfp4 kernel"""
    assert has_triton_kernels()
    import triton_kernels.matmul_ogs_details.opt_flags as opt_flags
    from triton_kernels.numerics import InFlexData
    from triton_kernels.tensor import FP4, convert_layout, wrap_torch_tensor
    from triton_kernels.tensor_details import layout
    from triton_kernels.tensor_details.layout import StridedLayout

    value_layout_opts: dict[str, Any] = {}
    scale_layout_opts: dict[str, Any] = {}

    # Select value/scale layouts per platform:
    # - Hopper (SM90) on torch < 2.8.1: plain strided layout (no swizzle).
    # - ROCm: strided values; gfx950 gets a dedicated scale layout.
    # - Other CUDA: triton_kernels' default mxfp4 matmul layouts.
    if (
        current_platform.is_cuda()
        and current_platform.is_device_capability(90)
        and not is_torch_equal_or_newer("2.8.1")
    ):
        logger.warning_once(
            "Mxfp4 on hopper is running on torch < 2.8.1, "
            "this cause swizling to be disabled, which may "
            "cause performance degradation. Please upgrade to torch nightly"
        )
        value_layout = StridedLayout
        scale_layout = StridedLayout
    elif current_platform.is_rocm():
        from vllm.platforms.rocm import on_gfx950

        value_layout = StridedLayout
        if on_gfx950():
            from triton_kernels.tensor_details.layout import GFX950MXScaleLayout

            scale_layout = GFX950MXScaleLayout
        else:
            scale_layout = StridedLayout
    else:
        # mx_axis=1: quantization axis after the transpose below.
        value_layout, value_layout_opts = layout.make_default_matmul_mxfp4_w_layout(
            mx_axis=1
        )
        scale_layout, scale_layout_opts = (
            layout.make_default_matmul_mxfp4_w_scale_layout(
                mx_axis=1, num_warps=num_warps
            )
        )
    # Constrain the matmul opt-flags on CUDA; values differ per capability
    # family (SM90 vs SM100) — presumably kernel tuning requirements.
    if current_platform.is_cuda():
        if current_platform.is_device_capability(90):
            constraints = {
                "split_k": 1,
            }
            opt_flags.update_opt_flags_constraints(constraints)
        elif current_platform.is_device_capability_family(100):
            constraints = {
                "is_persistent": True,
                "epilogue_subtile": 1,
            }
            opt_flags.update_opt_flags_constraints(constraints)
    # transpose the tensor so that the quantization axis is on dim1
    quant_tensor = quant_tensor.transpose(-2, -1)
    scale = scale.transpose(-2, -1)
    quant_tensor = convert_layout(
        wrap_torch_tensor(quant_tensor, dtype=FP4), value_layout, **value_layout_opts
    )
    scale = convert_layout(wrap_torch_tensor(scale), scale_layout, **scale_layout_opts)
    return quant_tensor, InFlexData(), scale
def _can_support_mxfp4(
    use_grouped_topk: bool = False,
    topk_group: int | None = None,
    num_expert_group: int | None = None,
    expert_map: torch.Tensor | None = None,
    custom_routing_function: Callable | None = None,
    e_score_correction_bias: torch.Tensor | None = None,
    apply_router_weight_on_input: bool = False,
    scoring_func: str = "softmax",
    activation: MoEActivation = MoEActivation.SWIGLUOAI,
    expert_load_view: torch.Tensor | None = None,
    logical_to_physical_map: torch.Tensor | None = None,
    logical_replica_count: torch.Tensor | None = None,
):
    """Return True when the MoE routing configuration is the plain
    softmax + SWIGLUOAI setup this mxfp4 path supports.

    Any grouped-topk setting, custom routing function, bias-corrected
    scoring, or EPLB bookkeeping tensor disqualifies the config.
    ``expert_map`` is accepted for signature compatibility but not checked.
    """
    # BUG FIX: the tensor-typed arguments were previously evaluated in
    # boolean context (`or expert_load_view`); for a tensor with more than
    # one element torch raises "Boolean value of Tensor ... is ambiguous".
    # Presence is what matters, so compare against None explicitly.
    return not (
        use_grouped_topk
        or topk_group
        or num_expert_group
        or custom_routing_function
        or e_score_correction_bias is not None
        or apply_router_weight_on_input
        or scoring_func != "softmax"
        or activation != MoEActivation.SWIGLUOAI
        or expert_load_view is not None
        or logical_to_physical_map is not None
        or logical_replica_count is not None
    )
def get_padding_alignment():
    """Return the weight-padding alignment for the active GPU target."""
    arch = triton.runtime.driver.active.get_current_target().arch
    # gfx950 requires coarser 256-element alignment; everything else uses 128.
    return 256 if arch in ("gfx950",) else 128
def _dequant_mxfp4(
    x: torch.Tensor, scale: torch.Tensor, float_dtype: torch.dtype
) -> torch.Tensor:
    """Dequantize packed MX-FP4 values to *float_dtype* via amd-quark."""
    # Import lazily so the dependency is only required on the MX-FP4 path.
    try:
        from quark.torch.kernel import mx as _mx
    except ImportError as err:
        raise ImportError(
            "The package `amd-quark` is required to use "
            "MX-FP4 models. Please install it with `pip install "
            "amd-quark`."
        ) from err
    return _mx.dq_mxfp4(x, scale, float_dtype)
def _dequant_mxfp4_fake(
x: torch.Tensor, scale: torch.Tensor, float_dtype: torch.dtype
) -> torch.Tensor:
return torch.empty(
(*x.shape[:-1], x.shape[-1] * 2), dtype=float_dtype, device=x.device
)
def _quant_dequant_mxfp4(
    x: torch.Tensor, scale_calculation_mode: str = "even"
) -> torch.Tensor:
    """Round-trip *x* through MX-FP4 quantization via amd-quark."""
    # Import lazily so the dependency is only required on the MX-FP4 path.
    try:
        from quark.torch.kernel import mx as _mx
    except ImportError as err:
        raise ImportError(
            "The package `amd-quark` is required to use "
            "MX-FP4 models. Please install it with `pip install "
            "amd-quark`."
        ) from err
    return _mx.qdq_mxfp4(x, scale_calculation_mode)
def _quant_dequant_mxfp4_fake(
x: torch.Tensor, scale_calculation_mode: str = "even"
) -> torch.Tensor:
return torch.empty_like(x)
# Protect these operations into a torch custom op to avoid errors as
# torch._dynamo.exc.Unsupported: Attempted to call function marked as skipped
# Explanation: Dynamo does not know how to trace the builtin
# `kernel_ext.PyCapsule.dq_uint8_mxfp4_to_half.` This function is either a
# Python builtin (e.g. _warnings.warn) or a third-party C/C++ Python
# extension (perhaps created with pybind).
# TODO: Make sure there is no way to avoid having these functions
# marked as skipped by dynamo.
try:
    # Register the real/fake pair so torch.compile can trace through the
    # C-extension-backed dequant without graph breaks.
    direct_register_custom_op(
        op_name="dequant_mxfp4",
        op_func=_dequant_mxfp4,
        fake_impl=_dequant_mxfp4_fake,
    )
    # Public handle to the registered op.
    dequant_mxfp4 = torch.ops.vllm.dequant_mxfp4
except AttributeError as error:
    # Registration failed to expose the op; surface the error unchanged.
    raise error

try:
    direct_register_custom_op(
        op_name="quant_dequant_mxfp4",
        op_func=_quant_dequant_mxfp4,
        fake_impl=_quant_dequant_mxfp4_fake,
    )
    # Public handle to the registered op.
    quant_dequant_mxfp4 = torch.ops.vllm.quant_dequant_mxfp4
except AttributeError as error:
    # Registration failed to expose the op; surface the error unchanged.
    raise error
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/quantization/utils/mxfp4_utils.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/base_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
import vllm.envs as envs
from vllm.config import ModelConfig, VllmConfig
from vllm.config.load import LoadConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader.utils import (
initialize_model,
process_weights_after_loading,
)
from vllm.platforms import current_platform
from vllm.tracing import instrument
from vllm.utils.mem_utils import format_gib
from vllm.utils.torch_utils import set_default_torch_dtype
logger = init_logger(__name__)
class BaseModelLoader(ABC):
    """Base class for model loaders."""

    def __init__(self, load_config: LoadConfig):
        self.load_config = load_config

    @abstractmethod
    def download_model(self, model_config: ModelConfig) -> None:
        """Download a model so that it can be immediately loaded."""
        raise NotImplementedError

    @abstractmethod
    def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
        """Load weights into a model. This standalone API allows
        inplace weights loading for an already-initialized model"""
        raise NotImplementedError

    @instrument(span_name="Load model")
    def load_model(
        self, vllm_config: VllmConfig, model_config: ModelConfig, prefix: str = ""
    ) -> nn.Module:
        """Initialize the model, load its weights, then post-process them."""
        load_config = vllm_config.load_config
        # An explicit load-config device overrides the engine device.
        device_override = load_config.device
        load_device = (
            vllm_config.device_config.device
            if device_override is None
            else device_override
        )
        target_device = torch.device(load_device)
        with set_default_torch_dtype(model_config.dtype):
            # Only model construction happens under the device context;
            # weight loading below must run outside it.
            with target_device:
                model = initialize_model(
                    vllm_config=vllm_config, model_config=model_config, prefix=prefix
                )
            log_model_inspection(model)

            logger.debug("Loading weights on %s ...", load_device)
            # Quantization does not happen in `load_weights` but after it
            self.load_weights(model, model_config)
            # Log peak GPU memory after loading weights. This is needed
            # to have test coverage on peak memory for online quantization.
            if current_platform.is_cuda():
                peak_memory = torch.cuda.max_memory_allocated()
                logger.debug_once(
                    "Peak GPU memory after loading weights: %s GiB",
                    format_gib(peak_memory),
                    scope="local",
                )
            process_weights_after_loading(model, model_config, target_device)
        return model.eval()
def log_model_inspection(model: nn.Module) -> None:
    """Log model structure if VLLM_LOG_MODEL_INSPECTION=1."""
    if envs.VLLM_LOG_MODEL_INSPECTION:
        # Imported lazily: only needed when inspection is enabled.
        from vllm.model_inspection import format_model_inspection

        logger.info("vLLM model structure:\n%s", format_model_inspection(model))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/base_loader.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/bitsandbytes_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import fnmatch
import glob
import itertools
import math
import os
from collections.abc import Callable, Generator
from typing import Any
import numpy as np
import torch
from huggingface_hub import HfApi
from packaging import version
from torch import nn
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
from vllm.config import ModelConfig
from vllm.config.load import LoadConfig
from vllm.distributed import (
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.logger import init_logger
from vllm.lora.utils import is_moe_model
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.linear import (
LinearBase,
MergedColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.utils import ParamMapping
from vllm.model_executor.model_loader.weight_utils import (
download_safetensors_index_file_from_hf,
download_weights_from_hf,
filter_duplicate_safetensors_files,
filter_files_not_needed_for_inference,
pt_weights_iterator,
safetensors_weights_iterator,
)
from vllm.model_executor.models import is_pooling_model
from vllm.model_executor.utils import (
get_moe_expert_mapping,
get_packed_modules_mapping,
set_weight_attrs,
)
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_default_torch_dtype
logger = init_logger(__name__)
class BitsAndBytesModelLoader(BaseModelLoader):
"""Model loader to load model weights with BitsAndBytes quantization."""
possible_config_file_names = ["adapter_config.json"]
    def __init__(self, load_config: LoadConfig):
        """Initialize bookkeeping used while streaming and quantizing weights."""
        super().__init__(load_config)
        # Save the module names without sharding.
        self.unsharded_weights_modules: list[str] = []
        # Save the module names that are sharded by column.
        self.column_sharded_weights_modules: list[str] = []
        # Modules whose weights might have fused on disk
        # we need their output_sizes to make shard in flight correctly with TP
        self.maybe_fused_weights_modules: dict[str, list[int]] = {}
        # Store all module names (from transformers) that support
        # BNB quantization.
        self.target_modules: list[str] = []
        # Modules whose tensor parallelism is disabled (loaded whole).
        self.tp_disabled_modules: list[str] = []
        # Store the mapping of expert parameters for MoE models.
        self.expert_params_mapping: list[tuple[str, str, int, str]] = []
        # mapping weight names from transformers to vllm.
        self.weight_mapper: Callable = lambda name: name
        # True when the checkpoint is already bnb-quantized on disk.
        self.pre_quant: bool = False
        # True for 8-bit pre-quantized checkpoints (else 4-bit path).
        self.load_8bit: bool = False
        # True for pooling models (affects weight-name prefixing).
        self.is_pool_model: bool = False
def _get_weight_files(
self,
model_name_or_path: str,
allowed_patterns: list[str],
revision: str | None = None,
) -> tuple[str, list[str], str]:
"""Retrieve weight files. Download the files if necessary.
Return the weight files and the file pattern."""
is_local = os.path.isdir(model_name_or_path)
if is_local:
for pattern in allowed_patterns:
weight_files = glob.glob(os.path.join(model_name_or_path, pattern))
if weight_files:
return model_name_or_path, weight_files, pattern
else:
hf_api = HfApi()
repo_files = hf_api.list_repo_files(repo_id=model_name_or_path)
for pattern in allowed_patterns:
matching_files = fnmatch.filter(repo_files, pattern)
if matching_files:
hf_folder = download_weights_from_hf(
model_name_or_path,
self.load_config.download_dir,
[pattern],
revision,
ignore_patterns=self.load_config.ignore_patterns,
)
return (
hf_folder,
glob.glob(os.path.join(hf_folder, pattern)),
pattern,
)
raise RuntimeError(f"No model weights found in: `{model_name_or_path}`")
def _prepare_weights(
self, model_name_or_path: str, revision: str | None
) -> tuple[list[str], bool]:
"""Prepare weight files for the model."""
allowed_patterns = ["*.safetensors", "*.bin", "*.pt"]
hf_folder, hf_weights_files, matched_pattern = self._get_weight_files(
model_name_or_path, allowed_patterns, revision
)
use_safetensors = matched_pattern == "*.safetensors"
is_local = os.path.isdir(model_name_or_path)
index_file = SAFE_WEIGHTS_INDEX_NAME
if use_safetensors:
# For models like Mistral-7B-Instruct-v0.3
# there are both sharded safetensors files and a consolidated
# safetensors file. Using both breaks.
# Here, we download the `model.safetensors.index.json` and filter
# any files not found in the index.
if not is_local:
download_safetensors_index_file_from_hf(
model_name_or_path,
index_file,
self.load_config.download_dir,
revision,
)
hf_weights_files = filter_duplicate_safetensors_files(
hf_weights_files, hf_folder, index_file
)
else:
hf_weights_files = filter_files_not_needed_for_inference(hf_weights_files)
if len(hf_weights_files) == 0:
raise RuntimeError(
f"Cannot find any model weights with `{model_name_or_path}`"
)
return hf_weights_files, use_safetensors
def _hf_weight_iter(self, hf_weights_files, use_safetensors: bool):
def _maybe_pool_model(module_name: str):
# For pool model, we need to add the prefix `model.`
# for the weight name if possible.
if (
self.is_pool_model
and self.target_modules[0].startswith("model.")
and not module_name.startswith("model.")
):
return "model." + module_name
return module_name
if use_safetensors:
iterator = safetensors_weights_iterator(
hf_weights_files,
self.load_config.use_tqdm_on_load,
)
else:
iterator = pt_weights_iterator(
hf_weights_files,
self.load_config.use_tqdm_on_load,
self.load_config.pt_load_map_location,
)
for org_name, param in iterator:
# mapping weight names from transformers to vllm while preserving
# original names.
mapped_name = self.weight_mapper(org_name)
mapped_name = _maybe_pool_model(mapped_name)
yield org_name, mapped_name, param
def _get_quantized_weights_iterator(
self,
model_name_or_path: str,
revision: str | None,
) -> tuple[Generator[tuple[str, torch.Tensor], None, None], dict[str, Any]]:
"""Get an iterator to the model weights with bitsandbytes quantization,
as well as the quantization state dictionary."""
# only load the bitsandbytes module when needed
try:
import bitsandbytes
if version.parse(bitsandbytes.__version__) < version.parse("0.46.1"):
raise ImportError(
"bitsandbytes version is wrong. Please "
"install bitsandbytes>=0.46.1."
)
except ImportError as err:
raise ImportError(
"Please install bitsandbytes>=0.46.1 via "
"`pip install bitsandbytes>=0.46.1` to use "
"bitsandbytes quantizer."
) from err
hf_weights_files, use_safetensors = self._prepare_weights(
model_name_or_path, revision
)
quant_state_dict: dict[str, Any] = {}
if self.pre_quant:
if self.load_8bit:
return self._quantized_8bit_generator(
hf_weights_files, use_safetensors, quant_state_dict
), quant_state_dict
else:
return self._quantized_4bit_generator(
hf_weights_files, use_safetensors, quant_state_dict
), quant_state_dict
return self._unquantized_generator(
hf_weights_files, use_safetensors, quant_state_dict
), quant_state_dict
def _is_8bit_weight_name(self, weight_name: str):
quantized_suffix = {".scb", ".weight_format"}
return any(weight_name.lower().endswith(suffix) for suffix in quantized_suffix)
def _is_4bit_weight_name(self, weight_name: str):
quantized_suffix = {
"absmax",
"quant_map",
"nested_absmax",
"nested_quant_map",
"bitsandbytes",
}
suffix = weight_name.split(".")[-1]
return any(q_suffix in suffix for q_suffix in quantized_suffix)
def _quantized_8bit_generator(
self, hf_weights_files, use_safetensors, quant_state_dict
) -> Generator:
for (
org_weight_name,
mapped_weight_name,
weight_tensor,
) in self._hf_weight_iter(hf_weights_files, use_safetensors):
if not mapped_weight_name.lower().endswith(".scb"):
continue
weight_key = mapped_weight_name.lower().replace(".scb", ".weight")
quant_state_dict[weight_key] = weight_tensor
for (
org_weight_name,
mapped_weight_name,
weight_tensor,
) in self._hf_weight_iter(hf_weights_files, use_safetensors):
if self._is_8bit_weight_name(mapped_weight_name):
continue
if mapped_weight_name in quant_state_dict:
set_weight_attrs(weight_tensor, {"load_in_8bit": True})
yield org_weight_name, weight_tensor
else:
yield org_weight_name, weight_tensor
    def _quantized_4bit_generator(
        self, hf_weights_files, use_safetensors, quant_state_dict
    ) -> Generator:
        """Yield weights from a 4-bit pre-quantized checkpoint, rebuilding a
        bitsandbytes ``QuantState`` per weight from the stored state tensors
        and recording it in *quant_state_dict*."""
        from bitsandbytes.functional import QuantState

        # First iterate over all quant state weights
        weight_iterator = self._hf_weight_iter(hf_weights_files, use_safetensors)
        temp_state_dict = {}
        for (
            org_weight_name,
            mapped_weight_name,
            weight_tensor,
        ) in weight_iterator:
            if not self._is_4bit_weight_name(mapped_weight_name):
                continue
            # bitsandbytes library requires
            # weight.quant_state.bitsandbytes__* in CPU
            if "quant_state.bitsandbytes" in mapped_weight_name:
                temp_state_dict[mapped_weight_name] = weight_tensor.cpu().data
            else:
                temp_state_dict[mapped_weight_name] = weight_tensor

        # Closure to parse quant_state for each prequant weight
        def _parse_quant_state(param_name: str, temp_state_dict: dict) -> QuantState:
            # Gather every state tensor whose name is prefixed by this param.
            quant_state = {}
            for k in temp_state_dict:
                if param_name + "." in k:
                    quant_state[k] = temp_state_dict[k]
            return QuantState.from_dict(
                quant_state, device=current_platform.device_type
            )

        # Second iterate over all prequant and normal weights
        # pre quantized weights would have a quant_state
        for (
            org_weight_name,
            mapped_weight_name,
            weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            if self._is_4bit_weight_name(mapped_weight_name):
                continue

            # Either NF4 or FP4 quant-state markers identify a prequant weight.
            if (
                f"{mapped_weight_name}.quant_state.bitsandbytes__nf4" in temp_state_dict
            ) or (
                f"{mapped_weight_name}.quant_state.bitsandbytes__fp4" in temp_state_dict
            ):
                quant_state = _parse_quant_state(mapped_weight_name, temp_state_dict)
                quant_state_dict[mapped_weight_name] = quant_state
                yield org_weight_name, weight_tensor
            else:
                yield org_weight_name, weight_tensor
    def _unquantized_generator(
        self, hf_weights_files, use_safetensors, quant_state_dict
    ) -> Generator:
        """Yield weights from a full-precision checkpoint, quantizing each
        BNB-targeted weight to NF4 on the fly after slicing out this rank's
        tensor-parallel shard; quant states are recorded in
        *quant_state_dict*."""
        from bitsandbytes.functional import quantize_4bit

        global_tp_size = get_tensor_model_parallel_world_size()
        global_tp_rank = get_tensor_model_parallel_rank()

        # A weight matches a module when it is exactly `<module>.weight`.
        check_match = (
            lambda weight_name, module_name: weight_name.removesuffix(".weight")
            == module_name
        )

        for (
            org_weight_name,
            mapped_weight_name,
            weight_tensor,
        ) in self._hf_weight_iter(hf_weights_files, use_safetensors):
            # override tp_size and tp_rank if the module has disabled TP
            if any(
                tp_disabled_module in mapped_weight_name
                for tp_disabled_module in self.tp_disabled_modules
            ):
                tp_size = 1
                tp_rank = 0
            else:
                tp_size = global_tp_size
                tp_rank = global_tp_rank

            if any(
                target_module in mapped_weight_name
                for target_module in self.target_modules
            ) and mapped_weight_name.endswith(".weight"):
                # Without sharding
                if any(
                    check_match(mapped_weight_name, module)
                    for module in self.unsharded_weights_modules
                ):
                    weight_sub_tensor = weight_tensor
                # Shard by column
                elif any(
                    check_match(mapped_weight_name, module)
                    for module in self.column_sharded_weights_modules
                ):
                    total_size = weight_tensor.size(-1)
                    start_index = total_size // tp_size * tp_rank
                    end_index = total_size // tp_size * (tp_rank + 1)
                    weight_sub_tensor = weight_tensor[..., start_index:end_index]
                # Weights have fused on disk. In this case, we assume that the
                # weight and module use same name.
                elif any(
                    check_match(mapped_weight_name, module)
                    for module in self.maybe_fused_weights_modules
                ):
                    # special case for fused weights
                    # get the size of each shard weight tensor
                    total_shard_sizes = next(
                        (
                            sizes
                            for module, sizes in self.maybe_fused_weights_modules.items()  # noqa: E501
                            if check_match(mapped_weight_name, module)
                        )
                    )
                    total_size = weight_tensor.size(0)
                    assert total_size == sum(total_shard_sizes)
                    # get the start/end index of each shard weight tensor
                    total_start_index = list(
                        itertools.accumulate([0] + total_shard_sizes)
                    )[:-1]
                    shard_weights_index = [
                        (
                            idx + size // tp_size * tp_rank,
                            idx + size // tp_size * (tp_rank + 1),
                        )
                        for idx, size in zip(total_start_index, total_shard_sizes)
                    ]
                    # slice and reorder the weight tensor
                    weight_tensor = [
                        weight_tensor[start_index:end_index, ...]
                        for start_index, end_index in shard_weights_index
                    ]
                    weight_sub_tensor = torch.cat(weight_tensor, dim=0)
                # Shard by row
                else:
                    total_size = weight_tensor.size(0)
                    start_index = total_size // tp_size * tp_rank
                    end_index = total_size // tp_size * (tp_rank + 1)
                    weight_sub_tensor = weight_tensor[start_index:end_index, ...]

                # bitsandbytes requires data in GPU
                if weight_sub_tensor.is_cuda:
                    loaded_weight = weight_sub_tensor
                else:
                    loaded_weight = weight_sub_tensor.to(
                        device=current_platform.device_type
                    )

                # remove the following after the issue is fixed:
                # https://github.com/bitsandbytes-foundation/bitsandbytes/issues/1342
                if loaded_weight.is_contiguous() is False:
                    loaded_weight = loaded_weight.contiguous()

                # Quantize in float32 for numerically stable scale computation.
                with set_default_torch_dtype(torch.float32):
                    processed_weight, quant_state = quantize_4bit(
                        loaded_weight,
                        compress_statistics=True,
                        quant_type="nf4",
                    )
                quant_state_dict[mapped_weight_name] = quant_state
            else:
                # Non-targeted weights pass through unquantized.
                processed_weight = weight_tensor
            yield org_weight_name, processed_weight
def _get_bnb_target_modules(self, model: nn.Module) -> None:
"""
Identify and collect all modules that support BitsAndBytes
quantization.
"""
for name, module in model.named_modules():
if isinstance(module, LinearBase) and hasattr(
module.quant_method, "quant_config"
):
if modules_info := self.modules_mapping.get_sub_modules(name):
# Map vllm's names to transformers's names.
rep_name, sub_modules = modules_info
for sub_name in sub_modules:
new_name = name.replace(rep_name, sub_name)
self.target_modules.append(new_name)
if module.disable_tp:
self.tp_disabled_modules.append(new_name)
# Add original module name even if the module has stacked map,
# in case model has a mixture of disk-merged and disk-split
# weights with same last name.
self.target_modules.append(name)
if module.disable_tp:
self.tp_disabled_modules.append(name)
elif isinstance(module, FusedMoE) and hasattr(
module.quant_method, "quant_config"
):
# TODO: support FusedMoE with prequant and 8bit.
if self.pre_quant and self.load_8bit:
raise ValueError(
"Prequant BitsAndBytes 8bit models with FusedMoE "
"is not supported yet."
)
# Get the corresponding weight name using module name and
# expert_params_mapping.
for exp in self.expert_params_mapping:
weight_name = exp[1]
rep_name = name.replace("experts", "") + weight_name.removesuffix(
"."
)
self.target_modules.append(rep_name)
assert self.target_modules, (
"vLLM currently does not support BNB quantization for"
)
f" {type(model).__name__}"
def _classify_module_sharding(self, model: nn.Module):
    """
    Categorize modules based on their weight sharding requirements
    for tensor parallelism.

    Fills three collections consumed later during weight loading:
    ``self.unsharded_weights_modules`` (weights kept replicated),
    ``self.maybe_fused_weights_modules`` (name -> output sizes for weights
    that may be fused on disk), and ``self.column_sharded_weights_modules``
    (weights partitioned along the last dimension).
    """
    for name, module in model.named_modules():
        # Some modules like `ReplicatedLinear` should not have their weights
        # sharded. The reason for implementing it this way is to avoid new
        # static variable in the model implementation.
        if isinstance(module, (ReplicatedLinear,)):
            self.unsharded_weights_modules.append(name)
        # `QKVParallelLinear` and `MergedColumnParallelLinear` might have
        # fused weights on disk. We need to use the output sizes of these
        # modules to shard the weights correctly.
        elif isinstance(module, (QKVParallelLinear, MergedColumnParallelLinear)):
            self.maybe_fused_weights_modules[name] = module.output_sizes
        # In TP, these weights are partitioned along the column
        # dimension (dim=-1)
        elif isinstance(module, (RowParallelLinear,)):
            self.column_sharded_weights_modules.append(name)
        elif isinstance(module, FusedMoE):
            # For MoE modules, only the per-expert "w2" entries from the
            # expert params mapping are column-sharded; derive their
            # checkpoint-side names from the mapping.
            expert_mapping = self.expert_params_mapping
            for exp in expert_mapping:
                if exp[-1] == "w2":
                    weight_name = exp[1]
                    rep_name = name.replace(
                        "experts", ""
                    ) + weight_name.removesuffix(".")
                    self.column_sharded_weights_modules.append(rep_name)
def _verify_model_compatibility(
    self, model: nn.Module, model_config: ModelConfig
) -> None:
    """
    Verify that the model is compatible with BitsAndBytes quantization.

    Also derives ``self.pre_quant`` and ``self.load_8bit`` from the HF
    quantization config when the checkpoint is already BNB-quantized.

    Raises:
        AttributeError: if the model lacks ``load_weights`` or
            ``packed_modules_mapping``.
        ValueError: if the checkpoint is prequantized with a non-BNB
            method, or if a prequantized BNB model is used with TP > 1.
    """
    if not hasattr(model, "load_weights"):
        raise AttributeError(
            "The required method 'load_weights' is not defined in class"
            f" {type(model).__name__}."
        )

    if not hasattr(model, "packed_modules_mapping"):
        raise AttributeError(
            f"Model {type(model).__name__} does not support BitsAndBytes "
            "quantization yet. No 'packed_modules_mapping' found."
        )

    quant_config = getattr(model_config.hf_config, "quantization_config", None)
    if quant_config and (quant_method := quant_config.get("quant_method")):
        if quant_method == "bitsandbytes":
            self.pre_quant = True
        else:
            raise ValueError(
                f"BitsAndBytes loader does not support {quant_method} quantization"
            )

    # The quant_states in pre_quantized models cannot work with a split
    # weight tensor. So TP does not work with pre_quantized bnb models.
    if self.pre_quant and get_tensor_model_parallel_world_size() > 1:
        raise ValueError(
            "Prequant BitsAndBytes models with tensor parallelism is not "
            "supported. Please try with pipeline parallelism."
        )

    if quant_config and self.pre_quant:
        self.load_8bit = quant_config.get("load_in_8bit", False)
def _initialize_loader_state(
    self, model: nn.Module, model_config: ModelConfig
) -> None:
    """
    Initialize the loader's internal state based on the model and
    configuration.

    Sets the pooling flag, the packed-modules mapping, the MoE expert
    mapping (for MoE models), an optional HF->vLLM name mapper, and then
    discovers the BNB target modules and their sharding categories.

    Raises:
        AttributeError: for MoE models that expose no expert mapping.
    """
    self.is_pool_model = is_pooling_model(model)
    self.modules_mapping = ParamMapping(get_packed_modules_mapping(model))
    if is_moe_model(model):
        self.expert_params_mapping = get_moe_expert_mapping(model)
        if not self.expert_params_mapping:
            raise AttributeError(
                f"MoE Model {type(model).__name__} does not support "
                "BitsAndBytes quantization yet. Ensure this model has "
                "'get_expert_mapping' method."
            )
    # For some models like Molmo, we need to use hf_to_vllm_mapper
    # to ensure correct loading of weights.
    if hf_to_vllm_mapper := getattr(model, "hf_to_vllm_mapper", None):
        self.weight_mapper = lambda name: hf_to_vllm_mapper._map_name(name)

    self._get_bnb_target_modules(model)
    self._classify_module_sharding(model)
def _dequantize_dq(self, quant_states: Any):
    """
    When BNB employs Double Quantization, we perform the dequantization of
    these constants during weight loading rather than at inference time,
    thereby avoiding this computational overhead during inference. This
    comes at the cost of increased memory usage.

    Accepts either a single quant state or a dict of them; mutates the
    nested states in place and returns the input.
    """
    from bitsandbytes.functional import QuantState, dequantize_blockwise

    def _expand_nested(state):
        """Flatten one double-quantized QuantState in place."""
        # Only nested (double-quantized) QuantState objects need work.
        if not (isinstance(state, QuantState) and state.nested):
            return
        # Mirrors bitsandbytes 0.45.3 functional.py L1352-L1356:
        # https://github.com/bitsandbytes-foundation/bitsandbytes/blob/0.45.3/bitsandbytes/functional.py#L1352-#L1356
        real_absmax = dequantize_blockwise(state.absmax, state.state2)
        real_absmax += state.offset
        # Ensure float32 dtype
        real_absmax = (
            real_absmax if real_absmax.dtype == torch.float32 else real_absmax.float()
        )
        state.absmax = real_absmax
        state.nested = False
        state.offset = None
        state.state2 = None

    if isinstance(quant_states, dict):
        for state in quant_states.values():
            _expand_nested(state)
    else:
        _expand_nested(quant_states)
    return quant_states
def _fuse_moe_quant_states(self, model: nn.Module, quant_states_dict: dict) -> dict:
    """
    This function consolidates individual expert quantization states into
    fused representations for w13 and w2.

    For every ``FusedMoE`` module, the per-expert entries are removed from
    ``quant_states_dict`` and replaced by two fused ``QuantState`` objects
    returned under the keys ``<module>.w13_weight`` and
    ``<module>.w2_weight`` (absmax concatenated in expert order, w1/w3
    interleaved for w13).

    Raises:
        ValueError: if an expert mapping entry has an unknown shard id.
    """
    from bitsandbytes.functional import QuantState

    if not self.expert_params_mapping:
        return dict()
    expert_mapping = self.expert_params_mapping
    expert_qs_dict = {}
    for name, module in model.named_modules():
        if not isinstance(module, FusedMoE):
            continue
        w1_states_lst = []
        w2_states_lst = []
        w3_states_lst = []
        for exp in expert_mapping:
            shard_id = exp[-1]
            if shard_id not in ("w1", "w2", "w3"):
                raise ValueError(
                    f"shard_id must be ['w1','w2','w3'] but got {shard_id}."
                )
            layer_prefix = name.split("experts")[0]
            weight_qual_name = layer_prefix + exp[1] + "weight"
            # Expand double quantization now so fused absmax is flat.
            quant_state = self._dequantize_dq(quant_states_dict[weight_qual_name])
            if shard_id == "w1":
                w1_states_lst.append(quant_state)
            elif shard_id == "w2":
                w2_states_lst.append(quant_state)
            else:
                w3_states_lst.append(quant_state)
            # The per-expert entry is superseded by the fused state below.
            del quant_states_dict[weight_qual_name]
        assert len(w1_states_lst) == len(w2_states_lst) == len(w3_states_lst)
        w13_absmax_lst = []
        w2_absmax_lst = []
        w13_total_dim0 = 0
        w2_total_dim0 = 0
        for w1_qs, w2_qs, w3_qs in zip(w1_states_lst, w2_states_lst, w3_states_lst):
            assert w1_qs.shape == w3_qs.shape
            assert w1_qs.blocksize == w2_qs.blocksize == w3_qs.blocksize
            assert w1_qs.dtype == w2_qs.dtype == w3_qs.dtype
            # w1 and w3 are interleaved in storage
            w13_absmax_lst.append(w1_qs.absmax)
            w13_absmax_lst.append(w3_qs.absmax)
            w2_absmax_lst.append(w2_qs.absmax)
            w13_total_dim0 += w1_qs.shape[0] + w3_qs.shape[0]
            w2_total_dim0 += w2_qs.shape[0]
        w13_absmax = torch.cat(w13_absmax_lst)
        w2_absmax = torch.cat(w2_absmax_lst)
        # Create fused quantization state for w13.
        w13_qs = QuantState(
            absmax=w13_absmax,
            shape=(w13_total_dim0, w1_states_lst[0].shape[1]),
            code=w1_states_lst[0].code,
            blocksize=w1_states_lst[0].blocksize,
            quant_type="nf4",
            dtype=w1_states_lst[0].dtype,
        )
        # Create fused quantization state for w2.
        w2_qs = QuantState(
            absmax=w2_absmax,
            shape=(w2_total_dim0, w2_states_lst[0].shape[1]),
            code=w2_states_lst[0].code,
            blocksize=w2_states_lst[0].blocksize,
            quant_type="nf4",
            dtype=w2_states_lst[0].dtype,
        )
        # The weight suffixes .w13_weight and .w2_weight are consistent
        # with the param in BitsAndBytesMoEMethod.
        w13_weight_name = name + ".w13_weight"
        w2_weight_name = name + ".w2_weight"
        expert_qs_dict[w13_weight_name] = w13_qs
        expert_qs_dict[w2_weight_name] = w2_qs
    return expert_qs_dict
def _stack_quantization_states(
    self, model: nn.Module, quant_state_dict: dict
) -> dict[str, dict[int, Any]]:
    """
    Regroup per-shard quant states under their stacked parameter names.

    Checkpoint-side names (e.g. ``...q_proj.weight``) are renamed to the
    packed vLLM parameter (e.g. ``...qkv_proj.weight``) via the inverse
    packed mapping, and each quant state is stored under that parameter's
    shard index. Names missing from the model's parameters are skipped.
    """
    stacked_quant_state_dict: dict[str, dict[int, Any]] = {}
    # TODO: Change this lazy import to normal import
    # after the checks are updated to run on a new version
    from vllm.model_executor.models.utils import is_pp_missing_parameter

    param_dict = dict(model.named_parameters())
    for quant_param_name in quant_state_dict:
        if is_pp_missing_parameter(quant_param_name, model):
            continue

        non_stacked_param_name = quant_param_name
        shard_index = 0
        for shard_name, (
            weight_name,
            index,
        ) in self.modules_mapping.inverse_packed_mapping.items():
            # Some models, such as MiniCPM V2.5/2.6, contain both
            # module names 'kv_proj' and 'qkv_proj'. To prevent 'kv_proj'
            # from being incorrectly identified as being present in
            # 'vpm.encoder.layers.0.self_attn.qkv_proj.weight
            shard_pos = quant_param_name.find(shard_name)
            can_correct_rename = (shard_pos > 0) and (
                quant_param_name[shard_pos - 1] == "."
            )
            # If the quant_param_name is packed, it won't occur in the
            # param_dict before renaming.
            new_quant_param_name = quant_param_name.replace(shard_name, weight_name)
            need_rename = (quant_param_name not in param_dict) and (
                new_quant_param_name in param_dict
            )
            if can_correct_rename and need_rename:
                shard_index = index
                quant_param_name = new_quant_param_name
                break

        # Models like Clip/Siglip may skip some layers in initialization,
        # causing unused quant_param_name in state_dict.
        if quant_param_name not in param_dict:
            continue
        if quant_param_name not in stacked_quant_state_dict:
            stacked_quant_state_dict[quant_param_name] = {}
        stacked_quant_state_dict[quant_param_name][shard_index] = quant_state_dict[
            non_stacked_param_name
        ]
    return stacked_quant_state_dict
def _bind_quant_states_to_params(
    self, model: nn.Module, stacked_quant_state_dict: dict
) -> None:
    """
    Attach quant states and shard offsets to the model's parameters.

    ``stacked_quant_state_dict`` maps parameter names either to a dict of
    shard-index -> quant state, or to a single fused state (the latter
    gets no per-shard offsets).

    Raises:
        ValueError: if a quantized parameter has no ``pack_factor`` set.
    """
    # save quant_states and offsets as the attributes of the parameters
    param_dict = dict(model.named_parameters())
    for param_name, param in param_dict.items():
        if param_name in stacked_quant_state_dict:
            quant_states = stacked_quant_state_dict[param_name]
            # Dequantize double quantized values during weight loading.
            self._dequantize_dq(quant_states)
            set_weight_attrs(param, {"bnb_quant_state": quant_states})

            # Non-dict entries (e.g. fused MoE states) carry a single
            # quant state and need no per-shard bookkeeping.
            if not isinstance(quant_states, dict):
                continue

            pack_ratio = getattr(param, "pack_factor", -1)
            if pack_ratio == -1:
                raise ValueError(f"pack_factor not set for parameter {param_name}.")

            num_elements = [0] * len(quant_states)
            for seq, quant_state in quant_states.items():
                num_elements[seq] = math.prod(quant_state.shape) // pack_ratio

            offsets = np.concatenate(([0], np.cumsum(num_elements)))
            # Make torch infer_schema happy
            offsets = torch.tensor(offsets).cpu()
            set_weight_attrs(param, {"bnb_shard_offsets": offsets})

            if self.load_8bit:
                set_weight_attrs(
                    param, {"matmul_state": [None] * len(quant_states)}
                )
def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
    """
    Quantize and load checkpoint weights into ``model`` with BitsAndBytes.

    Streams (pre-)quantized weights through the model's ``load_weights``
    hook, then fuses MoE quant states, stacks per-shard states, and binds
    everything to the corresponding parameters.

    Raises:
        ValueError: if tracked parameters were left uninitialized.
    """
    self._verify_model_compatibility(model, model_config)
    self._initialize_loader_state(model, model_config)

    logger.info(
        "Loading weights with BitsAndBytes quantization. May take a while ..."
    )
    qweight_iterator, quant_state_dict = self._get_quantized_weights_iterator(
        model_config.model,
        model_config.revision,
    )
    weights_to_load = {name for name, _ in model.named_parameters()}
    loaded_weights = model.load_weights(qweight_iterator)
    # Some models may have weights loading tracker unimplemented.
    if loaded_weights is not None:
        weights_not_loaded = weights_to_load - loaded_weights
        if weights_not_loaded:
            raise ValueError(
                "Following weights were not initialized from "
                f"checkpoint: {weights_not_loaded}"
            )

    expert_quant_state_dict = self._fuse_moe_quant_states(model, quant_state_dict)

    stacked_quant_state_dict = self._stack_quantization_states(
        model, quant_state_dict
    )
    # Fused MoE entries take lower priority than regular stacked entries
    # on key collision (dict-merge order below).
    stacked_quant_state_dict = {
        **expert_quant_state_dict,
        **stacked_quant_state_dict,
    }

    self._bind_quant_states_to_params(model, stacked_quant_state_dict)
    torch.cuda.empty_cache()
def download_model(self, model_config: ModelConfig) -> None:
    """Fetch the checkpoint files locally without loading any weights."""
    model_ref, revision = model_config.model, model_config.revision
    self._prepare_weights(model_ref, revision)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/bitsandbytes_loader.py",
"license": "Apache License 2.0",
"lines": 730,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/default_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import dataclasses
import glob
import os
import time
from collections.abc import Generator, Iterable
from typing import cast
import torch
from torch import nn
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
from vllm.config import ModelConfig
from vllm.config.load import LoadConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization.torchao import torchao_version_at_least
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.weight_utils import (
download_safetensors_index_file_from_hf,
download_weights_from_hf,
fastsafetensors_weights_iterator,
filter_duplicate_safetensors_files,
filter_files_not_needed_for_inference,
get_quant_config,
maybe_download_from_modelscope,
multi_thread_pt_weights_iterator,
multi_thread_safetensors_weights_iterator,
np_cache_weights_iterator,
pt_weights_iterator,
safetensors_weights_iterator,
)
from vllm.tracing import instrument
from vllm.transformers_utils.repo_utils import list_filtered_repo_files
logger = init_logger(__name__)
class DefaultModelLoader(BaseModelLoader):
    """Model loader that can load different file types from disk."""

    # default number of thread when enable multithread weight loading
    DEFAULT_NUM_THREADS = 8

    @dataclasses.dataclass
    class Source:
        """A source for weights."""

        model_or_path: str
        """The model ID or path."""

        revision: str | None
        """The optional model revision."""

        prefix: str = ""
        """A prefix to prepend to all weights."""

        fall_back_to_pt: bool = True
        """Whether .pt weights can be used."""

        allow_patterns_overrides: list[str] | None = None
        """If defined, weights will load exclusively using these patterns."""

    # perf_counter() timestamps taken before/after weight loading; used to
    # report load duration in load_weights().
    counter_before_loading_weights: float = 0.0
    counter_after_loading_weights: float = 0.0
def __init__(self, load_config: LoadConfig):
    """Initialize the base loader and validate extra-config keys."""
    super().__init__(load_config)
    recognized = {"enable_multithread_load", "num_threads"}
    extras = load_config.model_loader_extra_config
    unknown = set(extras.keys()) - recognized
    if unknown:
        raise ValueError(
            f"Unexpected extra config keys for load format "
            f"{load_config.load_format}: "
            f"{unknown}"
        )
def _prepare_weights(
    self,
    model_name_or_path: str,
    revision: str | None,
    fall_back_to_pt: bool,
    allow_patterns_overrides: list[str] | None,
) -> tuple[str, list[str], bool]:
    """Prepare weights for the model.

    If the model is not local, it will be downloaded.

    Returns:
        ``(hf_folder, hf_weights_files, use_safetensors)``: local folder
        with the checkpoint, the weight files to read, and whether they
        are safetensors.

    Raises:
        ValueError: on an unknown load format.
        RuntimeError: if no weight files can be found.
    """
    model_name_or_path = (
        maybe_download_from_modelscope(model_name_or_path, revision)
        or model_name_or_path
    )

    is_local = os.path.isdir(model_name_or_path)
    load_format = self.load_config.load_format
    use_safetensors = False
    index_file = SAFE_WEIGHTS_INDEX_NAME
    # First check for 'auto' format that mistral files format are present.
    # This is to load mistral models with official format by default.
    if load_format == "auto":
        load_format = (
            "mistral"
            if len(
                list_filtered_repo_files(
                    model_name_or_path=model_name_or_path,
                    allow_patterns=["consolidated*.safetensors"],
                    revision=revision,
                )
            )
            > 0
            else "hf"
        )

    # Some quantized models use .pt files for storing the weights.
    if load_format == "hf":
        allow_patterns = ["*.safetensors", "*.bin"]
    elif load_format == "safetensors" or load_format == "fastsafetensors":
        use_safetensors = True
        allow_patterns = ["*.safetensors"]
    elif load_format == "mistral":
        use_safetensors = True
        allow_patterns = ["consolidated*.safetensors"]
        index_file = "consolidated.safetensors.index.json"
    elif load_format == "pt":
        allow_patterns = ["*.pt"]
    elif load_format == "npcache":
        allow_patterns = ["*.bin"]
    else:
        raise ValueError(f"Unknown load_format: {load_format}")

    if fall_back_to_pt:
        allow_patterns += ["*.pt"]

    if allow_patterns_overrides is not None:
        allow_patterns = allow_patterns_overrides

    if not is_local:
        hf_folder = download_weights_from_hf(
            model_name_or_path,
            self.load_config.download_dir,
            allow_patterns,
            revision,
            ignore_patterns=self.load_config.ignore_patterns,
        )
    else:
        hf_folder = model_name_or_path

    hf_weights_files: list[str] = []
    # Patterns are tried in priority order; the first that matches wins.
    for pattern in allow_patterns:
        hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
        if len(hf_weights_files) > 0:
            if pattern == "*.safetensors":
                use_safetensors = True
            break

    if use_safetensors:
        # For models like Mistral-7B-Instruct-v0.3
        # there are both sharded safetensors files and a consolidated
        # safetensors file. Using both breaks.
        # Here, we download the `model.safetensors.index.json` and filter
        # any files not found in the index.
        if not is_local:
            download_safetensors_index_file_from_hf(
                model_name_or_path,
                index_file,
                self.load_config.download_dir,
                revision,
            )
        hf_weights_files = filter_duplicate_safetensors_files(
            hf_weights_files, hf_folder, index_file
        )
    else:
        hf_weights_files = filter_files_not_needed_for_inference(hf_weights_files)

    if len(hf_weights_files) == 0:
        raise RuntimeError(
            f"Cannot find any model weights with `{model_name_or_path}`"
        )

    return hf_folder, hf_weights_files, use_safetensors
def _get_weights_iterator(
    self, source: "Source"
) -> Generator[tuple[str, torch.Tensor], None, None]:
    """Get an iterator for the model weights based on the load format.

    Yields ``(source.prefix + name, tensor)`` pairs from the files
    prepared by :meth:`_prepare_weights`.
    """
    extra_config = self.load_config.model_loader_extra_config
    hf_folder, hf_weights_files, use_safetensors = self._prepare_weights(
        source.model_or_path,
        source.revision,
        source.fall_back_to_pt,
        source.allow_patterns_overrides,
    )
    if self.load_config.load_format == "npcache":
        # Currently np_cache only support *.bin checkpoints
        assert use_safetensors is False
        weights_iterator = np_cache_weights_iterator(
            source.model_or_path,
            self.load_config.download_dir,
            hf_folder,
            hf_weights_files,
            self.load_config.use_tqdm_on_load,
        )
    elif use_safetensors:
        if self.load_config.load_format == "fastsafetensors":
            weights_iterator = fastsafetensors_weights_iterator(
                hf_weights_files,
                self.load_config.use_tqdm_on_load,
            )
        else:
            if extra_config.get("enable_multithread_load"):
                weights_iterator = multi_thread_safetensors_weights_iterator(
                    hf_weights_files,
                    self.load_config.use_tqdm_on_load,
                    max_workers=extra_config.get(
                        "num_threads", self.DEFAULT_NUM_THREADS
                    ),
                )
            else:
                weights_iterator = safetensors_weights_iterator(
                    hf_weights_files,
                    self.load_config.use_tqdm_on_load,
                    self.load_config.safetensors_load_strategy,
                )
    else:
        if extra_config.get("enable_multithread_load"):
            weights_iterator = multi_thread_pt_weights_iterator(
                hf_weights_files,
                self.load_config.use_tqdm_on_load,
                self.load_config.pt_load_map_location,
                max_workers=extra_config.get(
                    "num_threads", self.DEFAULT_NUM_THREADS
                ),
            )
        else:
            weights_iterator = pt_weights_iterator(
                hf_weights_files,
                self.load_config.use_tqdm_on_load,
                self.load_config.pt_load_map_location,
            )

    # Record the start time only once, across primary/secondary sources.
    if self.counter_before_loading_weights == 0.0:
        self.counter_before_loading_weights = time.perf_counter()
    # Apply the prefix.
    return ((source.prefix + name, tensor) for (name, tensor) in weights_iterator)
def get_all_weights(
    self,
    model_config: ModelConfig,
    model: nn.Module,
) -> Generator[tuple[str, torch.Tensor], None, None]:
    """Yield all (name, tensor) pairs for the model: first the primary
    checkpoint, then any sources the model declares via
    ``secondary_weights``."""
    primary = DefaultModelLoader.Source(
        model_config.model,
        model_config.revision,
        prefix="",
        fall_back_to_pt=getattr(model, "fall_back_to_pt_during_load", True),
        allow_patterns_overrides=getattr(model, "allow_patterns_overrides", None),
    )
    yield from self._get_weights_iterator(primary)

    extra_sources = cast(
        Iterable[DefaultModelLoader.Source],
        getattr(model, "secondary_weights", ()),
    )
    for extra in extra_sources:
        yield from self._get_weights_iterator(extra)
def download_model(self, model_config: ModelConfig) -> None:
    """Download checkpoint files for *model_config* without loading them."""
    source_path, source_rev = model_config.model, model_config.revision
    self._prepare_weights(
        source_path,
        source_rev,
        fall_back_to_pt=True,
        allow_patterns_overrides=None,
    )
@instrument(span_name="Load weights")
def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
    """Load checkpoint weights into ``model`` and log the elapsed time.

    Raises:
        ValueError: for unquantized models with load tracking, if any
            parameter was not initialized from the checkpoint.
    """
    # torchao-serialized checkpoints (>= 0.15.0) use a dedicated
    # safetensors load strategy.
    if model_config.quantization == "torchao":
        quant_config = get_quant_config(model_config, self.load_config)
        if (
            hasattr(quant_config, "is_checkpoint_torchao_serialized")
            and quant_config.is_checkpoint_torchao_serialized
            and torchao_version_at_least("0.15.0")
        ):
            self.load_config.safetensors_load_strategy = "torchao"
    weights_to_load = {name for name, _ in model.named_parameters()}
    loaded_weights = model.load_weights(self.get_all_weights(model_config, model))
    self.counter_after_loading_weights = time.perf_counter()
    logger.info_once(
        "Loading weights took %.2f seconds",
        self.counter_after_loading_weights - self.counter_before_loading_weights,
        scope="local",
    )
    # We only enable strict check for non-quantized models
    # that have loaded weights tracking currently.
    if model_config.quantization is None and loaded_weights is not None:
        weights_not_loaded = weights_to_load - loaded_weights
        if weights_not_loaded:
            raise ValueError(
                "Following weights were not initialized from "
                f"checkpoint: {weights_not_loaded}"
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/default_loader.py",
"license": "Apache License 2.0",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/dummy_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch.nn as nn
from vllm.config import ModelConfig
from vllm.config.load import LoadConfig
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.weight_utils import initialize_dummy_weights
class DummyModelLoader(BaseModelLoader):
    """Model loader that will set model weights to random values."""

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        # The dummy loader accepts no extra configuration at all.
        extras = load_config.model_loader_extra_config
        if extras:
            raise ValueError(
                f"Model loader extra config is not supported for "
                f"load format {load_config.load_format}"
            )

    def download_model(self, model_config: ModelConfig) -> None:
        # There is no checkpoint to fetch when using dummy weights.
        pass  # Nothing to download

    def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
        # NOTE(woosuk): For accurate performance evaluation, we assign
        # random values to the weights.
        initialize_dummy_weights(model, model_config)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/dummy_loader.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/gguf_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from collections.abc import Generator
import gguf
import regex as re
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoModelForImageTextToText
from vllm.config import ModelConfig, VllmConfig
from vllm.config.load import LoadConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.utils import (
initialize_model,
process_weights_after_loading,
)
from vllm.model_executor.model_loader.weight_utils import (
download_gguf,
get_gguf_extra_tensor_names,
get_gguf_weight_type_map,
gguf_quant_weights_iterator,
)
from vllm.transformers_utils.gguf_utils import detect_gguf_multimodal
from vllm.utils.torch_utils import set_default_torch_dtype
logger = init_logger(__name__)
class GGUFModelLoader(BaseModelLoader):
    """
    Model loader that can load GGUF files. This is useful for loading models
    that are quantized with GGUF and saved in the GGUF format. This loader
    supports loading both full models and sharded models.
    """

    def __init__(self, load_config: LoadConfig):
        """Reject any extra loader configuration; GGUF loading takes none."""
        super().__init__(load_config)
        if load_config.model_loader_extra_config:
            raise ValueError(
                f"Model loader extra config is not supported for "
                f"load format {load_config.load_format}"
            )
def _prepare_weights(self, model_config: ModelConfig):
    """Resolve ``model_config.model`` to a local GGUF file path.

    Accepts a local file, a ``<repo_id>/<filename>.gguf`` reference, or a
    ``<repo_id>:<quant_type>`` reference; downloads from the Hub for the
    latter two.

    Raises:
        ValueError: if the reference matches none of the accepted forms.
    """
    ref = model_config.model
    # Case 1: already a local .gguf file on disk.
    if os.path.isfile(ref):
        return ref
    # Case 2: repo id/filename.gguf
    if "/" in ref and ref.endswith(".gguf"):
        repo_id, filename = ref.rsplit("/", 1)
        return hf_hub_download(repo_id=repo_id, filename=filename)
    # Case 3: repo_id:quant_type
    if "/" in ref and ":" in ref:
        repo_id, quant_type = ref.rsplit(":", 1)
        return download_gguf(
            repo_id,
            quant_type,
            cache_dir=self.load_config.download_dir,
            revision=model_config.revision,
            ignore_patterns=self.load_config.ignore_patterns,
        )
    raise ValueError(
        f"Unrecognised GGUF reference: {ref} "
        "(expected local file, <repo_id>/<filename>.gguf, "
        "or <repo_id>:<quant_type>)"
    )
def _get_gguf_weights_map(self, model_config: ModelConfig):
    """
    GGUF uses this naming convention for their tensors from HF checkpoint:
    `blk.N.BB.weight` and `blk.N.BB.bias`
    where N signifies the block number of a layer, and BB signifies the
    attention/mlp layer components.
    See "Standardized tensor names" in
    https://github.com/ggerganov/ggml/blob/master/docs/gguf.md for details.

    Returns:
        A dict mapping GGUF tensor names to HF parameter names.

    Raises:
        RuntimeError: if the model architecture is unknown to gguf-py, or
            if any HF parameter (other than sideloaded per-expert weights)
            cannot be mapped to a GGUF tensor.
    """
    config = model_config.hf_config
    # Get text config to handle both nested (multimodal) and flat
    # (text-only) config structures. For multimodal models like
    # Gemma3Config, this returns config.text_config. For text-only
    # models, this returns config itself.
    text_config = config.get_text_config()
    model_type = config.model_type
    is_multimodal = (
        hasattr(config, "vision_config") and config.vision_config is not None
    )
    gguf_to_hf_name_map = {}
    # Patterns for HF params that are loaded by other means (merged
    # experts), and therefore allowed to be unmapped below.
    sideload_params: list[re.Pattern] = []
    # hack: ggufs have a different name than transformers
    if model_type == "cohere":
        model_type = "command-r"
    if model_type == "gemma3_text":
        # Gemma3 models use "gemma3_text" in HuggingFace but
        # "gemma3" in GGUF architecture naming
        model_type = "gemma3"
    if model_type in ("deepseek_v3", "deepseek_v2"):
        model_type = "deepseek2"
        # GGUF layer map assumes that we will have a merged expert weights
        # so we need to map them manually
        for idx in range(config.num_hidden_layers):
            gguf_to_hf_name_map[f"blk.{idx}.exp_probs_b.bias"] = (
                f"model.layers.{idx}.mlp.gate.e_score_correction_bias"
            )
            gguf_to_hf_name_map[f"blk.{idx}.ffn_down_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.down_proj.weight"
            )
            gguf_to_hf_name_map[f"blk.{idx}.ffn_gate_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.gate_proj.weight"
            )
            gguf_to_hf_name_map[f"blk.{idx}.ffn_up_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.up_proj.weight"
            )
            sideload_params.append(
                re.compile(
                    f"model\\.layers\\.{idx}"
                    r"\.mlp\.experts\.[0-9]+\.(gate|up|down)_proj\.weight"
                )
            )
    if model_type in ("qwen2_moe", "qwen3_moe"):
        model_type = model_type.replace("_", "")
        # GGUF layer map assumes that we will have a merged expert weights
        # so we need to map them manually
        for idx in range(config.num_hidden_layers):
            gguf_to_hf_name_map[f"blk.{idx}.ffn_down_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.down_proj.weight"
            )
            gguf_to_hf_name_map[f"blk.{idx}.ffn_gate_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.gate_proj.weight"
            )
            gguf_to_hf_name_map[f"blk.{idx}.ffn_up_exps.weight"] = (
                f"model.layers.{idx}.mlp.experts.0.up_proj.weight"
            )
            sideload_params.append(
                re.compile(
                    f"model\\.layers\\.{idx}"
                    r"\.mlp\.experts\.[0-9]+\.(gate|up|down)_proj\.weight"
                )
            )

    # Look up the gguf-py architecture enum for this model type.
    arch = None
    for key, value in gguf.MODEL_ARCH_NAMES.items():
        if value == model_type:
            arch = key
            break
    if arch is None:
        raise RuntimeError(f"Unknown gguf model_type: {model_type}")
    text_num_layers = text_config.num_hidden_layers
    text_name_map = gguf.get_tensor_name_map(arch, text_num_layers)
    if is_multimodal:
        mm_proj_arch = gguf.MODEL_ARCH.MMPROJ
        vision_num_layers = config.vision_config.num_hidden_layers
        vision_name_map = gguf.get_tensor_name_map(mm_proj_arch, vision_num_layers)
    else:
        vision_name_map = None

    # Create dummy model to extract parameter names
    # For multimodal: use AutoModelForImageTextToText to get
    # language + vision + projector params
    # For text-only: use AutoModelForCausalLM to get language model params
    auto_cls = (
        AutoModelForImageTextToText if is_multimodal else AutoModelForCausalLM
    )
    with torch.device("meta"):
        dummy_model = auto_cls.from_config(
            config, trust_remote_code=model_config.trust_remote_code
        )
    state_dict = dummy_model.state_dict()

    if hf_checkpoint_map := getattr(
        dummy_model, "_checkpoint_conversion_mapping", None
    ):

        def revert_hf_rename(name: str) -> str:
            # Undo transformers' checkpoint-name conversion so names match
            # the on-disk convention expected by the GGUF maps.
            for original_name, hf_name in hf_checkpoint_map.items():
                if hf_name in name:
                    name = name.replace(hf_name, original_name).lstrip("^")
            return name

        state_dict = {
            revert_hf_rename(name): tensor for name, tensor in state_dict.items()
        }

    def find_hf_name_in_tensor_map(hf_name: str) -> str | None:
        """
        Map HuggingFace parameter name to GGUF tensor name.

        This function handles the mismatch between HF parameter naming
        conventions and gguf-py's expected format:
        1. Strips 'language_model.' prefix (common in multimodal models)
        2. Converts '_weight' suffix to '.weight' (Gemma3 compatibility)
        3. Searches vision_name_map for multimodal parameters
        4. Falls back to text_name_map for language model parameters

        Args:
            hf_name: Full HuggingFace parameter name (e.g.,
                'model.multi_modal_projector.mm_soft_emb_norm.weight')

        Returns:
            GGUF tensor name with suffix (e.g., 'mm.soft_emb_norm.weight')
            or None if no mapping found
        """
        # Strip 'language_model.' prefix for multimodal models - gguf-py
        # tensor mappings expect parameter names without this prefix.
        # Note: 'model.' prefix should be KEPT for text-only models as
        # gguf-py expects it.
        if hf_name.startswith("language_model."):
            hf_name = hf_name[15:]  # Remove 'language_model.'

        # Parse parameter name and suffix
        if hf_name.endswith((".weight", ".bias")):
            base_name, suffix = hf_name.rsplit(".", 1)
        else:
            base_name, suffix = hf_name, ""

        # Handle '_weight' suffix (Gemma3 naming: parameter ends with
        # '_weight' instead of '.weight')
        if base_name.endswith("_weight"):
            base_name = base_name[:-7]  # Remove '_weight'
            suffix = "weight"

        gguf_name = None
        # Priority 1: Search vision/projector parameters for multimodal models
        if vision_name_map is not None:
            gguf_name = vision_name_map.get_name(base_name)

        # Priority 2: Search text backbone parameters
        if gguf_name is None:
            gguf_name = text_name_map.get_name(base_name)

        if gguf_name is None:
            return None
        return gguf_name + "." + suffix

    # Build mapping and track unmapped parameters
    unmapped_params = []
    for hf_name in state_dict:
        gguf_name_with_suffix = find_hf_name_in_tensor_map(hf_name)
        # Track mapping success
        if gguf_name_with_suffix is not None:
            gguf_to_hf_name_map[gguf_name_with_suffix] = hf_name
            logger.debug("Mapped GGUF %s → HF %s", gguf_name_with_suffix, hf_name)
        elif hf_name not in gguf_to_hf_name_map.values():
            # Parameter not in manual overrides either
            unmapped_params.append(hf_name)

    # All parameters (except those initialized by other means) must be mapped:
    # both vision/projector and backbone
    if unmapped_params:
        unmapped_params = list(
            filter(
                lambda x: not any(re.fullmatch(p, x) for p in sideload_params),
                unmapped_params,
            )
        )
        if unmapped_params:
            raise RuntimeError(
                f"Failed to map GGUF parameters "
                f"({len(unmapped_params)}): "
                f"{unmapped_params}"
            )

    return gguf_to_hf_name_map
def _get_gguf_weight_type(
    self,
    model_config: ModelConfig,
    model_name_or_path: str,
    gguf_to_hf_name_map: dict[str, str],
) -> dict[str, str]:
    """Return a map of HF parameter name -> GGUF weight type string.

    For multimodal checkpoints, entries from the companion mmproj file
    are merged into the result.
    """
    weight_type_map = get_gguf_weight_type_map(
        model_name_or_path, gguf_to_hf_name_map
    )
    is_multimodal = hasattr(model_config.hf_config, "vision_config")
    if is_multimodal:
        mmproj_file = detect_gguf_multimodal(model_name_or_path)
        assert mmproj_file is not None, (
            "Could not find mm_proj file for multimodal GGUF model"
        )
        logger.info("Loading extra mm_proj weights from %s...", mmproj_file)
        mm_proj_weight_type_map = get_gguf_weight_type_map(
            mmproj_file, gguf_to_hf_name_map
        )
        weight_type_map.update(mm_proj_weight_type_map)
    return weight_type_map
def _get_weights_iterator(
    self,
    model_config: ModelConfig,
    model_name_or_path: str,
    gguf_to_hf_name_map: dict[str, str],
) -> Generator[tuple[str, torch.Tensor], None, None]:
    """
    Iterate over GGUF model weights, loading from both main model file and
    mmproj.gguf for multimodal Gemma3 models.

    For Gemma3 multimodal GGUF models:
    - Main file (gemma-3-*.gguf): Language model weights (model.*)
    - mmproj file (mmproj*.gguf): Vision tower + projector weights (v.*, mm.*)

    Yields:
        Tuples of (parameter_name, tensor) for all model weights
    """
    hf_config = model_config.hf_config
    is_multimodal = hasattr(hf_config, "vision_config")

    if is_multimodal:
        # Load mm_proj (mm_encoder + projector) for multimodal weights
        mmproj_file = detect_gguf_multimodal(model_name_or_path)
        assert mmproj_file is not None, (
            "Could not find mm_proj file for multimodal GGUF model"
        )
        yield from gguf_quant_weights_iterator(mmproj_file, gguf_to_hf_name_map)

    yield from gguf_quant_weights_iterator(model_name_or_path, gguf_to_hf_name_map)
def download_model(self, model_config: ModelConfig) -> None:
    """Download/prepare the GGUF checkpoint without loading any weights.

    Delegates to ``self._prepare_weights`` (defined elsewhere in this
    class); presumably it fetches the file if not already local — confirm
    against the full class definition.
    """
    self._prepare_weights(model_config)
def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
    """Feed GGUF weight tensors from the checkpoint into *model*."""
    checkpoint_path = self._prepare_weights(model_config)
    name_map = self._get_gguf_weights_map(model_config)
    weight_stream = self._get_weights_iterator(
        model_config, checkpoint_path, name_map
    )
    model.load_weights(weight_stream)
def load_model(
    self, vllm_config: VllmConfig, model_config: ModelConfig, prefix: str = ""
) -> nn.Module:
    """Construct the model on the target device and load GGUF weights.

    Returns:
        The initialized model with weights loaded and post-processed.
    """
    device_config = vllm_config.device_config
    local_model_path = self._prepare_weights(model_config)
    gguf_weights_map = self._get_gguf_weights_map(model_config)
    # we can only know if tie word embeddings after mapping weights
    if "lm_head.weight" in get_gguf_extra_tensor_names(
        local_model_path, gguf_weights_map
    ):
        model_config.hf_config.update({"tie_word_embeddings": True})

    weight_type_map = self._get_gguf_weight_type(
        model_config, local_model_path, gguf_weights_map
    )
    # filter out unquantized modules to skip
    # (weights stored in plain float formats need no dequantization path)
    unquant_names = [
        name.removesuffix(".weight")
        for name, weight_type in weight_type_map.items()
        if weight_type in ("F32", "F16", "BF16") and name.endswith(".weight")
    ]
    logger.debug(
        "GGUF unquantized modules: %s",
        unquant_names,
    )
    vllm_config.quant_config.unquantized_modules.extend(unquant_names)

    target_device = torch.device(device_config.device)
    with set_default_torch_dtype(model_config.dtype):
        with target_device:
            model = initialize_model(vllm_config=vllm_config, prefix=prefix)
        self.load_weights(model, model_config)
        process_weights_after_loading(model, model_config, target_device)
    return model
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/gguf_loader.py",
"license": "Apache License 2.0",
"lines": 327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/runai_streamer_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
from collections.abc import Generator
import torch
from torch import nn
from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
from vllm.config import ModelConfig
from vllm.config.load import LoadConfig
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.weight_utils import (
download_safetensors_index_file_from_hf,
download_weights_from_hf,
runai_safetensors_weights_iterator,
)
from vllm.transformers_utils.runai_utils import is_runai_obj_uri, list_safetensors
class RunaiModelStreamerLoader(BaseModelLoader):
    """
    Model loader that can load safetensors
    files from local FS or S3 bucket.
    """

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        self._is_distributed = False
        if load_config.model_loader_extra_config:
            extra_config = load_config.model_loader_extra_config
            # Only honor values of the expected type; malformed entries are
            # ignored rather than failing at construction time.
            if "distributed" in extra_config and isinstance(
                extra_config.get("distributed"), bool
            ):
                self._is_distributed = extra_config.get("distributed")
            # Concurrency / memory knobs are forwarded to the Run:ai
            # streamer library through environment variables it reads.
            if "concurrency" in extra_config and isinstance(
                extra_config.get("concurrency"), int
            ):
                os.environ["RUNAI_STREAMER_CONCURRENCY"] = str(
                    extra_config.get("concurrency")
                )
            if "memory_limit" in extra_config and isinstance(
                extra_config.get("memory_limit"), int
            ):
                os.environ["RUNAI_STREAMER_MEMORY_LIMIT"] = str(
                    extra_config.get("memory_limit")
                )

        # Fall back to the generic AWS endpoint when no streamer-specific
        # S3 endpoint was configured.
        runai_streamer_s3_endpoint = os.getenv("RUNAI_STREAMER_S3_ENDPOINT")
        aws_endpoint_url = os.getenv("AWS_ENDPOINT_URL")
        if runai_streamer_s3_endpoint is None and aws_endpoint_url is not None:
            os.environ["RUNAI_STREAMER_S3_ENDPOINT"] = aws_endpoint_url

    def _prepare_weights(
        self, model_name_or_path: str, revision: str | None
    ) -> list[str]:
        """Prepare weights for the model.

        If the model is not local, it will be downloaded.

        Returns:
            Paths of the safetensors files to load.

        Raises:
            RuntimeError: if no safetensors files can be found.
        """
        is_object_storage_path = is_runai_obj_uri(model_name_or_path)
        is_local = os.path.isdir(model_name_or_path)
        safetensors_pattern = "*.safetensors"
        index_file = SAFE_WEIGHTS_INDEX_NAME

        # Local directories and object-storage URIs are used as-is; anything
        # else is treated as a HuggingFace model id and downloaded.
        hf_folder = (
            model_name_or_path
            if (is_local or is_object_storage_path)
            else download_weights_from_hf(
                model_name_or_path,
                self.load_config.download_dir,
                [safetensors_pattern],
                revision,
                ignore_patterns=self.load_config.ignore_patterns,
            )
        )

        hf_weights_files = list_safetensors(path=hf_folder)
        if not is_local and not is_object_storage_path:
            # The index file maps weight names to shard files and must
            # accompany downloaded checkpoints.
            download_safetensors_index_file_from_hf(
                model_name_or_path, index_file, self.load_config.download_dir, revision
            )

        if not hf_weights_files:
            raise RuntimeError(
                f"Cannot find any safetensors model weights with `{model_name_or_path}`"
            )
        return hf_weights_files

    def _get_weights_iterator(
        # BUGFIX: the annotation used to claim `revision: str`, but callers
        # pass `model_config.revision`, which _prepare_weights itself
        # declares as `str | None`.
        self, model_or_path: str, revision: str | None
    ) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Get an iterator for the model weights based on the load format."""
        hf_weights_files = self._prepare_weights(model_or_path, revision)
        return runai_safetensors_weights_iterator(
            hf_weights_files, self.load_config.use_tqdm_on_load, self._is_distributed
        )

    def download_model(self, model_config: ModelConfig) -> None:
        """Download model if necessary"""
        self._prepare_weights(model_config.model, model_config.revision)

    def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
        """Load weights into a model."""
        model_weights = model_config.model
        if model_weights_override := model_config.model_weights:
            # An explicit weights location overrides the model id.
            model_weights = model_weights_override
        model.load_weights(
            self._get_weights_iterator(model_weights, model_config.revision)
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/runai_streamer_loader.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/sharded_state_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import collections
import glob
import os
import time
from collections.abc import Generator
from typing import Any
import torch
from torch import nn
from vllm.config import ModelConfig
from vllm.config.load import LoadConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.weight_utils import (
download_weights_from_hf,
runai_safetensors_weights_iterator,
)
from vllm.transformers_utils.s3_utils import glob as s3_glob
from vllm.transformers_utils.utils import is_s3
logger = init_logger(__name__)
class ShardedStateLoader(BaseModelLoader):
    """
    Model loader that directly loads each worker's model state dict, which
    enables a fast load path for large tensor-parallel models where each worker
    only needs to read its own shard rather than the entire checkpoint. See
    `examples/offline_inference/save_sharded_state.py` for creating a sharded
    checkpoint.
    """

    # Shard filename template: `rank` is the tensor-parallel rank, `part`
    # a running file index within that rank.
    DEFAULT_PATTERN = "model-rank-{rank}-part-{part}.safetensors"

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        extra_config = (
            {}
            if load_config.model_loader_extra_config is None
            else load_config.model_loader_extra_config.copy()
        )
        # Optional custom filename pattern; any other key is rejected.
        self.pattern = extra_config.pop("pattern", self.DEFAULT_PATTERN)
        if extra_config:
            raise ValueError(
                f"Unexpected extra config keys for load format "
                f"{load_config.load_format}: "
                f"{load_config.model_loader_extra_config.keys()}"
            )

    @staticmethod
    def _filter_subtensors(
        tensors: dict[str, torch.Tensor],
    ) -> dict[str, torch.Tensor]:
        """
        Filter out all tensors that share the same memory or a subset of the
        memory of another tensor.
        """
        # Group tensors by (device, storage base pointer): only tensors
        # backed by the same storage can overlap.
        same_storage_groups: dict[Any, list[tuple[str, torch.Tensor]]] = (
            collections.defaultdict(list)
        )
        for key, tensor in tensors.items():
            if tensor.numel():
                ptr = tensor.untyped_storage().data_ptr()
                same_storage_groups[tensor.device, ptr].append((key, tensor))

        def get_end_ptr(tensor: torch.Tensor) -> int:
            # One-past-the-end address of the tensor's data.
            return tensor.view(-1)[-1].data_ptr() + tensor.element_size()

        result: dict[str, torch.Tensor] = {}
        for group in same_storage_groups.values():
            for k, t in group:
                a, b = t.data_ptr(), get_end_ptr(t)
                for k2, t2 in group:
                    if not t2.is_contiguous():
                        continue
                    a2, b2 = t2.data_ptr(), get_end_ptr(t2)
                    # Skip candidates that do not fully cover [a, b).
                    if a < a2 or b2 < b:
                        continue
                    if a2 < a or b < b2 or not t.is_contiguous():
                        break  # t2 covers strictly more memory than t.
                    if k2 < k:
                        # Same tensors, keep the one with the smaller key.
                        break
                else:
                    # No other tensor in the group supersedes t: keep it.
                    result[k] = t
        return result

    def _prepare_weights(self, model_name_or_path: str, revision: str | None):
        # S3 URIs and local directories are used as-is; anything else is
        # treated as a HuggingFace model id and downloaded.
        if is_s3(model_name_or_path) or os.path.isdir(model_name_or_path):
            return model_name_or_path
        else:
            allow_patterns = ["*.safetensors"]
            return download_weights_from_hf(
                model_name_or_path,
                self.load_config.download_dir,
                allow_patterns,
                revision,
                ignore_patterns=self.load_config.ignore_patterns,
            )

    def download_model(self, model_config: ModelConfig) -> None:
        """Download the checkpoint if it is not already available."""
        self._prepare_weights(model_config.model, model_config.revision)

    def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
        """Load this rank's shard files into *model* in place.

        Raises:
            ValueError: if no shard files match the pattern, or if some
                parameters remain unfilled after all files are consumed.
        """
        from vllm.distributed import get_tensor_model_parallel_rank

        model_weights = model_config.model
        if model_weights_override := model_config.model_weights:
            # An explicit weights location overrides the model id.
            model_weights = model_weights_override
        local_model_path = model_weights

        rank = get_tensor_model_parallel_rank()
        pattern = os.path.join(
            local_model_path,
            self.pattern.format(rank=rank, part="*"),
        )
        filepaths = []
        if is_s3(local_model_path):
            file_pattern = f"*{self.pattern.format(rank=rank, part='*')}"
            filepaths = s3_glob(path=local_model_path, allow_pattern=[file_pattern])
        else:
            filepaths = glob.glob(pattern)
        if not filepaths:
            # TODO: support un-sharded checkpoints too
            raise ValueError(
                f"Could not find checkpoint files '{pattern}', only "
                f"pre-sharded checkpoints are currently supported!"
            )
        state_dict = self._filter_subtensors(model.state_dict())
        counter_before_loading_weights = time.perf_counter()
        for key, tensor in self.iterate_over_files(filepaths):
            # If loading with LoRA enabled, additional padding may
            # be added to certain parameters. We only load into a
            # narrowed view of the parameter data.
            param_data = state_dict[key].data
            param_shape = state_dict[key].shape
            for dim, size in enumerate(tensor.shape):
                if size < param_shape[dim]:
                    param_data = param_data.narrow(dim, 0, size)
            if tensor.shape != param_shape:
                logger.warning(
                    "loading tensor of shape %s into parameter '%s' of shape %s",
                    tensor.shape,
                    key,
                    param_shape,
                )
            param_data.copy_(tensor)
            # Remove filled entries so leftovers signal missing keys below.
            state_dict.pop(key)
        counter_after_loading_weights = time.perf_counter()
        logger.info_once(
            "Loading weights took %.2f seconds",
            counter_after_loading_weights - counter_before_loading_weights,
            scope="local",
        )
        if state_dict:
            raise ValueError(f"Missing keys {tuple(state_dict)} in loaded state!")

    def iterate_over_files(
        self, paths
    ) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Yield (name, tensor) pairs from each safetensors file in *paths*."""
        if self.load_config.load_format == "runai_streamer_sharded":
            yield from runai_safetensors_weights_iterator(paths, True)
        else:
            from safetensors.torch import safe_open

            for path in paths:
                with safe_open(path, framework="pt") as f:
                    for key in f.keys():  # noqa: SIM118
                        tensor = f.get_tensor(key)
                        yield key, tensor

    @staticmethod
    def save_model(
        model: torch.nn.Module,
        path: str,
        pattern: str | None = None,
        max_size: int | None = None,
    ) -> None:
        """Save this rank's (deduplicated) state dict as sharded safetensors.

        A new part file is started whenever adding the next tensor would
        push the current part past *max_size* bytes.
        """
        from safetensors.torch import save_file

        from vllm.distributed import get_tensor_model_parallel_rank

        if pattern is None:
            pattern = ShardedStateLoader.DEFAULT_PATTERN
        rank = get_tensor_model_parallel_rank()
        part_idx = 0
        total_size = 0
        state_dict = ShardedStateLoader._filter_subtensors(model.state_dict())
        state_dict_part: dict[str, torch.Tensor] = {}
        for key, tensor in state_dict.items():
            param_size = tensor.nelement() * tensor.element_size()
            if max_size is not None and total_size + param_size > max_size:
                filename = pattern.format(rank=rank, part=part_idx)
                save_file(
                    state_dict_part,
                    os.path.join(path, filename),
                )
                part_idx += 1
                total_size = 0
                state_dict_part = {}
            state_dict_part[key] = tensor
            total_size += param_size
        # Flush the final (possibly only) part.
        if len(state_dict_part) > 0:
            filename = pattern.format(rank=rank, part=part_idx)
            save_file(
                state_dict_part,
                os.path.join(path, filename),
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/sharded_state_loader.py",
"license": "Apache License 2.0",
"lines": 191,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/tensorizer_loader.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: SIM117
import copy
from collections.abc import Generator
import torch
from torch import nn
from vllm.config import ModelConfig, ParallelConfig, VllmConfig
from vllm.config.load import LoadConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader.base_loader import BaseModelLoader
from vllm.model_executor.model_loader.tensorizer import (
TensorizerConfig,
deserialize_tensorizer_model,
init_tensorizer_model,
is_vllm_tensorized,
serialize_vllm_model,
tensorizer_weights_iterator,
)
from vllm.model_executor.model_loader.utils import (
get_model_architecture,
initialize_model,
)
from vllm.utils.torch_utils import set_default_torch_dtype
logger = init_logger(__name__)
# Tensorizer arguments users may not set themselves: vLLM derives `device`
# and `dtype`, and `mode` is managed internally.
BLACKLISTED_TENSORIZER_ARGS = {
    "device",
    "dtype",
    "mode",
}


def validate_config(config: dict):
    """Raise ValueError if *config* explicitly sets a blacklisted argument.

    Entries whose value is None are treated as "unset" and allowed.
    """
    for key, value in config.items():
        if key in BLACKLISTED_TENSORIZER_ARGS and value is not None:
            raise ValueError(f"{key} is not an allowed Tensorizer argument.")
class TensorizerLoader(BaseModelLoader):
    """Model loader using CoreWeave's tensorizer library."""

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)
        # The extra config is either an already-built TensorizerConfig
        # (programmatic use) or a plain dict that must be validated and
        # converted.
        if isinstance(load_config.model_loader_extra_config, TensorizerConfig):
            self.tensorizer_config = load_config.model_loader_extra_config
        else:
            validate_config(load_config.model_loader_extra_config)
            self.tensorizer_config = TensorizerConfig(
                **load_config.model_loader_extra_config["tensorizer_config"]
            )

    def _verify_config(
        self, model_config: ModelConfig, parallel_config: ParallelConfig
    ):
        # Check the serialized artifact against the current model and
        # parallelism settings before any loading starts.
        self.tensorizer_config.verify_with_model_config(model_config)
        self.tensorizer_config.verify_with_parallel_config(parallel_config)

    def _get_weights_iterator(
        self,
    ) -> Generator[tuple[str, torch.Tensor], None, None]:
        """Stream (name, tensor) pairs from the tensorizer source."""
        tensorizer_args = self.tensorizer_config._construct_tensorizer_args()
        return tensorizer_weights_iterator(tensorizer_args)

    def _load_model_serialized_cpu(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> nn.Module:
        """Load a serialized model with tensorizer to the CPU.

        This is only necessary when the model isn't vLLM-tensorized (see
        examples/others/tensorize_vllm_model.py) This should still
        be faster than default HuggingFace loading, but will be slower than
        loading a vLLM-tensorized model.
        """
        device_config = vllm_config.device_config
        model_config = vllm_config.model_config
        with set_default_torch_dtype(model_config.dtype):
            with torch.device(device_config.device):
                model = initialize_model(vllm_config=vllm_config, prefix=prefix)
                model.load_weights(self._get_weights_iterator())
        return model.eval()

    def download_model(self, model_config: ModelConfig) -> None:
        """Materialize the tensorizer stream without deserializing it."""
        self.tensorizer_config.verify_with_model_config(model_config)

        # Opening (and immediately closing) the stream forces the artifact
        # to be fetched.
        with self.tensorizer_config.open_stream():
            pass

    def _patch_tensorizer_config(self, model_config: ModelConfig) -> TensorizerConfig:
        """Return a copy of the config specialized for *model_config*."""
        model_class = get_model_architecture(model_config)[0]
        # Shallow-copy so the loader's shared config stays untouched.
        tensorizer_config = copy.copy(self.tensorizer_config)
        tensorizer_config.model_class = model_class
        tensorizer_config.hf_config = model_config.hf_config
        tensorizer_config.dtype = model_config.dtype
        return tensorizer_config

    def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None:
        """Load serialized model weights with tensorizer.

        Expects a vLLM-tensorized model. See the
        examples/others/tensorize_vllm_model.py example script
        for serializing vLLM models."""
        if is_vllm_tensorized(self.tensorizer_config):
            tensorizer_config = self._patch_tensorizer_config(model_config)
            deserialize_tensorizer_model(model, tensorizer_config)
        else:
            # Fall back to streaming raw (name, tensor) pairs.
            model.load_weights(self._get_weights_iterator())

    def load_model(
        self, vllm_config: VllmConfig, model_config: ModelConfig, prefix: str = ""
    ) -> nn.Module:
        parallel_config = vllm_config.parallel_config
        self._verify_config(model_config, parallel_config)

        if parallel_config.tensor_parallel_size > 1:
            from vllm.distributed import get_tensor_model_parallel_rank

            # The URI is a %-template; each TP rank reads its own shard.
            self.tensorizer_config.tensorizer_uri = (
                self.tensorizer_config.tensorizer_uri % get_tensor_model_parallel_rank()
            )

        if is_vllm_tensorized(self.tensorizer_config):
            tensorizer_config = self._patch_tensorizer_config(model_config)
            device_config = vllm_config.device_config
            with set_default_torch_dtype(model_config.dtype):
                with torch.device(device_config.device):
                    model = init_tensorizer_model(
                        tensorizer_config=tensorizer_config, vllm_config=vllm_config
                    )
                self.load_weights(model, model_config)
            return model
        return self._load_model_serialized_cpu(vllm_config=vllm_config, prefix=prefix)

    @staticmethod
    def save_model(
        model: torch.nn.Module,
        tensorizer_config: TensorizerConfig | dict,
        model_config: ModelConfig,
    ) -> None:
        """Serialize *model* with tensorizer using *tensorizer_config*."""
        if isinstance(tensorizer_config, dict):
            tensorizer_config = TensorizerConfig(**tensorizer_config)
        serialize_vllm_model(
            model=model,
            tensorizer_config=tensorizer_config,
            model_config=model_config,
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/tensorizer_loader.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/retrieval_augmented_generation_with_langchain.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Retrieval Augmented Generation (RAG) Implementation with Langchain
==================================================================
This script demonstrates a RAG implementation using LangChain, Milvus
and vLLM. RAG enhances LLM responses by retrieving relevant context
from a document collection.
Features:
- Web content loading and chunking
- Vector storage with Milvus
- Embedding generation with vLLM
- Question answering with context
Prerequisites:
1. Install dependencies:
pip install -U vllm \
langchain_milvus langchain_openai \
langchain_community beautifulsoup4 \
langchain-text-splitters
2. Start services:
# Start embedding service (port 8000)
vllm serve ssmits/Qwen2-7B-Instruct-embed-base
# Start chat service (port 8001)
vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001
Usage:
python retrieval_augmented_generation_with_langchain.py
Notes:
- Ensure both vLLM services are running before executing
- Default ports: 8000 (embedding), 8001 (chat)
- First run may take time to download models
"""
import argparse
from argparse import Namespace
from typing import Any
from langchain_community.document_loaders import WebBaseLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_milvus import Milvus
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
def load_and_split_documents(config: dict[str, Any]):
    """Fetch the configured web page and split it into overlapping chunks."""
    try:
        web_loader = WebBaseLoader(web_paths=(config["url"],))
        raw_docs = web_loader.load()
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=config["chunk_size"],
            chunk_overlap=config["chunk_overlap"],
        )
        return splitter.split_documents(raw_docs)
    except Exception as e:
        # Surface the failing URL before re-raising for the caller.
        print(f"Error loading document from {config['url']}: {str(e)}")
        raise
def init_vectorstore(config: dict[str, Any], documents: list[Document]):
    """Embed *documents* via the vLLM endpoint and store them in Milvus."""
    embedding_client = OpenAIEmbeddings(
        model=config["embedding_model"],
        openai_api_key=config["vllm_api_key"],
        openai_api_base=config["vllm_embedding_endpoint"],
    )
    # drop_old=True rebuilds the collection on every run.
    return Milvus.from_documents(
        documents=documents,
        embedding=embedding_client,
        connection_args={"uri": config["uri"]},
        drop_old=True,
    )
def init_llm(config: dict[str, Any]):
    """Create the chat client pointed at the vLLM chat endpoint."""
    chat_model = config["chat_model"]
    api_key = config["vllm_api_key"]
    endpoint = config["vllm_chat_endpoint"]
    return ChatOpenAI(
        model=chat_model,
        openai_api_key=api_key,
        openai_api_base=endpoint,
    )
def get_qa_prompt():
    """
    Get question answering prompt template
    """
    # {question} and {context} are filled in by the chain at query time.
    template = """You are an assistant for question-answering tasks.
    Use the following pieces of retrieved context to answer the question.
    If you don't know the answer, just say that you don't know.
    Use three sentences maximum and keep the answer concise.
    Question: {question}
    Context: {context}
    Answer:
    """
    return PromptTemplate.from_template(template)
def format_docs(docs: list[Document]):
    """Concatenate document bodies, blank-line separated, for the prompt."""
    contents = (doc.page_content for doc in docs)
    return "\n\n".join(contents)
def create_qa_chain(retriever: Any, llm: ChatOpenAI, prompt: PromptTemplate):
    """
    Set up question answering chain
    """
    # LCEL pipeline: the retriever fetches context documents (joined into a
    # single string by format_docs) while the raw question passes through
    # unchanged; both feed the prompt, whose output goes to the LLM and is
    # finally parsed down to a plain string.
    return (
        {
            "context": retriever | format_docs,
            "question": RunnablePassthrough(),
        }
        | prompt
        | llm
        | StrOutputParser()
    )
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the langchain RAG demo."""
    arg_parser = argparse.ArgumentParser(description="RAG with vLLM and langchain")

    # Service endpoints and credentials
    arg_parser.add_argument(
        "--vllm-api-key", default="EMPTY", help="API key for vLLM compatible services"
    )
    arg_parser.add_argument(
        "--vllm-embedding-endpoint",
        default="http://localhost:8000/v1",
        help="Base URL for embedding service",
    )
    arg_parser.add_argument(
        "--vllm-chat-endpoint",
        default="http://localhost:8001/v1",
        help="Base URL for chat service",
    )
    arg_parser.add_argument(
        "--uri", default="./milvus.db", help="URI for Milvus database"
    )

    # Document source and model choices
    arg_parser.add_argument(
        "--url",
        default=("https://docs.vllm.ai/en/latest/getting_started/quickstart.html"),
        help="URL of the document to process",
    )
    arg_parser.add_argument(
        "--embedding-model",
        default="ssmits/Qwen2-7B-Instruct-embed-base",
        help="Model name for embeddings",
    )
    arg_parser.add_argument(
        "--chat-model", default="qwen/Qwen1.5-0.5B-Chat", help="Model name for chat"
    )

    # Retrieval and chunking knobs
    arg_parser.add_argument(
        "-i", "--interactive", action="store_true", help="Enable interactive Q&A mode"
    )
    arg_parser.add_argument(
        "-k", "--top-k", type=int, default=3, help="Number of top results to retrieve"
    )
    arg_parser.add_argument(
        "-c",
        "--chunk-size",
        type=int,
        default=1000,
        help="Chunk size for document splitting",
    )
    arg_parser.add_argument(
        "-o",
        "--chunk-overlap",
        type=int,
        default=200,
        help="Chunk overlap for document splitting",
    )
    return arg_parser
def init_config(args: Namespace):
    """Collect parsed command-line arguments into a flat config dict."""
    keys = (
        "vllm_api_key",
        "vllm_embedding_endpoint",
        "vllm_chat_endpoint",
        "uri",
        "embedding_model",
        "chat_model",
        "url",
        "chunk_size",
        "chunk_overlap",
        "top_k",
    )
    return {key: getattr(args, key) for key in keys}
def main():
    """Wire up the RAG pipeline and answer questions (interactive or single)."""
    # Parse command line arguments
    args = get_parser().parse_args()

    # Initialize configuration
    config = init_config(args)

    # Load and split documents
    documents = load_and_split_documents(config)

    # Initialize vector store and retriever
    vectorstore = init_vectorstore(config, documents)
    retriever = vectorstore.as_retriever(search_kwargs={"k": config["top_k"]})

    # Initialize llm and prompt
    llm = init_llm(config)
    prompt = get_qa_prompt()

    # Set up QA chain
    qa_chain = create_qa_chain(retriever, llm, prompt)

    # Interactive mode: loop until the user types 'q' or 'quit'.
    if args.interactive:
        print("\nWelcome to Interactive Q&A System!")
        print("Enter 'q' or 'quit' to exit.")
        while True:
            question = input("\nPlease enter your question: ")
            if question.lower() in ["q", "quit"]:
                print("\nThank you for using! Goodbye!")
                break
            output = qa_chain.invoke(question)
            print(output)
    else:
        # Default single question mode
        question = "How to install vLLM?"
        output = qa_chain.invoke(question)
        print("-" * 50)
        print(output)
        print("-" * 50)
# Script entry point: only run the demo when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/retrieval_augmented_generation_with_langchain.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/retrieval_augmented_generation_with_llamaindex.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
RAG (Retrieval Augmented Generation) Implementation with LlamaIndex
================================================================
This script demonstrates a RAG system using:
- LlamaIndex: For document indexing and retrieval
- Milvus: As vector store backend
- vLLM: For embedding and text generation
Features:
1. Document Loading & Processing
2. Embedding & Storage
3. Query Processing
Requirements:
1. Install dependencies:
pip install llama-index llama-index-readers-web \
llama-index-llms-openai-like \
llama-index-embeddings-openai-like \
llama-index-vector-stores-milvus \
2. Start services:
# Start embedding service (port 8000)
vllm serve ssmits/Qwen2-7B-Instruct-embed-base
# Start chat service (port 8001)
vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001
Usage:
python retrieval_augmented_generation_with_llamaindex.py
Notes:
- Ensure both vLLM services are running before executing
- Default ports: 8000 (embedding), 8001 (chat)
- First run may take time to download models
"""
import argparse
from argparse import Namespace
from typing import Any
from llama_index.core import Settings, StorageContext, VectorStoreIndex
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
from llama_index.llms.openai_like import OpenAILike
from llama_index.readers.web import SimpleWebPageReader
from llama_index.vector_stores.milvus import MilvusVectorStore
def init_config(args: Namespace):
    """Collect parsed command-line arguments into a flat config dict."""
    keys = (
        "url",
        "embedding_model",
        "chat_model",
        "vllm_api_key",
        "embedding_endpoint",
        "chat_endpoint",
        "db_path",
        "chunk_size",
        "chunk_overlap",
        "top_k",
    )
    return {key: getattr(args, key) for key in keys}
def load_documents(url: str) -> list:
    """Load and process web documents"""
    # html_to_text=True strips markup so the index stores plain text.
    return SimpleWebPageReader(html_to_text=True).load_data([url])
def setup_models(config: dict[str, Any]):
    """Configure embedding and chat models"""
    # Both services speak the OpenAI wire protocol, so the OpenAI-like
    # adapters are simply pointed at the local vLLM endpoints.
    Settings.embed_model = OpenAILikeEmbedding(
        api_base=config["embedding_endpoint"],
        api_key=config["vllm_api_key"],
        model_name=config["embedding_model"],
    )

    Settings.llm = OpenAILike(
        model=config["chat_model"],
        api_key=config["vllm_api_key"],
        api_base=config["chat_endpoint"],
        context_window=128000,
        is_chat_model=True,
        is_function_calling_model=False,
    )

    # Split documents into overlapping sentence chunks at index time.
    Settings.transformations = [
        SentenceSplitter(
            chunk_size=config["chunk_size"],
            chunk_overlap=config["chunk_overlap"],
        )
    ]
def setup_vector_store(db_path: str) -> MilvusVectorStore:
    """Create a Milvus store sized to the embedding model's output dim."""
    # Probe the embedding model once to discover the vector dimension.
    probe = Settings.embed_model.get_text_embedding("test")
    dim = len(probe)
    print(f"Embedding dimension: {dim}")
    return MilvusVectorStore(uri=db_path, dim=dim, overwrite=True)
def create_index(documents: list, vector_store: MilvusVectorStore):
    """Index *documents* into the supplied vector store."""
    ctx = StorageContext.from_defaults(vector_store=vector_store)
    return VectorStoreIndex.from_documents(documents, storage_context=ctx)
def query_document(index: VectorStoreIndex, question: str, top_k: int):
    """Answer *question* using a top-k retrieval query engine over *index*."""
    engine = index.as_query_engine(similarity_top_k=top_k)
    return engine.query(question)
def get_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the LlamaIndex RAG demo."""
    arg_parser = argparse.ArgumentParser(description="RAG with vLLM and LlamaIndex")

    # Document source and model choices
    arg_parser.add_argument(
        "--url",
        default=("https://docs.vllm.ai/en/latest/getting_started/quickstart.html"),
        help="URL of the document to process",
    )
    arg_parser.add_argument(
        "--embedding-model",
        default="ssmits/Qwen2-7B-Instruct-embed-base",
        help="Model name for embeddings",
    )
    arg_parser.add_argument(
        "--chat-model", default="qwen/Qwen1.5-0.5B-Chat", help="Model name for chat"
    )

    # Service endpoints and credentials
    arg_parser.add_argument(
        "--vllm-api-key", default="EMPTY", help="API key for vLLM compatible services"
    )
    arg_parser.add_argument(
        "--embedding-endpoint",
        default="http://localhost:8000/v1",
        help="Base URL for embedding service",
    )
    arg_parser.add_argument(
        "--chat-endpoint",
        default="http://localhost:8001/v1",
        help="Base URL for chat service",
    )
    arg_parser.add_argument(
        "--db-path", default="./milvus_demo.db", help="Path to Milvus database"
    )

    # Interaction, chunking, and retrieval knobs
    arg_parser.add_argument(
        "-i", "--interactive", action="store_true", help="Enable interactive Q&A mode"
    )
    arg_parser.add_argument(
        "-c",
        "--chunk-size",
        type=int,
        default=1000,
        help="Chunk size for document splitting",
    )
    arg_parser.add_argument(
        "-o",
        "--chunk-overlap",
        type=int,
        default=200,
        help="Chunk overlap for document splitting",
    )
    arg_parser.add_argument(
        "-k", "--top-k", type=int, default=3, help="Number of top results to retrieve"
    )
    return arg_parser
def main():
    """Wire up the LlamaIndex RAG pipeline and answer questions."""
    # Parse command line arguments
    args = get_parser().parse_args()

    # Initialize configuration
    config = init_config(args)

    # Load documents
    documents = load_documents(config["url"])

    # Setup models
    setup_models(config)

    # Setup vector store
    vector_store = setup_vector_store(config["db_path"])

    # Create index
    index = create_index(documents, vector_store)

    if args.interactive:
        print("\nEntering interactive mode. Type 'quit' to exit.")
        while True:
            # Get user question
            question = input("\nEnter your question: ")

            # Check for exit command
            if question.lower() in ["quit", "exit", "q"]:
                print("Exiting interactive mode...")
                break

            # Get and print response
            print("\n" + "-" * 50)
            print("Response:\n")
            response = query_document(index, question, config["top_k"])
            print(response)
            print("-" * 50)
    else:
        # Single query mode
        question = "How to install vLLM?"
        response = query_document(index, question, config["top_k"])
        print("-" * 50)
        print("Response:\n")
        print(response)
        print("-" * 50)
# Script entry point: only run the demo when executed directly.
if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/retrieval_augmented_generation_with_llamaindex.py",
"license": "Apache License 2.0",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/attention/backends/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import functools
from collections.abc import Callable
from dataclasses import dataclass, field, fields, make_dataclass
from typing import (
TYPE_CHECKING,
Any,
Literal,
Protocol,
get_args,
)
import numpy as np
import torch
from typing_extensions import runtime_checkable
from vllm.config import VllmConfig, get_layers_from_vllm_config
from vllm.utils.math_utils import cdiv
from vllm.v1.kv_cache_interface import KVCacheSpec, MambaSpec
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.worker.gpu_input_batch import InputBatch
import vllm.envs as envs
from vllm.distributed.kv_transfer.kv_connector.utils import (
get_kv_connector_cache_layout,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.attention_layer_base import AttentionLayerBase
from vllm.v1.attention.backend import (
AttentionBackend,
AttentionImpl,
AttentionMetadata,
CommonAttentionMetadata,
subclass_attention_backend,
)
logger = init_logger(__name__)
KVCacheLayoutType = Literal["NHD", "HND"]
_KV_CACHE_LAYOUT_OVERRIDE: KVCacheLayoutType | None = None
PAD_SLOT_ID = -1
def is_valid_kv_cache_layout(value: str) -> bool:
return value in get_args(KVCacheLayoutType)
@functools.lru_cache
def get_kv_cache_layout():
    """Resolve which KV cache layout ("NHD" or "HND") to use.

    Precedence: programmatic override (`set_kv_cache_layout`) >
    `VLLM_KV_CACHE_LAYOUT` environment variable > KV-connector default.

    NOTE: the result is lru_cached, so the first call pins the layout for
    the rest of the process; overrides set afterwards have no effect.
    """
    # Format specified by the code.
    global _KV_CACHE_LAYOUT_OVERRIDE
    cache_layout: Literal["NHD", "HND"] | None = None
    if _KV_CACHE_LAYOUT_OVERRIDE is not None:
        cache_layout = _KV_CACHE_LAYOUT_OVERRIDE
        logger.info_once(
            "`_KV_CACHE_LAYOUT_OVERRIDE` variable detected. "
            "Setting KV cache layout to %s.",
            cache_layout,
        )
        return cache_layout
    # Format specified by the user.
    cache_layout = envs.VLLM_KV_CACHE_LAYOUT
    # When neither the user nor the override specified a layout, get default
    if cache_layout is None:
        cache_layout = get_kv_connector_cache_layout()
    else:
        # User-provided values are validated; connector defaults are trusted.
        assert is_valid_kv_cache_layout(cache_layout)
        logger.info_once(
            "`VLLM_KV_CACHE_LAYOUT` environment variable "
            "detected. Setting KV cache layout to %s.",
            cache_layout,
        )
    return cache_layout
def set_kv_cache_layout(cache_layout: KVCacheLayoutType):
    """Force the KV cache layout, overriding env-var/connector detection.

    NOTE(review): `get_kv_cache_layout` is lru_cached, so this only takes
    effect if called before the first `get_kv_cache_layout()` call.
    """
    global _KV_CACHE_LAYOUT_OVERRIDE
    _KV_CACHE_LAYOUT_OVERRIDE = cache_layout
@dataclass
class PerLayerParameters:
    """
    Currently, FlashInfer backend only support models in which all layers share
    the same values for the following hyperparameters. Should not be used for
    trtllm-gen backend since it supports different values for the following
    hyperparameters.
    """

    # Sliding-window left width; -1 when the layer has no sliding window
    # (see `get_per_layer_parameters`).
    window_left: int
    # Soft cap applied to attention logits; None when the layer has none.
    logits_soft_cap: float | None
    # Softmax scale (`impl.scale`).
    sm_scale: float
    # Whether the layer has attention sinks (`impl.sinks` is set).
    has_sinks: bool = False

    # Uniformity flags filled in by `infer_global_hyperparameters`.
    # `compare=False` keeps them out of `==`, so parameter equality between
    # layers is unaffected by whether the flags have been computed yet.
    # has same params for all layers
    has_same_window_lefts: bool | None = field(default=None, compare=False)
    has_same_all_params: bool | None = field(default=None, compare=False)
def get_per_layer_parameters(
    vllm_config: VllmConfig, layer_names: list[str], cls_: type["AttentionImpl"]
) -> dict[str, PerLayerParameters]:
    """
    Scan layers in `layer_names` and determine some hyperparameters
    to use during `plan`.
    """
    layers = get_layers_from_vllm_config(
        vllm_config,
        AttentionLayerBase,  # type: ignore[type-abstract]
        layer_names,
    )

    def _extract(impl: "AttentionImpl") -> PerLayerParameters:
        # Read the relevant hyperparameters off the attention implementation;
        # missing attributes fall back to "feature not present" defaults.
        sliding = getattr(impl, "sliding_window", None)
        return PerLayerParameters(
            window_left=sliding[0] if sliding is not None else -1,
            logits_soft_cap=getattr(impl, "logits_soft_cap", None),
            sm_scale=impl.scale,
            has_sinks=getattr(impl, "sinks", None) is not None,
        )

    per_layer_params: dict[str, PerLayerParameters] = {}
    for layer_name, layer in layers.items():
        assert isinstance(layer.impl, cls_)
        per_layer_params[layer_name] = _extract(layer.impl)
    return per_layer_params
def infer_global_hyperparameters(
    per_layer_params: dict[str, PerLayerParameters],
) -> PerLayerParameters:
    """Pick the first layer's parameters as the global ones and record
    whether every other layer agrees with them.

    FlashInfer backends other than trtllm-gen only support models where all
    layers share `window_left`, `logits_soft_cap` and `sm_scale`; the
    uniformity flags (`has_same_window_lefts`, `has_same_all_params`) set
    here let callers verify that assumption.
    """
    assert len(per_layer_params) > 0, "No attention layers found in the model."

    all_params = list(per_layer_params.values())
    reference = all_params[0]
    # The flags use `compare=False`, so setting them on `reference` does not
    # perturb the equality checks below.
    reference.has_same_window_lefts = all(
        p.window_left == reference.window_left for p in all_params
    )
    reference.has_same_all_params = all(p == reference for p in all_params)
    return reference
#
# Take in `query_start_loc_np` and `seq_lens_np` and break the sequences into
# local attention blocks, where each block is passed to the attention kernel
# as an independent local ("virtual") batch item.
#
# For example, if are performing a chunked prefill a batch of 3 sequences:
# q_seqlens = [4, 10, 5]
# kv_seqlens = [6, 17, 9]
# Then normally for regular attention we would compute with an attention mask
# for batch idx 0 (q_seqlens = 4, kv_seqlens = 6) like:
# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6)
# k_toks > 0 1 2 3 4 5
# q_toks v _____________
# 0 | 1 1 1
# 1 | 1 1 1 1
# 2 | 1 1 1 1 1
# 3 | 1 1 1 1 1 1
#
# for local attention (with attn_chunk_size = 4) we would compute with an
# attention mask like:
# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6, attn_chunk_size = 4)
# k_toks > 0 1 2 3 4 5
# q_toks v _____________
# 0 | 1 1 1
# 1 | 1 1 1 1
# 2 | 1
# 3 | 1 1
#
# We can simulate this mask using standard flash-attention by breaking the
# sequences into local ("virtual") batches, where each local batch item is a
# local attention block, so in this case batch idx 0 would be broken up into:
#
# local-batch idx: 0 (q_seqlens = 2, kv_seqlens = 4) (batch 0)
# k_toks > 0 1 2 3
# q_toks v _____________
# 0 | 1 1 1
# 1 | 1 1 1 1
# local-batch idx: 1 (q_seqlens = 2, kv_seqlens = 2) (batch 0)
# k_toks > 4 5
# q_toks v _____________
# 2 | 1
# 3 | 1 1
#
# e.g. if we have:
# attn_chunk_size = 4
# query_start_loc_np = [0, 4, 14, 19] (q_seqlens = [4, 10, 5])
# Then this function would return:
# __b0__ ______b1______ __b2__ < orig batch indices
# q_seqlens_local = [ 2, 2, 1, 4, 4, 1, 4, 1]
# cu_seqlens_q_local = [0, 4, 6, 10, 14, 18, 19, 23, 24]
# seqlens_k_local = [ 4, 2, 4, 4, 4, 1, 4, 1]
# block_table_local : shape[local_virtual_batches, pages_per_local_batch]
def make_local_attention_virtual_batches(
    attn_chunk_size: int,
    common_attn_metadata: CommonAttentionMetadata,
    block_size: int = 0,
) -> tuple[CommonAttentionMetadata, Callable[[torch.Tensor], torch.Tensor]]:
    """Break each request into per-chunk "virtual" batch items so local
    (chunked) attention can run as a standard varlen attention call.

    See the worked example in the comment block above this function.

    Args:
        attn_chunk_size: Local attention window size, in tokens. Must be a
            multiple of `block_size`.
        common_attn_metadata: Metadata describing the original batch.
        block_size: KV-cache page size, in tokens.

    Returns:
        A tuple of:
        - New `CommonAttentionMetadata` describing the virtual batch.
        - A callable that maps an original block table tensor to the
          corresponding local (virtual-batch) block table.
    """
    query_start_loc_np = common_attn_metadata.query_start_loc_cpu.numpy()
    seq_lens_np = common_attn_metadata.seq_lens_cpu.numpy()
    block_table = common_attn_metadata.block_table_tensor
    device = common_attn_metadata.query_start_loc.device

    q_seqlens = query_start_loc_np[1:] - query_start_loc_np[:-1]
    actual_batch_size = seq_lens_np.shape[0]

    # Handle if we are starting in the middle of a local attention block,
    # we assume q_seqlens > 0 (for all elements), for each batch idx we compute
    # the number of tokens that are not in the first local attention block and
    # then we can simply use a cdiv for the rest.
    # For example if we have:
    #   attn_chunk_size = 4
    #   q_seqlens = [4, 10, 5]
    #   k_seqlens = [6, 17, 9]
    # Then we would get:
    #   new_tokens_in_first_block = [2, 1, 4]
    #   local_blocks = [2, 4, 2]
    q_tokens_in_first_block = np.minimum(
        attn_chunk_size - ((seq_lens_np - q_seqlens) % attn_chunk_size), q_seqlens
    ).astype(np.int32)
    # `x % -c` is in (-c, 0], so this is the (possibly partial) size of the
    # final local block of each sequence.
    tokens_in_last_block = attn_chunk_size + (seq_lens_np % -attn_chunk_size)
    local_blocks = 1 + cdiv(q_seqlens - q_tokens_in_first_block, attn_chunk_size)

    # Once we know the number of local blocks we can compute the request spans
    # for each batch idx, we can figure out the number of "virtual" requests we
    # have to make,
    # For the above example we would get:
    #   seqlens_q_local = [2, 2, 1, 4, 4, 1, 4, 1]
    #
    # First Get batched arange. (E.g., [2, 4, 2] -> [0, 1, 0, 1, 2, 3, 0, 1])
    #   (TODO: make a utility to share this code with _prepare_inputs)
    # arange step 1. [2, 4, 2] -> [2, 6, 8]
    cu_num_blocks = np.cumsum(local_blocks)
    virtual_batches = cu_num_blocks[-1]
    # arange step 2. [2, 6, 8] -> [0, 0, 2, 2, 2, 2, 6, 6]
    block_offsets = np.repeat(cu_num_blocks - local_blocks, local_blocks)
    # arange step 3. [0, 1, 0, 1, 2, 3, 0, 1]
    arange = np.arange(virtual_batches, dtype=np.int32) - block_offsets
    # also compute reverse arange (i.e. [1, 0, 3, 2, 1, 0, 1, 0])
    rarange = np.repeat(local_blocks, local_blocks) - arange - 1
    # Then we can compute the seqlens_q_local, handling the fact that the
    # first and last blocks could be partial
    seqlens_q_local = np.repeat(q_seqlens - q_tokens_in_first_block, local_blocks)
    # set the first block since this may be a partial block
    seqlens_q_local[arange == 0] = q_tokens_in_first_block
    # set the remaining blocks
    seqlens_q_local[arange > 0] = np.minimum(
        seqlens_q_local - attn_chunk_size * (arange - 1), attn_chunk_size
    )[arange > 0]

    # convert from q_seqlens to cu_seqlens_q
    cu_seqlens_q_local = np.empty(virtual_batches + 1, dtype=np.int32)
    np.cumsum(seqlens_q_local, out=cu_seqlens_q_local[1:])
    cu_seqlens_q_local[0] = 0

    # compute the seqlens_k_local,
    # basically a full local attention block for all but the last block in each
    # batch
    # For our example this will be:
    #   seqlens_k_local = [4, 2, 4, 4, 4, 1, 4, 1]
    seqlens_k_local = np.full(cu_num_blocks[-1], attn_chunk_size, dtype=np.int32)
    seqlens_k_local[cu_num_blocks - 1] = tokens_in_last_block
    num_computed_tokens_local = seqlens_k_local - seqlens_q_local

    k_seqstarts_absolute = np.repeat(seq_lens_np, local_blocks) - (
        rarange * attn_chunk_size + np.repeat(tokens_in_last_block, local_blocks)
    )
    # For the example the local attention blocks start at:
    #                           _b0_  _____b1_____  _b2_
    #   k_seqstarts_absolute = [0, 4, 4, 8, 12, 16, 4, 8]
    block_starts = k_seqstarts_absolute // block_size
    assert attn_chunk_size % block_size == 0, (
        f"attn_chunk_size {attn_chunk_size} is not divisible by block_size {block_size}"
    )
    pages_per_local_batch = attn_chunk_size // block_size

    # Create a block_table for the local attention blocks
    # For our example if we have a block-table like (assuming block_size=2):
    #   block_table = [
    #     [ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9], < batch 0
    #     [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], < batch 1
    #     [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], < batch 2
    #   ]
    # Then for the local batches we would want a block-table like
    #   block_table_local = [
    #     [  0,  1 ], < local-batch 0, (batch 0, starting from k[0])
    #     [  2,  3 ], < local-batch 1, (batch 0, starting from k[4])
    #     [ 12, 13 ], < local-batch 2, (batch 1, starting from k[4])
    #     [ 14, 15 ], < local-batch 3, (batch 1, starting from k[8])
    #     [ 16, 17 ], < local-batch 4, (batch 1, starting from k[12])
    #     [ 18, 19 ], < local-batch 5, (batch 1, starting from k[16])
    #     [ 22, 23 ], < local-batch 6, (batch 2, starting from k[4])
    #     [ 24, 25 ], < local-batch 7, (batch 2, starting from k[8])
    #   ]
    block_indices = block_starts[:, None] + np.arange(
        pages_per_local_batch, dtype=np.int32
    )
    block_indices = block_indices.reshape(-1).clip(max=block_table.shape[1] - 1)
    batch_indices = np.repeat(
        np.arange(actual_batch_size, dtype=np.int32),
        local_blocks * pages_per_local_batch,
    )
    # NOTE: https://github.com/pytorch/pytorch/pull/160256 causes performance
    # regression when using numpy arrays (batch and block indices) to index into
    # torch tensor (block_table). As a workaround, convert numpy arrays to torch
    # tensor first, which recovers perf.
    batch_indices_torch = torch.from_numpy(batch_indices)
    block_indices_torch = torch.from_numpy(block_indices)

    # Save as a lambda so we can return this for update_block_table
    make_block_table = lambda block_table: block_table[
        batch_indices_torch, block_indices_torch
    ].view(virtual_batches, -1)

    block_table_local = make_block_table(block_table)
    query_start_loc_cpu = torch.from_numpy(cu_seqlens_q_local)
    seq_lens_cpu = torch.from_numpy(seqlens_k_local)
    max_seq_len = int(seq_lens_cpu.max())

    return CommonAttentionMetadata(
        query_start_loc_cpu=query_start_loc_cpu,
        query_start_loc=query_start_loc_cpu.to(device=device, non_blocking=True),
        seq_lens=seq_lens_cpu.to(device=device, non_blocking=True),
        num_reqs=len(seq_lens_cpu),
        num_actual_tokens=common_attn_metadata.num_actual_tokens,
        max_query_len=seqlens_q_local.max(),
        max_seq_len=max_seq_len,
        block_table_tensor=block_table_local,
        slot_mapping=common_attn_metadata.slot_mapping,
        causal=True,
        _seq_lens_cpu=seq_lens_cpu,
        _num_computed_tokens_cpu=torch.from_numpy(num_computed_tokens_local),
    ), make_block_table
def make_kv_sharing_fast_prefill_common_attn_metadata(
    common_attn_metadata: CommonAttentionMetadata,
) -> CommonAttentionMetadata:
    """Narrow the metadata to only the tokens whose logits are needed
    (the KV-sharing "fast prefill" path).

    Requires `logits_indices_padded` and `num_logits_indices` to be set on
    the input metadata. Pure-decode batches are returned unchanged.
    """
    if common_attn_metadata.max_query_len == 1:
        # All requests are decode (assume 1 token for now)
        # Skip computing fast prefill path
        return common_attn_metadata

    assert common_attn_metadata.logits_indices_padded is not None
    assert common_attn_metadata.num_logits_indices is not None

    logits_indices_padded = common_attn_metadata.logits_indices_padded
    num_logits_indices = common_attn_metadata.num_logits_indices
    # Get rid of CUDAGraph padding, if any
    logits_indices = logits_indices_padded[:num_logits_indices]
    num_reqs = common_attn_metadata.num_reqs
    query_start_loc = common_attn_metadata.query_start_loc

    # Example inputs
    # num_reqs: 3
    # generation_indices: [14, 18, 19, 27]
    # query_start_loc: [0, 15, 20, 28]
    # seq_lens: [41, 31, 40]

    # Find how many decode indices belong to each request
    # request_ids: [0, 1, 1, 2]
    request_ids = torch.bucketize(logits_indices, query_start_loc[1:], right=True)

    # Figure out how many tokens are in each request
    # num_decode_tokens: [1, 2, 1]
    num_decode_tokens = torch.bincount(request_ids, minlength=num_reqs)

    # Calculate new query_start_loc with tokens in generation_indices
    # decode_query_start_loc: [0, 1, 3, 4]
    decode_query_start_loc = torch.empty(
        num_reqs + 1, device=query_start_loc.device, dtype=query_start_loc.dtype
    )
    decode_query_start_loc[0] = 0
    decode_query_start_loc[1:] = torch.cumsum(num_decode_tokens, dim=0)
    decode_max_query_len = int(num_decode_tokens.max().item())
    total_num_decode_tokens = int(num_decode_tokens.sum().item())

    # seq_lens / block table / slot mapping are unchanged; only the query
    # bookkeeping is rewritten to cover the logits-producing tokens.
    common_attn_metadata = CommonAttentionMetadata(
        query_start_loc=decode_query_start_loc,
        query_start_loc_cpu=decode_query_start_loc.to("cpu", non_blocking=True),
        seq_lens=common_attn_metadata.seq_lens,
        num_reqs=num_reqs,
        num_actual_tokens=total_num_decode_tokens,
        max_query_len=decode_max_query_len,
        max_seq_len=common_attn_metadata.max_seq_len,
        block_table_tensor=common_attn_metadata.block_table_tensor,
        slot_mapping=common_attn_metadata.slot_mapping,
        causal=True,
        _seq_lens_cpu=common_attn_metadata._seq_lens_cpu,
        _num_computed_tokens_cpu=common_attn_metadata._num_computed_tokens_cpu,
    )
    return common_attn_metadata
def split_decodes_prefills_and_extends(
    common_attn_metadata: "CommonAttentionMetadata",
    decode_threshold: int = 1,
) -> tuple[int, int, int, int, int, int]:
    """
    Assuming a reordered batch (decodes, then extends, then prefills), find
    the boundaries between the three groups.

    Args:
        common_attn_metadata: CommonAttentionMetadata object containing the
            batch metadata.
        decode_threshold: The maximum query length to be considered a decode.

    Returns:
        num_decodes: The number of decode requests.
        num_extends: The number of extend requests.
        num_prefills: The number of prefill requests.
        num_decode_tokens: The number of tokens in the decode requests.
        num_extend_tokens: The number of tokens in the extend requests.
        num_prefill_tokens: The number of tokens in the prefill requests.
    """
    max_query_len = common_attn_metadata.max_query_len
    num_reqs = common_attn_metadata.num_reqs
    num_tokens = common_attn_metadata.num_actual_tokens
    query_start_loc = common_attn_metadata.query_start_loc_cpu
    seq_lens = common_attn_metadata.seq_lens_cpu

    # Fast path: every request fits under the decode threshold.
    if max_query_len <= decode_threshold:
        return num_reqs, 0, 0, num_tokens, 0, 0

    query_lens = query_start_loc[1:] - query_start_loc[:-1]
    is_prefill_or_extend = query_lens > decode_threshold
    # A prefill has no prior context (its whole sequence is scheduled now);
    # an extend is any other non-decode request.
    is_prefill = (seq_lens == query_lens) & is_prefill_or_extend

    first_extend = is_prefill_or_extend.int().argmax(dim=-1).item()
    first_prefill = is_prefill.int().argmax(dim=-1).item()

    num_decodes = first_extend
    num_decode_tokens = query_start_loc[first_extend].item()
    if not torch.any(is_prefill_or_extend):
        return (num_decodes, 0, 0, num_decode_tokens, 0, 0)

    num_prefills_or_extends = num_reqs - num_decodes
    num_prefill_or_extend_tokens = num_tokens - num_decode_tokens
    if not torch.any(is_prefill):
        return (
            num_decodes,
            num_prefills_or_extends,
            0,
            num_decode_tokens,
            num_prefill_or_extend_tokens,
            0,
        )

    num_extends = first_prefill - num_decodes
    num_prefills = num_reqs - first_prefill
    # BUGFIX: `.item()` was missing here, so `num_prefill_tokens` (and the
    # derived `num_extend_tokens`) leaked out of this function as 0-d tensors
    # instead of the plain ints documented above and returned on every other
    # path (cf. `num_decode_tokens`).
    num_prefill_tokens = num_tokens - query_start_loc[first_prefill].item()
    num_extend_tokens = num_prefill_or_extend_tokens - num_prefill_tokens
    return (
        num_decodes,
        num_extends,
        num_prefills,
        num_decode_tokens,
        num_extend_tokens,
        num_prefill_tokens,
    )
def split_decodes_and_prefills(
    common_attn_metadata: CommonAttentionMetadata,
    decode_threshold: int = 1,
    require_uniform: bool = False,
) -> tuple[int, int, int, int]:
    """
    Assuming a reordered batch, finds the boundary between prefill and decode
    requests.

    Args:
        common_attn_metadata: CommonAttentionMetadata object containing the
            batch metadata.
        decode_threshold: The maximum query length to be considered a decode.
        require_uniform: If True, requires that all decode requests have the
            same query length. When set, some queries may be considered
            prefills even if they are <= decode_threshold, in order to
            ensure uniformity.

    Returns:
        num_decodes: The number of decode requests.
        num_prefills: The number of prefill requests.
        num_decode_tokens: The number of tokens in the decode requests.
        num_prefill_tokens: The number of tokens in the prefill requests.
    """
    max_query_len = common_attn_metadata.max_query_len
    num_reqs = common_attn_metadata.num_reqs
    num_tokens = common_attn_metadata.num_actual_tokens
    query_start_loc = common_attn_metadata.query_start_loc_cpu

    all_decode = (num_reqs, 0, num_tokens, 0)
    if max_query_len <= decode_threshold and (
        not require_uniform or decode_threshold <= 1
    ):
        return all_decode

    query_lens = query_start_loc[1:] - query_start_loc[:-1]
    if query_lens[0].item() > decode_threshold:
        # The first request already exceeds the threshold, so a reordered
        # batch contains no decodes at all.
        return 0, num_reqs, 0, num_tokens

    if require_uniform:
        # check if we are in a padded uniform batch; this is used for full-CGs, some
        # requests may have a query length of 0 but since they are padding its fine
        # to treat them as decodes (ensures num_decodes matches the captured size)
        if torch.all((query_lens == query_lens[0]) | (query_lens == 0)):
            assert num_reqs * query_lens[0] == num_tokens, "tokens not padded correctly"
            return all_decode
        prefill_mask = query_lens != query_lens[0]
    else:
        prefill_mask = query_lens > decode_threshold

    if not torch.any(prefill_mask):
        return all_decode

    boundary = prefill_mask.int().argmax(dim=-1).item()
    assert torch.all(query_lens[:boundary] <= decode_threshold)
    num_decode_tokens = query_start_loc[boundary].item()
    return (
        boundary,
        num_reqs - boundary,
        num_decode_tokens,
        num_tokens - num_decode_tokens,
    )
def split_prefill_chunks(
    seq_lens_cpu: torch.Tensor, workspace_size: int, request_offset: int = 0
) -> list[tuple[int, int]]:
    """
    Split the prefill requests into chunks such that the total sequence length
    of each chunk is less than or equal to the workspace size.

    Args:
        seq_lens_cpu: The sequence lengths of the prefill requests on CPU.
        workspace_size: The maximum workspace size (in tokens) per chunk.
        request_offset: The offset to add to the request indices.

    Returns:
        A list of tuples of (reqs_start, reqs_end) representing chunk
        boundaries.
    """
    lens = seq_lens_cpu.tolist()
    # Every request must fit into the workspace on its own, otherwise no
    # greedy packing is possible.
    assert all(length <= workspace_size for length in lens)

    bounds: list[tuple[int, int]] = []
    chunk_start = 0
    running_total = 0
    for idx, length in enumerate(lens):
        if running_total + length > workspace_size:
            # Close the current chunk and start a new one at this request.
            bounds.append((chunk_start + request_offset, idx + request_offset))
            chunk_start = idx
            running_total = 0
        running_total += length
    if lens:
        bounds.append((chunk_start + request_offset, len(lens) + request_offset))
    return bounds
def reorder_batch_to_split_decodes_and_prefills(
    input_batch: "InputBatch",
    scheduler_output: "SchedulerOutput",
    decode_threshold: int = 1,
) -> bool:
    """
    Reorders the batch into decode -> extend -> prefill order, where:
      * decode: request with num_scheduled_tokens <= decode_threshold and
        some already-computed context
      * extend: non-decode request with existing context
      * prefill: request with no existing context (num_computed_tokens == 0)

    Returns:
        True if the batch was modified, False otherwise.
    """
    # We now want to reorder the batch into decode → extend → prefill order
    # where:
    #   decode: request with num_scheduled_tokens <= decode_threshold
    #   extend: non-decode request with existing context
    #   prefill: non-decode request with no existing context
    # NOTE for now we loosely use "decode" to mean requests where attention is
    # likely memory-bound and "prefill" to mean requests where attention is
    # likely compute-bound,
    num_reqs = len(input_batch.req_ids)
    num_scheduled_tokens = [
        scheduler_output.num_scheduled_tokens[id] for id in input_batch.req_ids
    ]
    num_scheduled_tokens_np = np.array(num_scheduled_tokens)
    num_computed_tokens_np = input_batch.num_computed_tokens_cpu[:num_reqs]

    is_prefill = num_computed_tokens_np == 0
    is_decode = (num_scheduled_tokens_np <= decode_threshold) & (~is_prefill)
    is_extend = (num_scheduled_tokens_np > decode_threshold) & (~is_prefill)

    # Desired order: decode → extend → prefill
    # `req_regions[i]` is the region each request currently belongs to;
    # `target_regions[i]` is the region that slot i should contain.
    req_regions = np.zeros(is_decode.shape, dtype=np.int32)  # 0 = decode by default
    req_regions[is_extend] = 1
    req_regions[is_prefill] = 2

    num_decodes = int(is_decode.sum())
    num_extends = int(is_extend.sum())

    target_regions = np.zeros(num_reqs, dtype=np.int32)
    target_regions[num_decodes : num_decodes + num_extends] = 1
    target_regions[num_decodes + num_extends :] = 2

    needs_swap = req_regions != target_regions
    if not needs_swap.any():
        return False

    # Extract indices that need swapping and sort by target region
    orig_indices = np.where(needs_swap)[0]
    sorted_order = np.argsort(req_regions[needs_swap], kind="stable")
    src_indices = orig_indices[sorted_order]
    # src_dest_map[src] = dst means "the request currently at `src` belongs
    # at `dst`": the k-th lowest-region request among the misplaced ones goes
    # into the k-th misplaced slot (slots are in ascending index order).
    src_dest_map = {int(src): int(dst) for src, dst in zip(src_indices, orig_indices)}

    # Apply the permutation cycle-by-cycle using pairwise swaps so that every
    # intermediate state of `input_batch` stays internally consistent.
    for src in src_dest_map:
        dst = src_dest_map[src]
        while src != dst:
            input_batch.swap_states(src, dst)
            # Mark dst as done by updating its destination to itself
            next_dst = src_dest_map.get(dst, dst)
            src_dest_map[dst] = dst
            dst = next_dst

    return True
def reshape_query_for_spec_decode(query: torch.Tensor, batch_size: int) -> torch.Tensor:
    """
    Reshapes the query tensor for the specified batch size, so that
    it has shape (batch_size, seq_len, num_heads, head_dim).
    """
    assert query.dim() == 3, f"query must be 3D, got {query.dim()}D"
    total_tokens, num_heads, head_dim = query.shape
    assert total_tokens % batch_size == 0, (
        f"{total_tokens=} is not divisible by {batch_size=}"
    )
    # seq_len is implied by how many tokens each batch element contributes.
    return query.view(batch_size, total_tokens // batch_size, num_heads, head_dim)
def reshape_attn_output_for_spec_decode(attn_output: torch.Tensor) -> torch.Tensor:
    """
    Reshapes the attention output tensor, so that
    the batch_size and seq_len dimensions are combined.
    """
    if attn_output.dim() == 3:
        # Already in the correct shape
        return attn_output
    assert attn_output.dim() == 4, f"attn_output must be 4D, got {attn_output.dim()}D"
    batch, seq, num_heads, head_dim = attn_output.shape
    return attn_output.view(batch * seq, num_heads, head_dim)
def subclass_attention_metadata(
    name_prefix: str,
    metadata_cls: Any,
    fields: list[tuple[str, Any, Any]],
) -> Any:
    """
    Return a new dataclass named `name_prefix + metadata_cls.__name__` that
    subclasses `metadata_cls` and adds the given `fields`.
    """
    subclass_name: str = name_prefix + metadata_cls.__name__  # type: ignore
    return make_dataclass(subclass_name, fields, bases=(metadata_cls,))
@runtime_checkable
class KVSharingFastPrefillMetadata(Protocol):
    """Structural type for attention metadata carrying the extra KV-sharing
    fast-prefill fields (see `create_fast_prefill_custom_backend`).
    """

    logits_indices_padded: torch.Tensor | None = None
    num_logits_indices: int | None = None
def create_fast_prefill_custom_backend(
    prefix: str,
    underlying_attn_backend: type[AttentionBackend],
) -> type[AttentionBackend]:
    """Wrap `underlying_attn_backend` so its builder produces metadata
    restricted to logits-producing tokens (KV-sharing fast prefill).

    The returned backend subclasses the underlying one, replacing only the
    metadata builder; the built metadata additionally carries
    `logits_indices_padded` and `num_logits_indices`.
    """
    underlying_builder = underlying_attn_backend.get_builder_cls()

    class FastPrefillAttentionBuilder(underlying_builder):  # type: ignore
        def build(
            self,
            common_prefix_len: int,
            common_attn_metadata: CommonAttentionMetadata,
            fast_build: bool = False,
        ) -> AttentionMetadata:
            # Narrow the batch to logits-producing tokens before delegating
            # to the underlying builder.
            new_common_attn_metadata = (
                make_kv_sharing_fast_prefill_common_attn_metadata(common_attn_metadata)
            )
            metadata = super().build(
                common_prefix_len, new_common_attn_metadata, fast_build
            )

            # Dynamically subclass the concrete metadata type so the result
            # also satisfies the KVSharingFastPrefillMetadata protocol.
            class KVSharingFastPrefillAttentionMetadata(
                metadata.__class__,  # type: ignore
                KVSharingFastPrefillMetadata,
            ):
                def __init__(self, metadata, common_attn_metadata):
                    # Shallow copy all fields in metadata cls
                    for _field in fields(metadata.__class__):
                        setattr(self, _field.name, getattr(metadata, _field.name))

                    self.logits_indices_padded = (
                        common_attn_metadata.logits_indices_padded
                    )
                    self.num_logits_indices = common_attn_metadata.num_logits_indices

            return KVSharingFastPrefillAttentionMetadata(metadata, common_attn_metadata)

    attn_backend = subclass_attention_backend(
        name_prefix=prefix,
        attention_backend_cls=underlying_attn_backend,
        builder_cls=FastPrefillAttentionBuilder,
    )

    return attn_backend
def compute_causal_conv1d_metadata(
    query_start_loc_p_cpu: torch.Tensor,
    *,
    device: torch.device,
):
    """Precompute per-BLOCK_M program/offset tables for the causal_conv1d
    kernel.

    Returns (nums_dict, batch_ptr, token_chunk_offset_ptr) where, for each
    BLOCK_M, `batch_ptr[i]` is the batch index and
    `token_chunk_offset_ptr[i]` the chunk index within that batch for
    kernel program i; unused slots are filled with PAD_SLOT_ID.
    """
    # Needed for causal_conv1d. Use the CPU query_start_loc to avoid DtoH sync.
    assert query_start_loc_p_cpu.device.type == "cpu"
    seqlens = query_start_loc_p_cpu.diff()
    nums_dict = {}  # type: ignore
    batch_ptr = None
    token_chunk_offset_ptr = None
    for BLOCK_M in [8]:  # cover all BLOCK_M values
        # Ceil-division: number of BLOCK_M-sized chunks per sequence.
        nums = -(-seqlens // BLOCK_M)
        nums_dict[BLOCK_M] = {}
        nums_dict[BLOCK_M]["nums"] = nums
        nums_dict[BLOCK_M]["tot"] = nums.sum().item()
        # Batch index repeated once per chunk of that batch.
        mlist = torch.from_numpy(np.repeat(np.arange(len(nums)), nums))
        nums_dict[BLOCK_M]["mlist"] = mlist
        mlist_len = len(nums_dict[BLOCK_M]["mlist"])
        nums_dict[BLOCK_M]["mlist_len"] = mlist_len
        # Over-allocate 2x so the buffers can be reused across calls without
        # frequent reallocation.
        MAX_NUM_PROGRAMS = max(1024, mlist_len) * 2
        # Per-chunk offset within its sequence: [0..num-1] for each batch.
        offsetlist = []  # type: ignore
        for idx, num in enumerate(nums):
            offsetlist.extend(range(num))
        offsetlist = torch.tensor(offsetlist, dtype=torch.int32)
        nums_dict[BLOCK_M]["offsetlist"] = offsetlist

        if batch_ptr is None:
            # First BLOCK_M iteration: allocate padded device buffers.
            batch_ptr = torch.full(
                (MAX_NUM_PROGRAMS,), PAD_SLOT_ID, dtype=torch.int32, device=device
            )
            token_chunk_offset_ptr = torch.full(
                (MAX_NUM_PROGRAMS,), PAD_SLOT_ID, dtype=torch.int32, device=device
            )
        else:
            # Grow (and re-pad) the shared buffers if this BLOCK_M needs more.
            if batch_ptr.nelement() < MAX_NUM_PROGRAMS:
                batch_ptr.resize_(MAX_NUM_PROGRAMS).fill_(PAD_SLOT_ID)
                token_chunk_offset_ptr.resize_(  # type: ignore
                    MAX_NUM_PROGRAMS
                ).fill_(PAD_SLOT_ID)

        batch_ptr[0:mlist_len].copy_(mlist, non_blocking=True)
        token_chunk_offset_ptr[  # type: ignore
            0:mlist_len
        ].copy_(offsetlist, non_blocking=True)

        # NOTE(review): all BLOCK_M entries share the same underlying
        # batch_ptr / token_chunk_offset_ptr buffers — presumably intentional
        # since only one BLOCK_M is used; verify before extending the list.
        nums_dict[BLOCK_M]["batch_ptr"] = batch_ptr
        nums_dict[BLOCK_M]["token_chunk_offset_ptr"] = token_chunk_offset_ptr  # type: ignore
    return nums_dict, batch_ptr, token_chunk_offset_ptr
def get_dcp_local_seq_lens(
seq_lens: torch.Tensor,
dcp_size: int = 1,
dcp_rank: int | None = None,
cp_kv_cache_interleave_size: int = 1,
) -> torch.Tensor:
"""While using dcp, kv_cache size stored on each rank may be different,
use this function to calculate split decode seq_lens of each dcp rank.
Only consider dcp now, we can extend the case of cp based on this.
"""
num_requests = seq_lens.size(0)
if dcp_rank is None:
rank_offsets = (
torch.arange(dcp_size, dtype=torch.int32, device=seq_lens.device)
.unsqueeze(0)
.repeat(num_requests, 1)
)
else:
rank_offsets = torch.tensor(
[[dcp_rank]], dtype=torch.int32, device=seq_lens.device
)
seq_lens_tiled = (
seq_lens.to(torch.int32).unsqueeze(-1).repeat(1, rank_offsets.shape[1])
)
base = (
seq_lens_tiled
// cp_kv_cache_interleave_size
// dcp_size
* cp_kv_cache_interleave_size
)
remainder = seq_lens_tiled - base * dcp_size
remainder = torch.clip(
remainder - rank_offsets * cp_kv_cache_interleave_size,
0,
cp_kv_cache_interleave_size,
)
dcp_local_seq_lens = base + remainder
return dcp_local_seq_lens.squeeze(1)
def mamba_get_block_table_tensor(
    block_table: torch.Tensor,
    seq_lens: torch.Tensor,
    kv_cache_spec: KVCacheSpec,
    mamba_cache_mode: str,
) -> torch.Tensor:
    """
    Get the block table tensor for mamba kernels from the input
    common_attn_metadata.block_table_tensor given different mamba cache modes.

    - "all": input (#requests, cdiv(max_model_len, block_size));
        output (#requests, cdiv(max_model_len, block_size)).
    - "none": input (#requests, 1 + num_speculative_blocks);
        output (#requests, 1 + num_speculative_blocks).
    - "align": input (#requests, cdiv(max_model_len, block_size));
        output (#requests, 1 + num_speculative_blocks), which are the last
        1 + num_speculative_blocks of each request.
    """
    if mamba_cache_mode in ("all", "none"):
        # The table already has the shape the kernels expect.
        return block_table

    assert isinstance(kv_cache_spec, MambaSpec)
    # NOTE: For 0-length requests in CUDA graph, use a start_index of 0
    # to handle the invalid block table.
    last_block_idx = torch.clamp((seq_lens - 1) // kv_cache_spec.block_size, min=0)
    # Use int32 for arithmetic to avoid dtype promotion overhead, then
    # convert to int64 for gather (which requires Long indices).
    col_offsets = torch.arange(
        1 + kv_cache_spec.num_speculative_blocks,
        device=block_table.device,
        dtype=torch.int32,
    )
    gather_idx = (last_block_idx.unsqueeze(1) + col_offsets).to(torch.int64)
    return torch.gather(block_table, 1, gather_idx)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/attention/backends/utils.py",
"license": "Apache License 2.0",
"lines": 750,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/test_triton_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import sys
import types
from unittest import mock
from vllm.triton_utils.importing import TritonLanguagePlaceholder, TritonPlaceholder
def test_triton_placeholder_is_module():
    """TritonPlaceholder must masquerade as the real `triton` module."""
    triton = TritonPlaceholder()
    assert isinstance(triton, types.ModuleType)
    assert triton.__name__ == "triton"
def test_triton_language_placeholder_is_module():
    """TritonLanguagePlaceholder must masquerade as `triton.language`."""
    triton_language = TritonLanguagePlaceholder()
    assert isinstance(triton_language, types.ModuleType)
    assert triton_language.__name__ == "triton.language"
def test_triton_placeholder_decorators():
    """Bare @triton.jit/@triton.autotune/@triton.heuristics on the
    placeholder must act as no-op decorators returning the function.
    """
    triton = TritonPlaceholder()

    @triton.jit
    def foo(x):
        return x

    @triton.autotune
    def bar(x):
        return x

    @triton.heuristics
    def baz(x):
        return x

    assert foo(1) == 1
    assert bar(2) == 2
    assert baz(3) == 3
def test_triton_placeholder_decorators_with_args():
    """Placeholder decorators must also be no-ops when called with
    arguments (decorator-factory form).
    """
    triton = TritonPlaceholder()

    @triton.jit(debug=True)
    def foo(x):
        return x

    @triton.autotune(configs=[], key="x")
    def bar(x):
        return x

    @triton.heuristics({"BLOCK_SIZE": lambda args: 128 if args["x"] > 1024 else 64})
    def baz(x):
        return x

    assert foo(1) == 1
    assert bar(2) == 2
    assert baz(3) == 3
def test_triton_placeholder_language():
    """The language placeholder exposes None stubs for common `tl` names."""
    lang = TritonLanguagePlaceholder()
    assert isinstance(lang, types.ModuleType)
    assert lang.__name__ == "triton.language"
    assert lang.constexpr is None
    assert lang.dtype is None
    assert lang.int64 is None
    assert lang.int32 is None
    assert lang.tensor is None
def test_triton_placeholder_language_from_parent():
    """`TritonPlaceholder().language` must be a language placeholder."""
    triton = TritonPlaceholder()
    lang = triton.language
    assert isinstance(lang, TritonLanguagePlaceholder)
def test_no_triton_fallback():
    """With triton uninstalled, vllm.triton_utils must fall back to the
    placeholder objects and report HAS_TRITON as False.
    """
    # clear existing triton modules
    sys.modules.pop("triton", None)
    sys.modules.pop("triton.language", None)
    sys.modules.pop("vllm.triton_utils", None)
    sys.modules.pop("vllm.triton_utils.importing", None)

    # mock triton not being installed
    with mock.patch.dict(sys.modules, {"triton": None}):
        from vllm.triton_utils import HAS_TRITON, tl, triton

        assert HAS_TRITON is False
        assert triton.__class__.__name__ == "TritonPlaceholder"
        assert triton.language.__class__.__name__ == "TritonLanguagePlaceholder"
        assert tl.__class__.__name__ == "TritonLanguagePlaceholder"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_triton_utils.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/granitemoehybrid.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only GraniteMoeHybrid model."""
# Added by the IBM Team, 2025
from collections.abc import Iterable
import torch
from torch import nn
from transformers import GraniteMoeHybridConfig
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, ModelConfig, VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.distributed.parallel_state import get_pp_group
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import QKVParallelLinear, RowParallelLinear
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
MambaStateCopyFuncCalculator,
MambaStateDtypeCalculator,
MambaStateShapeCalculator,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.sequence import IntermediateTensors
from .granitemoe import GraniteMoeMoE
from .granitemoeshared import GraniteMoeSharedMLP
from .interfaces import (
HasInnerState,
IsHybrid,
SupportsLoRA,
SupportsMambaPrefixCaching,
SupportsPP,
SupportsQuant,
)
from .utils import (
AutoWeightsLoader,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
class GraniteMoeHybridMambaDecoderLayer(nn.Module):
    """Decoder layer that mixes tokens with a Mamba2 SSM instead of attention.

    Structure: RMSNorm -> MambaMixer2 -> scaled residual add, then
    RMSNorm -> (optional MoE and/or shared MLP) -> scaled residual add.
    Both residual adds are scaled by ``config.residual_multiplier``.
    """

    def __init__(
        self,
        config: GraniteMoeHybridConfig,
        layer_idx: int,
        model_config: ModelConfig | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        # NOTE(review): layer_idx is accepted for interface parity with the
        # attention layer but is not used here.
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.residual_multiplier = config.residual_multiplier
        self.mamba = MambaMixer2(
            hidden_size=config.hidden_size,
            ssm_state_size=config.mamba_d_state,
            conv_kernel_size=config.mamba_d_conv,
            intermediate_size=config.mamba_expand * config.hidden_size,
            use_conv_bias=config.mamba_conv_bias,
            use_bias=config.mamba_proj_bias,
            n_groups=config.mamba_n_groups,
            num_heads=config.mamba_n_heads,
            head_dim=config.mamba_d_head,
            rms_norm_eps=config.rms_norm_eps,
            activation=config.hidden_act,
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.mixer",
        )
        # MoE block is optional: only built when the config declares experts.
        self.block_sparse_moe = None
        if getattr(config, "num_local_experts", 0) > 0:
            self.block_sparse_moe = GraniteMoeMoE(
                num_experts=config.num_local_experts,
                top_k=config.num_experts_per_tok,
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                quant_config=quant_config,
                prefix=f"{prefix}.block_sparse_moe",
            )
        # Shared (dense) MLP is optional too; absent when
        # shared_intermediate_size is 0 or missing.
        self.shared_mlp = (
            None
            if getattr(config, "shared_intermediate_size", 0) == 0
            else GraniteMoeSharedMLP(
                config, quant_config=quant_config, prefix=f"{prefix}.shared_mlp"
            )
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run one Mamba decoder layer.

        The incoming ``residual`` is ignored and immediately overwritten;
        the parameter exists so attention and mamba layers share a calling
        convention. Returns ``(hidden_states, residual)``.
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        output = self.mamba(hidden_states)
        hidden_states = residual + output * self.residual_multiplier
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        if self.shared_mlp is None:
            if self.block_sparse_moe is not None:
                hidden_states = self.block_sparse_moe(hidden_states)
            # else: skip
        else:
            # create a copy since block_sparse_moe modifies in-place
            if self.block_sparse_moe is not None:
                moe_hidden_states = hidden_states.clone()
                moe_hidden_states = self.block_sparse_moe(moe_hidden_states)
                hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)
                del moe_hidden_states
            else:
                hidden_states = self.shared_mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier
        return hidden_states, residual
class GraniteMoeHybridAttentionDecoderLayer(nn.Module):
    """Decoder layer that mixes tokens with self-attention.

    Mirrors :class:`GraniteMoeHybridMambaDecoderLayer` except that the token
    mixer is :class:`GraniteMoeHybridAttention`; the MoE / shared-MLP half of
    the layer is identical.
    """

    def __init__(
        self,
        config: GraniteMoeHybridConfig,
        layer_idx: int,
        model_config: ModelConfig | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        # NOTE(review): layer_idx and model_config are accepted for interface
        # parity but are not used in this constructor.
        super().__init__()
        self.hidden_size = config.hidden_size
        self.residual_multiplier = config.residual_multiplier
        self.self_attn = GraniteMoeHybridAttention(
            config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        # MoE block is optional: only built when the config declares experts.
        self.block_sparse_moe = None
        if getattr(config, "num_local_experts", 0) > 0:
            self.block_sparse_moe = GraniteMoeMoE(
                num_experts=config.num_local_experts,
                top_k=config.num_experts_per_tok,
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                quant_config=quant_config,
                prefix=f"{prefix}.block_sparse_moe",
            )
        # Shared (dense) MLP is optional too.
        self.shared_mlp = (
            None
            if getattr(config, "shared_intermediate_size", 0) == 0
            else GraniteMoeSharedMLP(
                config, quant_config=quant_config, prefix=f"{prefix}.shared_mlp"
            )
        )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run one attention decoder layer.

        The incoming ``residual`` is ignored and overwritten (see the Mamba
        layer for the shared calling convention). Returns
        ``(hidden_states, residual)``.
        """
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
        )
        hidden_states = residual + hidden_states * self.residual_multiplier
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        if self.shared_mlp is None:
            if self.block_sparse_moe is not None:
                hidden_states = self.block_sparse_moe(hidden_states)
            # else: skip
        else:
            # create a copy since block_sparse_moe modifies in-place
            if self.block_sparse_moe is not None:
                moe_hidden_states = hidden_states.clone()
                moe_hidden_states = self.block_sparse_moe(moe_hidden_states)
                hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)
                del moe_hidden_states
            else:
                hidden_states = self.shared_mlp(hidden_states)
        hidden_states = residual + hidden_states * self.residual_multiplier
        return hidden_states, residual
class GraniteMoeHybridAttention(nn.Module):
    """Multi-head self-attention with tensor-parallel QKV/output projections.

    KV heads are partitioned across TP ranks when there are enough of them,
    otherwise replicated.  ``config.attention_multiplier`` is passed to
    :class:`Attention` as the softmax scale.
    """

    def __init__(
        self,
        config: GraniteMoeHybridConfig,
        model_config: ModelConfig | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        # NOTE(review): model_config is accepted but unused here, and the
        # caller (GraniteMoeHybridAttentionDecoderLayer) does not pass it.
        super().__init__()
        self.causal = True
        self.hidden_size = config.hidden_size
        self.attention_bias = config.attention_bias
        self.attention_multiplier = config.attention_multiplier
        self.total_num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.total_num_heads
        self.total_num_kv_heads = config.num_key_value_heads
        # TensorParallel logic
        tp_size = get_tensor_model_parallel_world_size()
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_key_value_heads = max(1, self.total_num_kv_heads // tp_size)
        self.qkv_proj = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=self.attention_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=self.attention_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # RoPE is optional: some Granite hybrid configs use no positional
        # embedding in attention layers.
        if config.position_embedding_type == "rope":
            self.rotary_emb = get_rope(
                self.head_dim,
                max_position=config.max_position_embeddings,
                rope_parameters=config.rope_parameters,
                is_neox_style=True,
            )
        else:
            self.rotary_emb = None
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.attention_multiplier,
            num_kv_heads=self.num_key_value_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Project to Q/K/V, optionally apply RoPE, attend, and project out."""
        qkv, _ = self.qkv_proj(hidden_states)
        # Split the fused QKV projection along the last dim into per-role
        # chunks sized by this rank's head counts.
        query, key, value = qkv.split(
            [
                self.num_heads * self.head_dim,
                self.num_key_value_heads * self.head_dim,
                self.num_key_value_heads * self.head_dim,
            ],
            dim=-1,
        )
        if self.rotary_emb is not None:
            query, key = self.rotary_emb(positions, query, key)
        hidden_states = self.attn(query, key, value)
        del query, key, value
        hidden_states = self.o_proj(hidden_states)[0]
        return hidden_states
# Maps config.layer_types entries to the decoder-layer class used at that
# depth; consumed by GraniteMoeHybridModel's per-layer factory.
ALL_DECODER_LAYER_TYPES = {
    "attention": GraniteMoeHybridAttentionDecoderLayer,
    "mamba": GraniteMoeHybridMambaDecoderLayer,
}
@support_torch_compile
class GraniteMoeHybridModel(nn.Module):
    """Backbone for GraniteMoeHybrid: embeddings, a mix of attention and
    Mamba decoder layers (chosen per-index via ``config.layer_types``), and a
    final RMSNorm.  Supports pipeline parallelism via IntermediateTensors.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
        )
        self.embedding_multiplier = config.embedding_multiplier

        # Factory used by make_layers: the layer index is recovered from the
        # prefix string ("...layers.<idx>") and selects attention vs mamba.
        def get_layer(prefix: str):
            layer_idx = int(prefix.rsplit(".", 1)[1])
            layer_class = ALL_DECODER_LAYER_TYPES[config.layer_types[layer_idx]]
            return layer_class(
                config,
                layer_idx,
                model_config,
                cache_config,
                quant_config=quant_config,
                prefix=prefix,
            )

        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers"
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings (no embedding_multiplier applied here)."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Run the decoder stack.

        On the first PP rank, embeddings are computed (and scaled by
        ``embedding_multiplier``); later ranks resume from
        ``intermediate_tensors``.  Non-last ranks return IntermediateTensors
        instead of hidden states.
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
                # Granite scales embeddings before the first layer.
                hidden_states *= self.embedding_multiplier
            residual = None
        else:
            if intermediate_tensors is None:
                raise RuntimeError("Intermediate tensors may not be None!")
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        # NOTE(review): num_attn is counted but never used afterwards —
        # looks like leftover debugging/bookkeeping.
        num_attn = 0
        for i, layer in enumerate(self.layers):
            if isinstance(layer, GraniteMoeHybridAttentionDecoderLayer):
                num_attn += 1
            hidden_states, residual = layer(
                positions=positions, hidden_states=hidden_states, residual=residual
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states = self.norm(hidden_states)
        return hidden_states

    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        """Build (param_name, weight_name, expert_id, shard_id) tuples mapping
        per-expert checkpoint tensors onto the fused w13/w2 expert weights.
        """
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        # layers.0.block_sparse_moe.expert_0.input_linear.input_scale
        ckpt_gate_proj_name = "gate_proj"
        ckpt_down_proj_name = "down_proj"
        ckpt_up_proj_name = "up_proj"
        num_experts = self.config.num_local_experts
        return [
            # (param_name, weight_name, expert_id, shard_id)
            (
                "block_sparse_moe.experts.w13_"
                if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name]
                else "block_sparse_moe.experts.w2_",
                f"block_sparse_moe.experts.{expert_id}.{weight_name}.",
                expert_id,
                shard_id,
            )
            for expert_id in range(num_experts)
            for shard_id, weight_name in [
                ("w1", ckpt_gate_proj_name),
                ("w2", ckpt_down_proj_name),
                ("w3", ckpt_up_proj_name),
            ]
        ]

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, renaming HF GraniteMoeHybrid layouts to
        vLLM's fused layouts.  Returns the set of parameter names loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        expert_params_mapping = self.get_expert_mapping()

        # Plain load: no sharding information needed.
        def _load(n, p):
            param = params_dict[n]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, p)
            loaded_params.add(n)

        # Load one shard (q/k/v) of a stacked parameter.
        def _load_shard(n, p, shard_id):
            # Skip layers on other devices.
            if not is_pp_missing_parameter(n, self):
                param = params_dict[n]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, p, shard_id)
                loaded_params.add(n)

        # Load one expert's shard into a fused expert weight.
        def _load_expert(n, p, name, shard_id, expert_id):
            param = params_dict[n]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, p, name, shard_id=shard_id, expert_id=expert_id)
            loaded_params.add(n)

        # Try the quantized per-expert mapping; returns the mapped name on
        # success, None if this tensor is not a quant-expert weight.
        def _load_quant_expert(name, loaded_weight):
            for mapping in expert_params_mapping:
                param_name, weight_name, expert_id, shard_id = mapping
                if weight_name not in name:
                    continue
                name_mapped = name.replace(weight_name, param_name)
                # Skip layers on other devices.
                if is_pp_missing_parameter(name_mapped, self):
                    continue
                param = params_dict[name_mapped]
                weight_loader = param.weight_loader
                success = False
                if weight_loader is not None:
                    success = weight_loader(
                        param,
                        loaded_weight,
                        name_mapped,
                        shard_id=shard_id,
                        expert_id=expert_id,
                        return_success=True,
                    )
                if success:
                    return name_mapped
            return None

        for n, p in weights:
            # Checkpoints store the Mamba A matrix as its log.
            if "A_log" in n:
                n = n.replace("A_log", "A")
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(n)
            ):
                # Loading kv cache quantization scales
                loaded_weight = p
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                _load(scale_name, loaded_weight)
                loaded_params.add(scale_name)
                continue
            if _load_quant_expert(n, p):
                continue
            # Logic analogous to: https://github.com/vllm-project/vllm/blob/f49e5aff11c986ed4d45202b1716c5d74786efa9/vllm/model_executor/models/granitemoeshared.py#L215
            # Mapping different experts' layout:
            # from HF (input_linear, output_linear, router)
            # to vLLM (experts_w13({e}.w1, {e}.w2), experts_w3({e}.w3), gate)
            # The renaming and parameter loading logic is the same for weight
            # and weight_scale tensors so we can reuse them without issues.
            if n.endswith(".block_sparse_moe.input_linear.weight") or n.endswith(
                ".block_sparse_moe.input_linear.weight_scale"
            ):
                for e in range(p.size(0)):
                    w1_name = n.replace(
                        ".block_sparse_moe.input_linear.weight",
                        f".block_sparse_moe.experts.{e}.w1.weight",
                    )
                    w3_name = n.replace(
                        ".block_sparse_moe.input_linear.weight",
                        f".block_sparse_moe.experts.{e}.w3.weight",
                    )
                    # HF fuses w1 and w3 along dim 0; split them apart.
                    w1_param, w3_param = p[e].chunk(2, dim=0)
                    _load_expert(
                        n.replace(".input_linear.", ".experts.w13_"),
                        w1_param,
                        w1_name,
                        shard_id="w1",
                        expert_id=e,
                    )
                    _load_expert(
                        n.replace(".input_linear.", ".experts.w13_"),
                        w3_param,
                        w3_name,
                        shard_id="w3",
                        expert_id=e,
                    )
            elif n.endswith(".block_sparse_moe.output_linear.weight") or n.endswith(
                ".block_sparse_moe.output_linear.weight_scale"
            ):
                for e in range(p.size(0)):
                    w2_name = n.replace(
                        ".block_sparse_moe.output_linear.weight",
                        f".block_sparse_moe.experts.{e}.w2.weight",
                    )
                    w2_param = p[e]
                    _load_expert(
                        n.replace(".output_linear.", ".experts.w2_"),
                        w2_param,
                        w2_name,
                        shard_id="w2",
                        expert_id=e,
                    )
            elif n.endswith(".block_sparse_moe.router.layer.weight"):
                gate_name = n.replace(
                    ".block_sparse_moe.router.layer.weight",
                    ".block_sparse_moe.gate.weight",
                )
                _load(gate_name, p)
            else:
                loaded = False
                for param_name, weight_name, shard_id in stacked_params_mapping:
                    if weight_name in n:
                        _load_shard(
                            n.replace(weight_name, param_name), p, shard_id=shard_id
                        )
                        loaded = True
                if not loaded:
                    _load(n, p)
        return loaded_params
class GraniteMoeHybridForCausalLM(
    nn.Module,
    HasInnerState,
    SupportsLoRA,
    SupportsPP,
    IsHybrid,
    SupportsQuant,
    SupportsMambaPrefixCaching,
):
    """Causal-LM head over :class:`GraniteMoeHybridModel`, with the
    Mamba-cache sizing hooks vLLM needs for hybrid (attention+SSM) models.
    """

    # Fused-parameter mapping used by LoRA / quantization machinery.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "conv1d": ["conv1d"],
        "in_proj": ["in_proj"],
        "input_linear": ["input_linear"],
    }
    # Names of embedding modules for LoRA embedding support.
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }

    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, torch.dtype]:
        """Return (conv_state_dtype, ssm_state_dtype) for the Mamba cache."""
        return MambaStateDtypeCalculator.mamba2_state_dtype(
            vllm_config.model_config.dtype,
            vllm_config.cache_config.mamba_cache_dtype,
            vllm_config.cache_config.mamba_ssm_cache_dtype,
        )

    @classmethod
    def get_mamba_state_shape_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[tuple[int, int], tuple[int, int, int]]:
        """Calculate shapes for Mamba's convolutional and state caches.
        Args:
            vllm_config: vLLM config
        Returns:
            Tuple containing:
            - conv_state_shape: Shape for convolutional state cache
            - temporal_state_shape: Shape for state space model cache
        """
        parallel_config = vllm_config.parallel_config
        hf_config = vllm_config.model_config.hf_config
        intermediate_size = hf_config.mamba_expand * hf_config.hidden_size
        return MambaStateShapeCalculator.mamba2_state_shape(
            intermediate_size=intermediate_size,
            tp_world_size=parallel_config.tensor_parallel_size,
            n_groups=hf_config.mamba_n_groups,
            num_heads=hf_config.mamba_n_heads,
            head_dim=hf_config.mamba_d_head,
            state_size=hf_config.mamba_d_state,
            conv_kernel=hf_config.mamba_d_conv,
        )

    @classmethod
    def get_mamba_state_copy_func(cls) -> tuple[MambaStateCopyFunc, MambaStateCopyFunc]:
        """Return the (conv, ssm) state-copy functions for prefix caching."""
        return MambaStateCopyFuncCalculator.mamba2_state_copy_func()

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        scheduler_config = vllm_config.scheduler_config
        self.quant_config = vllm_config.quant_config
        self.config = config
        self.scheduler_config = scheduler_config
        self.model = GraniteMoeHybridModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.lm_head = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
            quant_config=self.quant_config,
            prefix=maybe_prefix(prefix, "lm_head"),
        )
        # Share the embedding matrix with the output head when configured.
        if config.tie_word_embeddings:
            self.lm_head.weight = self.model.embed_tokens.weight
        # Granite divides logits by logits_scaling.
        self.logits_processor = LogitsProcessor(
            config.vocab_size,
            config.vocab_size,
            scale=1 / self.config.logits_scaling,
        )
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate embedding lookup to the backbone."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs,
    ):
        """Run the backbone; returns hidden states (or IntermediateTensors
        on non-last PP ranks, per the backbone's contract)."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to (scaled) vocabulary logits."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load weights via AutoWeightsLoader (delegates to the backbone)."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/granitemoehybrid.py",
"license": "Apache License 2.0",
"lines": 623,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/kernels/benchmark_moe_permute_unpermute.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
from typing import Any, TypedDict
import ray
import torch
from transformers import AutoConfig
from vllm.model_executor.layers.fused_moe import fused_topk
from vllm.model_executor.layers.fused_moe.moe_permute_unpermute import (
moe_permute,
moe_unpermute,
)
from vllm.model_executor.layers.fused_moe.utils import _fp8_quantize
from vllm.platforms import current_platform
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.torch_utils import set_random_seed
FP8_DTYPE = current_platform.fp8_dtype()
class BenchmarkConfig(TypedDict):
    """Triton-style kernel tuning parameters.

    NOTE(review): this TypedDict is not referenced elsewhere in this script —
    presumably copied from the fused-MoE tuning benchmark; confirm if needed.
    """

    BLOCK_SIZE_M: int
    BLOCK_SIZE_N: int
    BLOCK_SIZE_K: int
    GROUP_SIZE_M: int
    num_warps: int
    num_stages: int
def benchmark_permute(
    num_tokens: int,
    num_experts: int,
    hidden_size: int,
    topk: int,
    dtype: torch.dtype,
    use_fp8_w8a8: bool,
    use_int8_w8a16: bool,
    num_iters: int = 100,
) -> float:
    """Benchmark the moe_permute kernel with CUDA-graph replay.

    Captures 10 kernel invocations into one CUDA graph, then times
    ``num_iters`` graph replays and returns the mean latency per single
    invocation in microseconds.

    NOTE(review): ``use_int8_w8a16`` is accepted but never used here.
    """
    # init_dtype = torch.float16 if use_fp8_w8a8 else dtype
    hidden_states = torch.randn(num_tokens, hidden_size, dtype=dtype)
    # output_hidden_states = torch.empty_like(hidden_states)
    if use_fp8_w8a8:
        # scale is unused below; only the quantized activations are needed.
        qhidden_states, scale = _fp8_quantize(hidden_states, None, None)
    else:
        qhidden_states = hidden_states
    gating_output = torch.randn(num_iters, num_tokens, num_experts, dtype=torch.float32)
    input_gating = torch.randn(num_tokens, num_experts, dtype=torch.float32)
    topk_weights, topk_ids, token_expert_indices = fused_topk(
        qhidden_states, input_gating, topk, False
    )

    # NOTE(review): prepare() refreshes input_gating each iteration, but
    # topk_ids is computed once and never recomputed, so every replay
    # measures identical routing — confirm this is intended.
    def prepare(i: int):
        input_gating.copy_(gating_output[i])

    def run():
        moe_permute(
            qhidden_states,
            a1q_scale=None,
            topk_ids=topk_ids,
            n_expert=num_experts,
            expert_map=None,
        )

    # JIT compilation & warmup
    run()
    torch.cuda.synchronize()
    # Capture 10 invocations with CUDA graph
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        for _ in range(10):
            run()
    torch.cuda.synchronize()
    # Warmup
    for _ in range(5):
        graph.replay()
    torch.cuda.synchronize()
    # NOTE(review): other vLLM benchmarks use torch.cuda.Event; torch.Event
    # is the device-agnostic API (torch >= 2.3) — confirm minimum version.
    start_event = torch.Event(enable_timing=True)
    end_event = torch.Event(enable_timing=True)
    latencies: list[float] = []
    for i in range(num_iters):
        prepare(i)
        torch.cuda.synchronize()
        start_event.record()
        graph.replay()
        end_event.record()
        end_event.synchronize()
        latencies.append(start_event.elapsed_time(end_event))
    # Divide by 10 because each replay runs 10 captured invocations;
    # elapsed_time is in ms, so *1000 converts to us.
    avg = sum(latencies) / (num_iters * 10) * 1000  # us
    graph.reset()
    return avg
def benchmark_unpermute(
    num_tokens: int,
    num_experts: int,
    hidden_size: int,
    topk: int,
    dtype: torch.dtype,
    use_fp8_w8a8: bool,
    use_int8_w8a16: bool,
    num_iters: int = 100,
) -> float:
    """Benchmark the moe_unpermute kernel with CUDA-graph replay.

    Runs moe_permute once to build valid inputs, captures 10 moe_unpermute
    invocations in one CUDA graph, and returns the mean latency per single
    invocation in microseconds over ``num_iters`` replays.

    NOTE(review): ``use_int8_w8a16`` is accepted but never used here.
    """
    # init_dtype = torch.float16 if use_fp8_w8a8 else dtype
    hidden_states = torch.randn(num_tokens, hidden_size, dtype=dtype)
    if use_fp8_w8a8:
        # scale is unused below; only the quantized activations are needed.
        qhidden_states, scale = _fp8_quantize(hidden_states, None, None)
    else:
        qhidden_states = hidden_states
    input_gating = torch.randn(num_tokens, num_experts, dtype=torch.float32)
    topk_weights, topk_ids, token_expert_indices = fused_topk(
        qhidden_states, input_gating, topk, False
    )

    # Build permuted inputs once; the unpermute kernel is what gets timed.
    def prepare():
        (
            permuted_hidden_states,
            _,
            first_token_off,
            inv_perm_idx,
            _,
        ) = moe_permute(
            qhidden_states,
            a1q_scale=None,
            topk_ids=topk_ids,
            n_expert=num_experts,
            expert_map=None,
        )
        # convert to fp16/bf16 as gemm output
        return (
            permuted_hidden_states.to(dtype),
            first_token_off,
            inv_perm_idx,
        )

    def run(input: tuple):
        (permuted_hidden_states, first_token_off, inv_perm_idx) = input
        output = torch.empty_like(hidden_states)
        moe_unpermute(
            output,
            permuted_hidden_states,
            topk_weights,
            inv_perm_idx,
            first_token_off,
        )

    # JIT compilation & warmup
    input = prepare()
    run(input)
    torch.cuda.synchronize()
    # Capture 10 invocations with CUDA graph
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        for _ in range(10):
            run(input)
    torch.cuda.synchronize()
    # Warmup
    for _ in range(5):
        graph.replay()
    torch.cuda.synchronize()
    # NOTE(review): see benchmark_permute — torch.Event vs torch.cuda.Event.
    start_event = torch.Event(enable_timing=True)
    end_event = torch.Event(enable_timing=True)
    latencies: list[float] = []
    for i in range(num_iters):
        torch.cuda.synchronize()
        start_event.record()
        graph.replay()
        end_event.record()
        end_event.synchronize()
        latencies.append(start_event.elapsed_time(end_event))
    # Each replay executes 10 captured invocations; ms -> us conversion.
    avg = sum(latencies) / (num_iters * 10) * 1000  # us
    graph.reset()
    return avg
@ray.remote(num_gpus=1)
class BenchmarkWorker:
    """Ray actor pinned to one GPU that runs both benchmarks for one shape."""

    def __init__(self, seed: int) -> None:
        torch.set_default_device("cuda")
        set_random_seed(seed)
        self.seed = seed
        # Get the device ID to allocate tensors and kernels
        # on the respective GPU. This is required for Ray to work
        # correctly with multi-GPU tuning on the ROCm platform.
        self.device_id = int(ray.get_gpu_ids()[0])

    def benchmark(
        self,
        num_tokens: int,
        num_experts: int,
        hidden_size: int,
        topk: int,
        dtype: torch.dtype,
        use_fp8_w8a8: bool,
        use_int8_w8a16: bool,
    ) -> tuple[float, float]:
        """Return (permute_us, unpermute_us) mean latencies for one config."""
        # Re-seed so every (worker, config) pair sees identical random inputs.
        set_random_seed(self.seed)
        permute_time = benchmark_permute(
            num_tokens,
            num_experts,
            hidden_size,
            topk,
            dtype,
            use_fp8_w8a8,
            use_int8_w8a16,
            num_iters=100,
        )
        unpermute_time = benchmark_unpermute(
            num_tokens,
            num_experts,
            hidden_size,
            topk,
            dtype,
            use_fp8_w8a8,
            use_int8_w8a16,
            num_iters=100,
        )
        return permute_time, unpermute_time
def get_weight_block_size_safety(config, default_value=None):
    """Safely read ``weight_block_size`` from a config's quantization section.

    Returns *default_value* when the config has no ``quantization_config``
    attribute, when that attribute is not a plain dict, or when the dict
    lacks a ``weight_block_size`` entry.
    """
    qcfg = getattr(config, "quantization_config", {})
    if not isinstance(qcfg, dict):
        return default_value
    return qcfg.get("weight_block_size", default_value)
def main(args: argparse.Namespace):
    """Resolve (E, topk, hidden_size) from the model's HF config, fan the
    benchmark out over all available GPUs via Ray, and print per-batch-size
    permute/unpermute latencies.
    """
    print(args)
    config = AutoConfig.from_pretrained(
        args.model, trust_remote_code=args.trust_remote_code
    )
    # Different architectures store expert count / top-k under different keys.
    if config.architectures[0] == "DbrxForCausalLM":
        E = config.ffn_config.moe_num_experts
        topk = config.ffn_config.moe_top_k
    elif config.architectures[0] == "JambaForCausalLM":
        E = config.num_experts
        topk = config.num_experts_per_tok
    elif (
        config.architectures[0] == "DeepseekV3ForCausalLM"
        or config.architectures[0] == "DeepseekV2ForCausalLM"
        or config.architectures[0] == "Glm4MoeForCausalLM"
        or config.architectures[0] == "Glm4MoeLiteForCausalLM"
    ):
        E = config.n_routed_experts
        topk = config.num_experts_per_tok
    elif config.architectures[0] in ["Qwen2MoeForCausalLM", "Qwen3MoeForCausalLM"]:
        E = config.num_experts
        topk = config.num_experts_per_tok
    else:
        # Support for llama4
        config = config.get_text_config()
        # Default: Mixtral.
        E = config.num_local_experts
        topk = config.num_experts_per_tok
    hidden_size = config.hidden_size
    dtype = torch.float16 if current_platform.is_rocm() else config.dtype
    use_fp8_w8a8 = args.dtype == "fp8_w8a8"
    use_int8_w8a16 = args.dtype == "int8_w8a16"
    # Default batch-size sweep when none is given on the command line.
    if args.batch_size is None:
        batch_sizes = [
            1,
            2,
            4,
            8,
            16,
            24,
            32,
            48,
            64,
            96,
            128,
            256,
            512,
            1024,
            1536,
            2048,
            3072,
            4096,
        ]
    else:
        batch_sizes = [args.batch_size]
    ray.init()
    num_gpus = int(ray.available_resources()["GPU"])
    workers = [BenchmarkWorker.remote(args.seed) for _ in range(num_gpus)]

    # Round-robin the input configs over the worker actors and gather results.
    def _distribute(method: str, inputs: list[Any]) -> list[Any]:
        outputs = []
        worker_idx = 0
        for input_args in inputs:
            worker = workers[worker_idx]
            worker_method = getattr(worker, method)
            output = worker_method.remote(*input_args)
            outputs.append(output)
            worker_idx = (worker_idx + 1) % num_gpus
        return ray.get(outputs)

    outputs = _distribute(
        "benchmark",
        [
            (
                batch_size,
                E,
                hidden_size,
                topk,
                dtype,
                use_fp8_w8a8,
                use_int8_w8a16,
            )
            for batch_size in batch_sizes
        ],
    )
    for batch_size, (permute, unpermute) in zip(batch_sizes, outputs):
        print(f"Batch size: {batch_size}")
        print(f"Permute time: {permute:.2f} us")
        print(f"Unpermute time: {unpermute:.2f} us")
# CLI entry point: parse benchmark options and run the sweep.
if __name__ == "__main__":
    parser = FlexibleArgumentParser()
    parser.add_argument(
        "--model", type=str, default="mistralai/Mixtral-8x7B-Instruct-v0.1"
    )
    parser.add_argument(
        "--dtype", type=str, choices=["auto", "fp8_w8a8", "int8_w8a16"], default="auto"
    )
    parser.add_argument("--seed", type=int, default=0)
    # Optional: benchmark a single batch size instead of the default sweep.
    parser.add_argument("--batch-size", type=int, required=False)
    parser.add_argument("--trust-remote-code", action="store_true")
    args = parser.parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/kernels/benchmark_moe_permute_unpermute.py",
"license": "Apache License 2.0",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_moe_permute_unpermute.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the MOE permute/unpermute kernel
Run `pytest tests/kernels/test_moe_permute_unpermute.py`.
"""
import numpy as np
import pytest
import torch
from vllm.model_executor.layers.fused_moe import fused_topk
from vllm.model_executor.layers.fused_moe.layer import determine_expert_map
from vllm.model_executor.layers.fused_moe.moe_permute_unpermute import (
moe_permute,
moe_permute_unpermute_supported,
moe_unpermute,
)
from vllm.platforms import current_platform
from vllm.utils.torch_utils import set_random_seed
NUM_EXPERTS = [16, 64, 256]
TOP_KS = [2, 6, 8]
EP_SIZE = [1, 4, 16]
set_random_seed(0)
if current_platform.is_rocm():
pytest.skip(
"moe_permute_unpermute_supported is not defined for ROCm",
allow_module_level=True,
)
def torch_permute(
    hidden_states: torch.Tensor,
    topk_ids: torch.Tensor,
    # token_expert_indices: torch.Tensor,
    topk: int,
    n_expert: int,
    n_local_expert: int,
    start_expert: int,
    expert_map: torch.Tensor | None = None,
) -> list[torch.Tensor]:
    """Reference (pure PyTorch) implementation of moe_permute.

    Returns [permuted_hidden_states, expert_first_token_offset,
    src_row_id2dst_row_id_map, dst_row_id2src_row_id_map, valid_row_idx].
    ``valid_row_idx`` lists the permuted rows routed to a local expert.
    """
    n_token = hidden_states.shape[0]
    if expert_map is not None:
        # Shift local expert ids to [0, n_local_expert); push non-local ones
        # past n_expert so a stable sort places them after all local rows.
        is_local_expert = expert_map[topk_ids] != -1
        not_local_expert = expert_map[topk_ids] == -1
        topk_ids = is_local_expert * (topk_ids - start_expert) + not_local_expert * (
            topk_ids + n_expert
        )
    token_expert_indices = torch.arange(
        0, n_token * topk, dtype=torch.int32, device=hidden_states.device
    ).reshape((n_token, topk))
    # Stable sort keeps the original token order within each expert bucket.
    sorted_topk_ids, sorted_indices = torch.sort(topk_ids.flatten(), stable=True)
    dst_row_id2src_row_id_map = token_expert_indices.flatten()[sorted_indices]
    expert_first_token_offset = torch.zeros(
        n_local_expert + 1, dtype=torch.int64, device="cuda"
    )
    # Walk the sorted ids once to build the per-expert prefix offsets.
    idx = 0
    for i in range(0, n_local_expert):
        cnt = 0
        while idx < sorted_topk_ids.numel() and sorted_topk_ids[idx] == i:
            cnt += 1
            idx += 1
        expert_first_token_offset[i + 1] = expert_first_token_offset[i] + cnt
    _, src2dst_idx = torch.sort(dst_row_id2src_row_id_map)
    valid_row_idx = []
    # Gather source rows into expert-grouped order (row r of the output is
    # token dst_row_id2src_row_id_map[r] // topk of the input).
    permuted_hidden_states = hidden_states[dst_row_id2src_row_id_map // topk, ...]
    src_row_id2dst_row_id_map = torch.arange(
        0, n_token * topk, device="cuda", dtype=torch.int32
    )[src2dst_idx].reshape((n_token, topk))
    valid_row_idx += [i for i in range(expert_first_token_offset[-1])]
    # Mark rows routed to non-local experts with an out-of-range sentinel.
    dst_row_id2src_row_id_map[expert_first_token_offset[-1] :] = n_token * topk
    return [
        permuted_hidden_states,
        expert_first_token_offset,
        src_row_id2dst_row_id_map,
        dst_row_id2src_row_id_map,
        valid_row_idx,
    ]
def torch_unpermute(
    permuted_hidden_states: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    token_expert_indices: torch.Tensor,
    src_row_id2dst_row_id_map: torch.Tensor,
    valid_row_idx: torch.Tensor,
    topk: int,
    n_expert: int,
) -> torch.Tensor:
    """Reference (pure PyTorch) implementation of moe_unpermute.

    WARNING: mutates ``permuted_hidden_states`` in place (invalid rows are
    zeroed before the weighted reduction over topk).

    NOTE(review): ``topk_ids``, ``token_expert_indices`` and ``n_expert``
    are accepted but unused — kept for signature symmetry with torch_permute.
    """
    # ignore invalid row
    n_hidden = permuted_hidden_states.shape[1]
    mask = torch.zeros(permuted_hidden_states.shape[0], dtype=bool, device="cuda")
    mask[valid_row_idx] = True
    permuted_hidden_states[~mask] = 0
    # Scatter rows back to (token, topk) order, then weighted-sum over topk.
    permuted_hidden_states = permuted_hidden_states[
        src_row_id2dst_row_id_map.flatten(), ...
    ]
    permuted_hidden_states = permuted_hidden_states.view(-1, topk, n_hidden)
    output = (
        (permuted_hidden_states * topk_weights.unsqueeze(2))
        .sum(1)
        .to(permuted_hidden_states.dtype)
    )
    return output
@pytest.mark.parametrize("n_token", [1, 33, 1024, 5000])
@pytest.mark.parametrize("n_hidden", [2048, 7168])
@pytest.mark.parametrize("n_expert", NUM_EXPERTS)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("ep_size", EP_SIZE)
def test_moe_permute_unpermute(
    n_token: int,
    n_hidden: int,
    topk: int,
    n_expert: int,
    ep_size: int,
    dtype: torch.dtype,
):
    """Compare the moe_permute/moe_unpermute kernels against the pure-torch
    reference implementations, including expert-parallel (EP) sharding where
    some experts are non-local.
    """
    if not moe_permute_unpermute_supported():
        pytest.skip("moe_permute_unpermute is not supported on this platform.")
    # Pick a random EP rank; the module-level seed makes this reproducible.
    ep_rank = np.random.randint(0, ep_size)
    expert_map = None
    n_local_expert = n_expert
    if ep_size != 1:
        n_local_expert, expert_map, _ = determine_expert_map(ep_size, ep_rank, n_expert)
        expert_map = expert_map.cuda()
    start_expert = n_local_expert * ep_rank
    set_random_seed(0)
    hidden_states = torch.randn((n_token, n_hidden), device="cuda").to(dtype)
    gating_output = torch.randn((n_token, n_expert), device="cuda").to(dtype)
    topk_weights, topk_ids, token_expert_indices = fused_topk(
        hidden_states, gating_output, topk, False
    )
    (
        gold_permuted_hidden_states,
        gold_expert_first_token_offset,
        gold_inv_permuted_idx,
        gold_permuted_idx,
        valid_row_idx,
    ) = torch_permute(
        hidden_states,
        topk_ids,
        # token_expert_indices,
        topk,
        n_expert,
        n_local_expert,
        start_expert,
        expert_map=expert_map,
    )
    (
        permuted_hidden_states,
        _,
        expert_first_token_offset,
        inv_permuted_idx,
        _,
    ) = moe_permute(
        hidden_states=hidden_states,
        a1q_scale=None,
        topk_ids=topk_ids,
        n_expert=n_expert,
        n_local_expert=n_local_expert,
        expert_map=expert_map,
    )
    # check expert_first_token_offset
    torch.testing.assert_close(
        gold_expert_first_token_offset, expert_first_token_offset, atol=0, rtol=0
    )
    # check src_row_id2dst_row_id_map
    torch.testing.assert_close(
        gold_inv_permuted_idx.flatten(), inv_permuted_idx, atol=0, rtol=0
    )
    # check permuted_hidden_states, only valid token
    torch.testing.assert_close(
        gold_permuted_hidden_states[valid_row_idx],
        permuted_hidden_states[valid_row_idx],
        atol=0,
        rtol=0,
    )
    # add a random tensor to simulate group gemm
    result0 = 0.5 * permuted_hidden_states + torch.randn_like(permuted_hidden_states)
    result4 = torch.empty_like(hidden_states)
    moe_unpermute(
        result4, result0, topk_weights, inv_permuted_idx, expert_first_token_offset
    )
    # torch_unpermute zeroes invalid rows of result0 in place, but result4
    # was already computed above, so the mutation doesn't affect the kernel.
    gold4 = torch_unpermute(
        result0,
        topk_weights,
        topk_ids,
        token_expert_indices,
        inv_permuted_idx,
        valid_row_idx,
        topk,
        n_local_expert,
    )
    # check unpermuted hidden
    torch.testing.assert_close(result4, gold4, atol=2e-2, rtol=0)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_moe_permute_unpermute.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
def moe_permute(
    hidden_states: torch.Tensor,
    a1q_scale: torch.Tensor | None,
    topk_ids: torch.Tensor,
    n_expert: int,
    n_local_expert: int = -1,
    expert_map: torch.Tensor | None = None,
    permuted_hidden_states: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor | None, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Expand and permute the activations so that the tokens routed to each
    expert become contiguous rows, ready for a standard grouped gemm.

    Parameters:
    - hidden_states (torch.Tensor): The input tensor to the MoE layer.
    - a1q_scale (Optional[torch.Tensor]): quant scale for hidden_states.
    - topk_ids (torch.Tensor): topk expert route id for each token.
    - n_expert (int): The number of experts.
    - n_local_expert (int): The number of experts in the current EP rank;
      -1 (the default) means all experts are local.
    - expert_map (Optional[torch.Tensor]): A tensor mapping expert indices
      from the global expert space to the local expert space of the expert
      parallel shard.
    - permuted_hidden_states (Optional[torch.Tensor]): Optional output tensor.
      If None, the output tensor will be created in this function.
    Returns:
    - permuted_hidden_states (torch.Tensor): permuted activation.
    - a1q_scale (Optional[torch.Tensor]): permuted quant scale for
      hidden_states if the original scale is not per-tensor scaling.
    - expert_first_token_offset (torch.Tensor): offset of the first token
      of each expert for standard grouped gemm.
    - inv_permuted_idx (torch.Tensor): idx map for moe_unpermute.
    - permuted_idx (torch.Tensor): idx map from hidden to permuted_hidden.
    """
    n_token, n_hidden = hidden_states.size()
    topk = topk_ids.size(1)
    # The kernel requires each row's byte size to be a multiple of 16.
    assert (n_hidden * hidden_states.element_size()) % 16 == 0, (
        "moe_permute kernel needs hidden dim aligned to 16B"
    )
    permuted_row_size = n_token * topk
    if n_local_expert == -1:
        n_local_expert = n_expert
    if permuted_hidden_states is None:
        permuted_hidden_states = torch.empty(
            (permuted_row_size, n_hidden),
            dtype=hidden_states.dtype,
            device=hidden_states.device,
        )
    assert permuted_hidden_states.size() == (permuted_row_size, n_hidden), (
        f"Expected permuted hidden states to be {(permuted_row_size, n_hidden)}"
        f" but got {permuted_hidden_states.size()}"
    )
    # Row ids 0..n_token*topk-1 laid out as (n_token, topk).
    token_expert_indices = torch.arange(
        0, n_token * topk, dtype=torch.int32, device=hidden_states.device
    ).reshape((n_token, topk))
    expert_first_token_offset = torch.empty(
        n_local_expert + 1, dtype=torch.int64, device=hidden_states.device
    )
    # Pre-filled with n_token * topk as an out-of-range sentinel for rows
    # the kernel does not write (tokens routed to non-local experts).
    permuted_idx = torch.full(
        (permuted_row_size,),
        n_token * topk,
        dtype=torch.int32,
        device=hidden_states.device,
    )
    inv_permuted_idx = torch.empty(
        (n_token, topk), dtype=torch.int32, device=hidden_states.device
    )
    topk_ids = topk_ids.to(torch.int32)
    torch.ops._moe_C.moe_permute(
        hidden_states,
        topk_ids,
        token_expert_indices,
        expert_map,
        n_expert,
        n_local_expert,
        topk,
        permuted_hidden_states,
        expert_first_token_offset,
        inv_permuted_idx,
        permuted_idx,
    )
    # Non-per-tensor scales (dim > 1) must be permuted alongside the
    # activations; clamp keeps the sentinel indices in bounds.
    if a1q_scale is not None and a1q_scale.dim() > 1:
        a1q_scale = a1q_scale[permuted_idx.clamp(max=n_token * topk - 1) // topk]
    return (
        permuted_hidden_states,
        a1q_scale,
        expert_first_token_offset,
        inv_permuted_idx.flatten(),
        permuted_idx,
    )
def moe_unpermute(
    out: torch.Tensor,
    permuted_hidden_states: torch.Tensor,
    topk_weights: torch.Tensor,
    inv_permuted_idx: torch.Tensor,
    expert_first_token_offset: torch.Tensor | None = None,
) -> None:
    """
    Inverse of `moe_permute`: gather each token's topk rows from the
    permuted layout, combine them using `topk_weights`, and write the
    reduced result into `out` in the original token order. Writes `out`
    in place and returns None.

    Parameters:
    - out (torch.Tensor): output tensor, written in place.
    - permuted_hidden_states (torch.Tensor): permuted activation.
    - topk_weights (torch.Tensor): topk expert route weight for each token.
    - inv_permuted_idx (torch.Tensor): row idx map for moe_unpermute.
    - expert_first_token_offset (Optional[torch.Tensor]): offset of the first
      token of each expert for grouped gemm.
    """
    topk = topk_weights.size(1)
    n_hidden = permuted_hidden_states.size(-1)
    # The kernel requires each row's byte size to be a multiple of 16.
    assert (n_hidden * permuted_hidden_states.element_size()) % 16 == 0, (
        "moe_unpermute kernel needs hidden dim aligned to 16B"
    )
    torch.ops._moe_C.moe_unpermute(
        permuted_hidden_states,
        topk_weights,
        inv_permuted_idx,
        expert_first_token_offset,
        topk,
        out,
    )
def moe_permute_unpermute_supported():
    """Return whether the compiled ``_moe_C`` extension exposes working
    permute/unpermute kernels for this build/platform."""
    return torch.ops._moe_C.moe_permute_unpermute_supported()
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/moe_permute_unpermute.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/multimodal/test_hasher.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import uuid
from pathlib import Path
import numpy as np
import pytest
import torch
from PIL import Image, ImageDraw
from vllm.multimodal.hasher import MultiModalHasher
# All hasher tests are marked to run on CPU.
pytestmark = pytest.mark.cpu_test
# Reference images that differ only in palette data (see
# test_hash_collision_image_palette); must ship alongside the tests.
ASSETS_DIR = Path(__file__).parent / "assets"
assert ASSETS_DIR.exists()
def test_hash_single_item_different_shape():
    """Tensors that differ only in shape must not collide."""
    scalar = torch.zeros(())
    vector = torch.zeros((1,))
    assert (MultiModalHasher.hash_kwargs(x=scalar)
            != MultiModalHasher.hash_kwargs(x=vector))
def test_hash_key_order_invariant():
    """The order keyword arguments are supplied in must not affect the hash."""
    first = torch.zeros((5, 10))
    second = torch.ones((5, 10))
    assert (MultiModalHasher.hash_kwargs(x=first, y=second)
            == MultiModalHasher.hash_kwargs(y=second, x=first))
# NOTE: Images that are the same visually are allowed to have the same hash
@pytest.mark.parametrize("mode_pair", [("1", "L"), ("RGBA", "CMYK")])
def test_hash_collision_image_mode(mode_pair):
    """Identical pixel data in two different PIL modes must hash differently."""
    mode_a, mode_b = mode_pair
    img_a = Image.new(mode_a, size=(10, 10), color=1)
    img_b = Image.new(mode_b, size=(10, 10), color=1)
    assert (MultiModalHasher.hash_kwargs(image=img_a)
            != MultiModalHasher.hash_kwargs(image=img_b))
def test_hash_collision_image_palette():
    """Images differing only in Image.palette._palette must hash differently."""
    img_a = Image.open(ASSETS_DIR / "image1.png")
    img_b = Image.open(ASSETS_DIR / "image2.png")
    assert (MultiModalHasher.hash_kwargs(image=img_a)
            != MultiModalHasher.hash_kwargs(image=img_b))
def test_hash_collision_image_transpose():
    """A drawing and its transposed counterpart must not collide."""
    horizontal = Image.new("1", size=(10, 20))
    ImageDraw.Draw(horizontal).line([(0, 0), (10, 0)])
    vertical = Image.new("1", size=(20, 10))
    ImageDraw.Draw(vertical).line([(0, 0), (0, 10)])
    assert (MultiModalHasher.hash_kwargs(image=horizontal)
            != MultiModalHasher.hash_kwargs(image=vertical))
@pytest.mark.parametrize("dtype", [torch.float32, torch.bfloat16])
def test_hash_collision_tensor_shape(dtype):
    """Tensors with equal flattened data but different shapes must differ."""
    shape_a = torch.zeros((5, 10, 20, 3), dtype=dtype)
    shape_b = torch.zeros((10, 20, 5, 3), dtype=dtype)
    assert (MultiModalHasher.hash_kwargs(data=shape_a)
            != MultiModalHasher.hash_kwargs(data=shape_b))
def test_hash_collision_array_shape():
    """Arrays with equal flattened data but different shapes must differ."""
    shape_a = np.zeros((5, 10, 20, 3))
    shape_b = np.zeros((10, 20, 5, 3))
    assert (MultiModalHasher.hash_kwargs(data=shape_a)
            != MultiModalHasher.hash_kwargs(data=shape_b))
def test_hash_non_contiguous_array():
    """A transposed view and its contiguous copy must hash identically."""
    view = np.arange(24).reshape(4, 6).T
    assert not view.flags.c_contiguous
    copy = np.ascontiguousarray(view)
    assert copy.flags.c_contiguous
    # Both layouts must be hashable and agree on the resulting digest.
    assert MultiModalHasher.hash_kwargs(data=view) == MultiModalHasher.hash_kwargs(data=copy)
def test_hash_image_exif_id():
    """A UUID stored in the EXIF ImageID tag overrides pixel-data hashing.

    An image whose EXIF ImageID holds a valid UUID hashes to that UUID's
    bytes; a non-UUID ImageID is ignored and the image data is hashed
    instead.
    """
    # `image_id` (not `id`) avoids shadowing the builtin.
    image_id = uuid.uuid4()
    image1 = Image.new("1", size=(10, 20))
    image1.getexif()[Image.ExifTags.Base.ImageID] = image_id
    image2 = Image.open(ASSETS_DIR / "image1.png")
    image2.getexif()[Image.ExifTags.Base.ImageID] = "Not a UUID"
    image2a = Image.open(ASSETS_DIR / "image1.png")
    hasher = MultiModalHasher
    # first image has UUID in ImageID, so it should hash to that UUID
    assert hasher.hash_kwargs(image=image1) == hasher.hash_kwargs(image=image_id.bytes)
    # second image has non-UUID in ImageID, so it should hash to the image data
    assert hasher.hash_kwargs(image=image2) == hasher.hash_kwargs(image=image2a)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/test_hasher.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/kernels/test_fused_quant_activation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
import vllm._custom_ops as ops
from tests.kernels.utils import opcheck
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.platforms import current_platform
DTYPES = [torch.bfloat16, torch.float16]
QUANT_DTYPES = [current_platform.fp8_dtype()]
NUM_TOKENS = [1, 17, 86, 1234, 3045]  # Arbitrary values for testing
HIDDEN_SIZES = [16, 48, 128, 1562, 4096]  # Arbitrary values for testing
SEEDS = [0]
# Exercise a second CUDA device only when more than one GPU is visible.
CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
def ref_impl(
    silu_and_mul: SiluAndMul, x: torch.Tensor, scale: torch.Tensor
) -> torch.Tensor:
    """Reference path: unfused SiluAndMul followed by FP8 quantization.

    Returns only the quantized tensor; the scale returned by
    `scaled_fp8_quant` is discarded (the comparison does not use it).
    """
    silu_and_mul_out = silu_and_mul.forward_native(x)
    out, _ = ops.scaled_fp8_quant(silu_and_mul_out, scale)
    return out
def ops_impl(x: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    """Fused path: single silu_and_mul_quant custom op writing FP8 output."""
    n_tokens, double_hidden = x.shape
    # The op halves the hidden dimension (gate/up halves are fused).
    result = torch.empty(
        (n_tokens, double_hidden // 2),
        dtype=current_platform.fp8_dtype(),
        device=x.device,
    )
    torch.ops._C.silu_and_mul_quant(result, x, scale)
    return result
@pytest.mark.parametrize("num_tokens", NUM_TOKENS)
@pytest.mark.parametrize("hidden_size", HIDDEN_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("quant_dtype", QUANT_DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_silu_and_mul(
    default_vllm_config,
    num_tokens: int,
    hidden_size: int,
    dtype: torch.dtype,
    quant_dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    """The fused silu_and_mul_quant op must match the unfused reference."""
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    layer = SiluAndMul()
    # Make inputs
    scale = torch.randn((1), device=device, dtype=torch.float32)
    x = torch.randn(num_tokens, hidden_size, dtype=dtype)
    ref_out = ref_impl(layer, x, scale)
    ops_out = ops_impl(x, scale)
    assert ref_out.dtype == quant_dtype
    assert ops_out.dtype == quant_dtype
    assert ref_out.shape == ops_out.shape
    # Compare in float32 since FP8 tensors do not support allclose directly.
    assert torch.allclose(
        ref_out.to(dtype=torch.float32), ops_out.to(dtype=torch.float32)
    )
    # opcheck validates the custom op's schema/registration.
    opcheck(torch.ops._C.silu_and_mul_quant, (ops_out, x, scale))
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/test_fused_quant_activation.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:examples/online_serving/streamlit_openai_chatbot_webserver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
vLLM Chat Assistant - A Streamlit Web Interface
A streamlined chat interface that quickly integrates
with vLLM API server.
Features:
- Multiple chat sessions management
- Streaming response display
- Configurable API endpoint
- Real-time chat history
- Reasoning Display: Optional thinking process visualization
Requirements:
pip install streamlit openai
Usage:
# Start the app with default settings
streamlit run streamlit_openai_chatbot_webserver.py
# Start with custom vLLM API endpoint
VLLM_API_BASE="http://your-server:8000/v1" \
streamlit run streamlit_openai_chatbot_webserver.py
# Enable debug mode
streamlit run streamlit_openai_chatbot_webserver.py \
--logger.level=debug
"""
import os
from datetime import datetime
import streamlit as st
from openai import OpenAI
# Get command line arguments from environment variables
openai_api_key = os.getenv("VLLM_API_KEY", "EMPTY")
openai_api_base = os.getenv("VLLM_API_BASE", "http://localhost:8000/v1")
# Initialize session states for managing chat sessions.
# st.session_state persists across Streamlit reruns, so each key is only
# seeded when missing.
if "sessions" not in st.session_state:
    st.session_state.sessions = {}
if "current_session" not in st.session_state:
    st.session_state.current_session = None
if "messages" not in st.session_state:
    st.session_state.messages = []
if "active_session" not in st.session_state:
    st.session_state.active_session = None
# Maps assistant-message index -> reasoning text (see chat display below)
if "show_reasoning" not in st.session_state:
    st.session_state.show_reasoning = {}
# Initialize session state for API base URL
if "api_base_url" not in st.session_state:
    st.session_state.api_base_url = openai_api_base
def create_new_chat_session():
    """Start a fresh chat session keyed by the current timestamp.

    Registers an empty message list under a new timestamp-based session
    id, clears the working message list, and marks the new session as
    both the current and the active one.
    """
    new_session_id = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    st.session_state.sessions[new_session_id] = []
    st.session_state.messages = []
    st.session_state.current_session = new_session_id
    st.session_state.active_session = new_session_id
def switch_to_chat_session(session_id):
    """Make *session_id* the current/active session and load its history.

    Args:
        session_id (str): Timestamp id of the session to activate.
    """
    state = st.session_state
    state.current_session = session_id
    state.active_session = session_id
    state.messages = state.sessions[session_id]
def get_llm_response(messages, model, reason, content_ph=None, reasoning_ph=None):
    """Generate and stream LLM response with optional reasoning process.

    Args:
        messages (list): List of conversation message dicts with 'role' and 'content'
        model (str): The model identifier to use for generation
        reason (bool): Whether to enable and display reasoning process
        content_ph (streamlit.empty): Placeholder for streaming response content
        reasoning_ph (streamlit.empty): Placeholder for streaming reasoning process

    Returns:
        tuple: (str, str)
            - First string contains the complete response text
            - Second string contains the complete reasoning text (if enabled)

    Note:
        Uses the module-level OpenAI `client` created later in this script;
        the name is resolved at call time. When reason=True, the reasoning
        process appears above the response. API errors are reported via
        st.error and returned as an "Error: ..." string.
    """
    full_text = ""
    think_text = ""
    live_think = None
    # Build request parameters
    params = {"model": model, "messages": messages, "stream": True}
    if reason:
        params["extra_body"] = {"chat_template_kwargs": {"enable_thinking": True}}
    try:
        response = client.chat.completions.create(**params)
        # Defensive: a plain-string response is displayed as-is, no streaming.
        if isinstance(response, str):
            if content_ph:
                content_ph.markdown(response)
            return response, ""
        # Prepare reasoning expander above content
        if reason and reasoning_ph:
            exp = reasoning_ph.expander("💭 Thinking Process (live)", expanded=True)
            live_think = exp.empty()
        # Stream chunks
        for chunk in response:
            delta = chunk.choices[0].delta
            # Stream reasoning first (displayed with a trailing cursor glyph)
            if reason and hasattr(delta, "reasoning") and live_think:
                rc = delta.reasoning
                if rc:
                    think_text += rc
                    live_think.markdown(think_text + "▌")
            # Then stream content
            if hasattr(delta, "content") and delta.content and content_ph:
                full_text += delta.content
                content_ph.markdown(full_text + "▌")
        # Finalize displays: reasoning remains above, content below
        if reason and live_think:
            live_think.markdown(think_text)
        if content_ph:
            content_ph.markdown(full_text)
        return full_text, think_text
    except Exception as e:
        st.error(f"Error details: {str(e)}")
        return f"Error: {str(e)}", ""
# Sidebar - API Settings first
st.sidebar.title("API Settings")
new_api_base = st.sidebar.text_input(
    "API Base URL:", value=st.session_state.api_base_url
)
# Changing the URL triggers a rerun so the client below is rebuilt with it.
if new_api_base != st.session_state.api_base_url:
    st.session_state.api_base_url = new_api_base
    st.rerun()
st.sidebar.divider()
# Sidebar - Session Management
st.sidebar.title("Chat Sessions")
if st.sidebar.button("New Session"):
    create_new_chat_session()
# Display all sessions in reverse chronological order
for session_id in sorted(st.session_state.sessions.keys(), reverse=True):
    # Mark the active session with a pinned button
    if session_id == st.session_state.active_session:
        st.sidebar.button(
            f"📍 {session_id}",
            key=session_id,
            type="primary",
            on_click=switch_to_chat_session,
            args=(session_id,),
        )
    else:
        st.sidebar.button(
            f"Session {session_id}",
            key=session_id,
            on_click=switch_to_chat_session,
            args=(session_id,),
        )
# Main interface
st.title("vLLM Chat Assistant")
# Initialize OpenAI client with API settings
client = OpenAI(api_key=openai_api_key, base_url=st.session_state.api_base_url)
# Get and display current model id (the first model the server reports)
models = client.models.list()
model = models.data[0].id
st.markdown(f"**Model**: {model}")
# Initialize first session if none exists
if st.session_state.current_session is None:
    create_new_chat_session()
    st.session_state.active_session = st.session_state.current_session
# Update the chat history display section
for idx, msg in enumerate(st.session_state.messages):
    # Render user messages normally
    if msg["role"] == "user":
        with st.chat_message("user"):
            st.write(msg["content"])
    # Render assistant messages with reasoning above
    else:
        # If reasoning exists for this assistant message, show it above the content
        if idx in st.session_state.show_reasoning:
            with st.expander("💭 Thinking Process", expanded=False):
                st.markdown(st.session_state.show_reasoning[idx])
        with st.chat_message("assistant"):
            st.write(msg["content"])
# Probe once (cached by Streamlit) whether the server emits reasoning.
@st.cache_data(show_spinner=False)
def server_supports_reasoning():
    """Check if the current model supports reasoning capability.

    Returns:
        bool: True if the model supports reasoning, False otherwise
    """
    probe = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": "Hi"}],
        stream=False,
    )
    # Missing attribute or empty reasoning both count as "not supported".
    reply = probe.choices[0].message
    return bool(getattr(reply, "reasoning", None))
# Check support
supports_reasoning = server_supports_reasoning()
# Add reasoning toggle in sidebar if supported
reason = False  # Default to False
if supports_reasoning:
    reason = st.sidebar.checkbox("Enable Reasoning", value=False)
else:
    st.sidebar.markdown(
        "<span style='color:gray;'>Reasoning unavailable for this model.</span>",
        unsafe_allow_html=True,
    )
    # reason remains False
# Handle a new user prompt: save it, echo it, then stream the reply.
if prompt := st.chat_input("Type your message here..."):
    # Save and display user message
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.session_state.sessions[st.session_state.current_session] = (
        st.session_state.messages
    )
    with st.chat_message("user"):
        st.write(prompt)
    # Prepare LLM messages (strip everything but role/content)
    msgs = [
        {"role": m["role"], "content": m["content"]} for m in st.session_state.messages
    ]
    # Stream assistant response
    with st.chat_message("assistant"):
        # Placeholders: reasoning above, content below
        reason_ph = st.empty()
        content_ph = st.empty()
        full, think = get_llm_response(msgs, model, reason, content_ph, reason_ph)
        # Determine index for this new assistant message
        message_index = len(st.session_state.messages)
        # Save assistant reply
        st.session_state.messages.append({"role": "assistant", "content": full})
        # Persist reasoning in session state if any
        if reason and think:
            st.session_state.show_reasoning[message_index] = think
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/streamlit_openai_chatbot_webserver.py",
"license": "Apache License 2.0",
"lines": 254,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/kv_events_subscriber.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
import msgspec
import zmq
from msgspec.msgpack import Decoder
from vllm.v1.core.kv_cache_utils import ExternalBlockHash
#
# Types copied from vllm.distributed.kv_events
#
class EventBatch(msgspec.Struct, array_like=True, omit_defaults=True, gc=False):
    """A batch of events sharing one publication timestamp."""

    # Publication timestamp, printed by process_event.
    ts: float
    events: list[Any]
# tag=True lets subclasses be decoded as a msgspec tagged union.
class KVCacheEvent(
    msgspec.Struct, array_like=True, omit_defaults=True, gc=False, tag=True
):
    """Base class for all KV cache-related events"""
class BlockStored(KVCacheEvent):
    """Event describing KV cache blocks that were stored."""

    block_hashes: list[ExternalBlockHash]
    parent_block_hash: ExternalBlockHash | None
    token_ids: list[int]
    block_size: int
    lora_id: int | None
    """Deprecated: use `lora_name` for KV block key hash.
    Retained for backward compatibility.
    """
    medium: str | None
    lora_name: str | None
    extra_keys: list[tuple[Any, ...] | None] | None = None
    """Extra keys used in block hash computation, one entry per block in
    block_hashes. Each entry contains MM identifiers, LoRA name, cache_salt,
    prompt embeddings data, etc. for that specific block.
    """
class BlockRemoved(KVCacheEvent):
    """Event describing KV cache blocks that were removed."""

    block_hashes: list[ExternalBlockHash]
    medium: str | None
class AllBlocksCleared(KVCacheEvent):
    """Event signalling that the entire KV cache was cleared."""

    pass
class KVEventBatch(EventBatch):
    # Narrow `events` to the concrete tagged union of KV cache event types.
    events: list[BlockStored | BlockRemoved | AllBlocksCleared]
def process_event(event_batch):
    """Print one received event batch, one line per event."""
    print(f"Received event batch at {event_batch.ts}:")
    for item in event_batch.events:
        print(f" - {item}")
def main():
    """Subscribe to the KV cache event stream and print each batch.

    A SUB socket receives live batches on the "kv-events" topic; a REQ
    socket talks to the replay service. When a gap in sequence numbers is
    detected, the missed range is requested and processed before the
    current message.

    Fix: `last_seq` is now advanced after each live message as well;
    previously it was only updated during replay, so the gap check
    `seq > last_seq + 1` compared against a stale value and (with
    last_seq stuck at -1) never triggered at all.
    """
    decoder = Decoder(type=KVEventBatch)
    last_seq = -1
    context = zmq.Context()
    # Set up the main subscription socket
    sub = context.socket(zmq.SUB)
    sub.connect("tcp://localhost:5557")
    topic = "kv-events"
    sub.setsockopt_string(zmq.SUBSCRIBE, topic)
    # Initialize replay socket
    replay = context.socket(zmq.REQ)
    replay.connect("tcp://localhost:5558")
    poller = zmq.Poller()
    poller.register(replay, zmq.POLLIN)
    print("Listening for KV cache events on topic:", topic)
    while True:
        try:
            if sub.poll(50):
                _, seq_bytes, payload = sub.recv_multipart()
                seq = int.from_bytes(seq_bytes, "big")
                if last_seq >= 0 and seq > last_seq + 1:
                    # Gap detected: ask the replay service for everything
                    # after the last processed sequence number.
                    missed = seq - last_seq - 1
                    print(
                        f"Missed {missed} messages (last: {last_seq}, current: {seq})"
                    )
                    replay.send((last_seq + 1).to_bytes(8, "big"))
                    while poller.poll(timeout=200):
                        seq_bytes, replay_payload = replay.recv_multipart()
                        if not replay_payload:
                            # End of replay marker is sent as an empty frame
                            # for the payload
                            break
                        replay_seq = int.from_bytes(seq_bytes, "big")
                        if replay_seq > last_seq:
                            event_batch = decoder.decode(replay_payload)
                            process_event(event_batch)
                            last_seq = replay_seq
                            if replay_seq >= seq - 1:
                                break
                event_batch = decoder.decode(payload)
                process_event(event_batch)
                # Advance past the live message so the next gap check
                # compares against the newest processed sequence.
                last_seq = seq
                # ... do other periodic work or check for shutdown ...
        except KeyboardInterrupt:
            print("Interrupted")
            break
        except Exception as e:
            print("Error decoding message:", e)
# Run the subscriber loop when executed as a script.
if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/kv_events_subscriber.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/distributed/test_events.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import threading
import time
import msgspec
import pytest
from vllm.distributed.kv_events import (
EventBatch,
EventPublisherFactory,
NullEventPublisher,
)
# Data-parallel rank used by tests that exercise a single publisher.
DP_RANK = 0
class EventSample(
    msgspec.Struct,
    tag=True,  # type: ignore
    array_like=True,  # type: ignore
):
    """Test event for publisher testing"""

    # Sequential id assigned by create_test_events; receivers assert on it.
    id: int
    value: str
class SampleBatch(EventBatch):
    """Test event batch for publisher testing"""

    # Narrows the base class's event list to EventSample instances.
    events: list[EventSample]
def create_test_events(count: int) -> SampleBatch:
    """Build a SampleBatch of *count* sequentially-numbered test events."""
    return SampleBatch(
        ts=time.time(),
        events=[EventSample(id=n, value=f"test-{n}") for n in range(count)],
    )
def test_basic_publishing(publisher, subscriber):
    """A single published batch arrives intact with sequence number 0."""
    batch = create_test_events(5)
    publisher.publish(batch)
    message = subscriber.receive_one(timeout=1000)
    assert message is not None, "No message received"
    seq, received = message
    assert seq == 0, "Sequence number mismatch"
    assert received.ts == pytest.approx(batch.ts, abs=0.1), "Timestamp mismatch"
    assert len(received.events) == len(batch.events), "Number of events mismatch"
    for idx, event in enumerate(received.events):
        assert event.id == idx, "Event id mismatch"
        assert event.value == f"test-{idx}", "Event value mismatch"
def test_multiple_events(publisher, subscriber):
    """Ten published batches arrive in order with sequence numbers 0..9."""
    for _ in range(10):
        publisher.publish(create_test_events(2))
    collected = []
    for _ in range(10):
        message = subscriber.receive_one(timeout=100)
        if message:
            collected.append(message)
    assert len(collected) == 10, "Number of messages mismatch"
    sequence_numbers = [seq for seq, _ in collected]
    assert sequence_numbers == list(range(10)), "Sequence numbers mismatch"
def test_replay_mechanism(publisher, subscriber):
    """Test the replay mechanism works correctly"""
    # Publish 19 batches (sequences 0-18) so there is history to replay.
    for _ in range(19):
        batch = create_test_events(1)
        publisher.publish(batch)
    time.sleep(0.5)  # Need publisher to process above requests
    # Ask for everything from sequence 10 onward.
    subscriber.request_replay(10)
    batch = create_test_events(1)
    publisher.publish(batch)  # 20th message
    replayed = subscriber.receive_replay()
    assert len(replayed) > 0, "No replayed messages received"
    seqs = [seq for seq, _ in replayed]
    assert all(seq >= 10 for seq in seqs), "Replayed messages not in order"
    # The replayed range must be gap-free.
    assert seqs == list(range(min(seqs), max(seqs) + 1)), (
        "Replayed messages not consecutive"
    )
def test_buffer_limit(publisher, subscriber, publisher_config):
    """Test buffer limit behavior"""
    buffer_size = publisher_config.buffer_steps
    # Publish more events than the buffer can hold
    for i in range(buffer_size + 10):
        batch = create_test_events(1)
        publisher.publish(batch)
    time.sleep(0.5)  # Need publisher to process above requests
    # Request everything from sequence 0; only buffered entries can come back.
    subscriber.request_replay(0)
    batch = create_test_events(1)
    publisher.publish(batch)
    replayed = subscriber.receive_replay()
    assert len(replayed) <= buffer_size, "Can't replay more than buffer size"
    # The first 10 sequences must have been evicted from the buffer.
    oldest_seq = min(seq for seq, _ in replayed)
    assert oldest_seq >= 10, "The oldest sequence should be at least 10"
def test_topic_filtering(publisher_config):
    """
    Test that a subscriber only receives messages matching its topic filter
    """
    # Publish on topic "foo" only, with replay disabled.
    publisher_config.replay_endpoint = None
    publisher_config.topic = "foo"
    pub = EventPublisherFactory.create(publisher_config, DP_RANK)
    from .conftest import MockSubscriber
    sub_foo = MockSubscriber(publisher_config.endpoint, None, "foo")
    sub_bar = MockSubscriber(publisher_config.endpoint, None, "bar")
    try:
        time.sleep(0.1)  # give subscribers time to connect before publishing
        for _ in range(3):
            pub.publish(create_test_events(1))
        foo_received = [sub_foo.receive_one(timeout=200) for _ in range(3)]
        assert all(msg is not None for msg in foo_received), (
            "Subscriber with matching topic should receive messages"
        )
        bar_received = [sub_bar.receive_one(timeout=200) for _ in range(3)]
        assert all(msg is None for msg in bar_received), (
            "Subscriber with non-matching topic should receive no messages"
        )
    finally:
        pub.shutdown()
        sub_foo.close()
        sub_bar.close()
def test_high_volume(publisher, subscriber):
    """Test publishing and receiving a high volume of events"""
    num_batches = 10_000
    events_per_batch = 100
    # Publish events in a separate thread to not block
    def publish_events():
        for i in range(num_batches):
            batch = create_test_events(events_per_batch)
            publisher.publish(batch)
            # Small delay to avoid overwhelming
            if i % 100 == 0:
                time.sleep(0.01)
    received: list[tuple[int, SampleBatch]] = []
    publisher_thread = threading.Thread(target=publish_events)
    publisher_thread.start()
    start_time = time.time()
    while len(received) < num_batches:
        if time.time() - start_time > 10:  # Timeout after 10 seconds
            break
        result = subscriber.receive_one(timeout=100)
        if result:
            received.append(result)
    publisher_thread.join()
    # Some loss is tolerated under load, but ordering must hold for
    # whatever did arrive.
    assert len(received) >= num_batches * 0.9, "We should have received most messages"
    seqs = [seq for seq, _ in received]
    assert sorted(seqs) == seqs, "Sequence numbers should be in order"
def test_null_publisher():
    """NullEventPublisher must accept publish/shutdown calls without error."""
    null_pub = NullEventPublisher(DP_RANK)
    # Publishing to the null sink is a no-op and must not raise.
    null_pub.publish(create_test_events(5))
    null_pub.shutdown()
def test_data_parallel_rank_tagging(publisher_config):
    """Test that events are properly tagged with their data parallel rank"""
    publisher_config.topic = "foo"
    pub_0 = EventPublisherFactory.create(publisher_config, DP_RANK)
    pub_1 = EventPublisherFactory.create(publisher_config, DP_RANK + 1)
    # Hardcode the expected endpoints based on port offsetting behavior
    # Both ranks get offsets according to _offset_endpoint_port function
    # NOTE(review): assumes the fixture endpoint uses port 5557 — verify.
    base_endpoint = publisher_config.endpoint
    if "tcp://" in base_endpoint:
        # For TCP endpoints: tcp://localhost:5557 -> tcp://localhost:5557, tcp://localhost:5558
        expected_endpoint_0 = base_endpoint  # rank 0 gets port + 0 = same port
        expected_endpoint_1 = base_endpoint.replace(
            ":5557", ":5558"
        )  # rank 1 gets port + 1
    else:
        # For inproc endpoints: inproc://test -> inproc://test_dp0, inproc://test_dp1
        expected_endpoint_0 = base_endpoint  # rank 0 gets base
        expected_endpoint_1 = base_endpoint + "_dp1"  # rank 1 gets _dp1
    from .conftest import MockSubscriber
    sub_0 = MockSubscriber(expected_endpoint_0, None, publisher_config.topic)
    sub_1 = MockSubscriber(expected_endpoint_1, None, publisher_config.topic)
    try:
        time.sleep(0.1)  # Let publishers start up
        # Publish events from different ranks
        batch_0 = create_test_events(2)
        batch_1 = create_test_events(3)
        pub_0.publish(batch_0)
        pub_1.publish(batch_1)
        # Receive events from rank 0
        result_0 = sub_0.receive_one(timeout=200)
        assert result_0 is not None, "No message received from rank 0"
        seq_0, received_0 = result_0
        # Receive events from rank 1
        result_1 = sub_1.receive_one(timeout=200)
        assert result_1 is not None, "No message received from rank 1"
        seq_1, received_1 = result_1
        # Verify DP rank tagging
        assert received_0.data_parallel_rank == 0, (
            f"Expected DP rank 0, got {received_0.data_parallel_rank}"
        )
        assert received_1.data_parallel_rank == 1, (
            f"Expected DP rank 1, got {received_1.data_parallel_rank}"
        )
        # Verify event content is correct
        assert len(received_0.events) == 2, "Wrong number of events from rank 0"
        assert len(received_1.events) == 3, "Wrong number of events from rank 1"
    finally:
        pub_0.shutdown()
        pub_1.shutdown()
        sub_0.close()
        sub_1.close()
def test_event_publisher_factory():
    """Test event publisher factory creation behavior under different configurations"""
    # Imports used only by this test.
    from vllm.config.kv_events import KVEventsConfig
    from vllm.distributed.kv_events import ZmqEventPublisher
    # test config is None
    publisher = EventPublisherFactory.create(None, DP_RANK)
    assert isinstance(publisher, NullEventPublisher)
    publisher.shutdown()
    # test disable kv cache events
    config = KVEventsConfig(
        enable_kv_cache_events=False,
        publisher="zmq",  # Even if zmq is specified, should return NullEventPublisher
        endpoint="tcp://localhost:5557",
    )
    publisher = EventPublisherFactory.create(config, DP_RANK)
    assert isinstance(publisher, NullEventPublisher)
    publisher.shutdown()
    # test zmq publisher
    config = KVEventsConfig(
        enable_kv_cache_events=True,
        publisher="zmq",
        endpoint="inproc://test-factory-true",
    )
    publisher = EventPublisherFactory.create(config, DP_RANK)
    assert isinstance(publisher, ZmqEventPublisher)
    publisher.shutdown()
    # test unknown publisher: config validation itself must reject it
    with pytest.raises(ValueError, match="Input should be"):
        KVEventsConfig(
            enable_kv_cache_events=True,
            publisher="unknown_publisher",
            endpoint="tcp://localhost:5557",
        )
    # test publisher not specified
    config = KVEventsConfig(
        enable_kv_cache_events=True,
        # publisher not specified, should default to "zmq"
        endpoint="tcp://localhost:5557",
    )
    publisher = EventPublisherFactory.create(config, DP_RANK)
    assert isinstance(publisher, ZmqEventPublisher)
    publisher.shutdown()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/distributed/test_events.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/aimv2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# A modified implementation of the AIMv2 Transformer
# inserted here also the image tokenizer used by Ovis2
from collections.abc import Iterable
import torch
import torch.nn as nn
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.distributed.utils import divide
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import MMEncoderAttention
from vllm.model_executor.layers.conv import Conv2dLayer
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.transformers_utils.configs.ovis import AIMv2Config
class AIMv2SwiGLUFFN(nn.Module):
    """SwiGLU feed-forward block used by the AIMv2 vision transformer."""

    def __init__(
        self, config: AIMv2Config, quant_config: QuantizationConfig, prefix: str
    ):
        super().__init__()
        use_bias = config.use_bias
        # Fused gate/up projection; SiluAndMul splits the two halves.
        self.fc13 = MergedColumnParallelLinear(
            config.hidden_size,
            [config.intermediate_size] * 2,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.fc13",
        )
        self.fc2 = RowParallelLinear(
            input_size=config.intermediate_size,
            output_size=config.hidden_size,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )
        self.act_fn = SiluAndMul()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gated, _ = self.fc13(x)
        activated = self.act_fn(gated)
        out, _ = self.fc2(activated)
        return out
class AIMv2PatchEmbed(nn.Module):
    """Convolutional patchifier producing RMS-normalized patch tokens."""

    def __init__(self, config: AIMv2Config):
        super().__init__()
        patch = (config.patch_size, config.patch_size)
        # Non-overlapping conv: kernel and stride both equal the patch size.
        self.proj = Conv2dLayer(
            config.num_channels,
            config.hidden_size,
            kernel_size=patch,
            stride=patch,
        )
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Collapse the spatial grid into a token axis, then normalize.
        patches = self.proj(x)
        tokens = patches.flatten(2).transpose(1, 2)
        return self.norm.forward_native(tokens)
class AIMv2ViTPreprocessor(nn.Module):
    """Patchify the input image and add learned positional embeddings."""

    def __init__(self, config: AIMv2Config):
        super().__init__()
        grid = config.image_size // config.patch_size
        self.patchifier = AIMv2PatchEmbed(config)
        self.pos_embed = nn.Parameter(
            torch.zeros((1, grid * grid, config.hidden_size))
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        tokens = self.patchifier(x)
        seq_len = tokens.shape[1]
        # Slice in case the input yields fewer patches than the trained maximum.
        return tokens + self.pos_embed.to(tokens.device)[:, :seq_len]
class AIMv2Attention(nn.Module):
    """Tensor-parallel multi-head self-attention for the AIMv2 trunk."""

    def __init__(
        self, config: AIMv2Config, quant_config: QuantizationConfig, prefix: str
    ):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        # Heads must evenly divide the hidden size.
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                "embed_dim must be divisible by num_heads "
                f"(got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5

        self.qkv = QKVParallelLinear(
            hidden_size=self.embed_dim,
            head_size=self.head_dim,
            total_num_heads=self.num_heads,
            bias=config.qkv_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv",
        )
        self.proj = RowParallelLinear(
            input_size=self.embed_dim,
            output_size=self.embed_dim,
            bias=config.use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.proj",
        )

        # Each tensor-parallel rank owns an equal share of the heads.
        self.tp_size = get_tensor_model_parallel_world_size()
        self.num_heads_per_partition = divide(self.num_heads, self.tp_size)
        self.attn = MMEncoderAttention(
            self.num_heads_per_partition,
            self.head_dim,
            self.scale,
            prefix=f"{prefix}.attn",
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        fused_qkv, _ = self.qkv(x)
        query, key, value = fused_qkv.chunk(3, dim=-1)
        attn_out = self.attn(query, key, value)
        out, _ = self.proj(attn_out)
        return out
class AIMv2Block(nn.Module):
    """Pre-norm transformer block: attention then SwiGLU MLP, both residual."""

    def __init__(
        self, config: AIMv2Config, quant_config: QuantizationConfig, prefix: str
    ):
        super().__init__()
        self.attn = AIMv2Attention(
            config, quant_config=quant_config, prefix=f"{prefix}.attn"
        )
        self.norm_1 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.mlp = AIMv2SwiGLUFFN(
            config, quant_config=quant_config, prefix=f"{prefix}.mlp"
        )
        self.norm_2 = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        attn_out = self.attn(self.norm_1.forward_native(x))
        x = x + attn_out
        mlp_out = self.mlp(self.norm_2.forward_native(x))
        return x + mlp_out
class AIMv2Transformer(nn.Module):
    """Stack of AIMv2 blocks with an optional trailing RMSNorm."""

    def __init__(
        self,
        config: AIMv2Config,
        quant_config: QuantizationConfig,
        *,
        require_post_norm: bool | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.blocks = nn.ModuleList(
            [
                AIMv2Block(config, quant_config, prefix=f"{prefix}.blocks.{idx}")
                for idx in range(config.num_hidden_layers)
            ]
        )
        # The trailing norm is optional and may be skipped by the caller.
        self.post_trunk_norm = (
            RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
            if require_post_norm
            else None
        )

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        # they take the -1 as the ref embeddings, like a clip skip
        for layer in self.blocks:
            tokens = layer(tokens)
        if self.post_trunk_norm is not None:
            tokens = self.post_trunk_norm(tokens)
        return tokens
class AIMv2Model(torch.nn.Module):
    """Full AIMv2 vision encoder: patch preprocessor followed by the
    transformer trunk. Used as the image tokenizer backbone for Ovis2."""

    def __init__(
        self,
        config: AIMv2Config,
        quant_config: QuantizationConfig,
        *,
        require_post_norm: bool | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.preprocessor = AIMv2ViTPreprocessor(config)
        self.trunk = AIMv2Transformer(
            config,
            quant_config=quant_config,
            require_post_norm=require_post_norm,
            prefix=f"{prefix}.trunk",
        )

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """Encode images into a sequence of patch features."""
        x = self.preprocessor(pixel_values)
        x = self.trunk(x)
        return x

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, remapping the separate fc1/fc3 MLP shards
        onto the fused fc13 parameter. Returns the set of loaded names."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id): fc1 -> shard 0, fc3 -> shard 1
            (".fc13", ".fc1", 0),
            (".fc13", ".fc3", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Skip post_trunk_norm entries when the norm was not constructed
            # (require_post_norm was falsy).
            if (
                name.startswith("trunk.post_trunk_norm")
                and self.trunk.post_trunk_norm is None
            ):
                continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                # Rename the checkpoint key to the fused parameter and load
                # this tensor into its designated shard.
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Non-stacked parameter: load directly, falling back to the
                # default loader when no custom one is attached.
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/aimv2.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/reasoning/test_qwen3_reasoning_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from transformers import AutoTokenizer
from tests.reasoning.utils import (
StreamingReasoningReconstructor,
run_reasoning_extraction,
run_reasoning_extraction_streaming,
)
from vllm.entrypoints.openai.chat_completion.protocol import ChatCompletionRequest
from vllm.reasoning import ReasoningParser, ReasoningParserManager
# Registry key and the think-tag delimiters used by the parser under test.
parser_name = "qwen3"
start_token = "<think>"
end_token = "</think>"

# Model names whose tokenizers are exercised by the parametrized fixture.
REASONING_MODEL_NAMES = [
    "Qwen/Qwen3-0.6B",
    "Qwen/Qwen3.5-397B-A17B",
    "Qwen/Qwen3-4B-Thinking-2507",
]


@pytest.fixture(scope="module", params=REASONING_MODEL_NAMES)
def qwen3_tokenizer(request):
    # Module-scoped so each tokenizer is loaded once per test session.
    return AutoTokenizer.from_pretrained(request.param)
# Each case dict maps the raw model "output" to the expected extracted
# "reasoning" and "content" (None means that part is absent).

# --- <think> in prompt, only </think> in output (typical) ---
WITHOUT_START_TOKEN = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
WITHOUT_START_TOKEN_STREAM = {
    "output": "This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
WITHOUT_START_TOKEN_COMPLETE_REASONING = {
    "output": "This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
}

# --- <think> present in output (old template / edge case) ---
WITH_THINK = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}
WITH_THINK_STREAM = {
    "output": "<think>This is a reasoning section</think>This is the rest",
    "reasoning": "This is a reasoning section",
    "content": "This is the rest",
}

# --- No think tokens at all (thinking enabled, truncated) ---
# With thinking enabled (default), no think tokens means the output was
# truncated before </think> could be generated. All output is reasoning.
WITHOUT_THINK = {
    "output": "This is the rest",
    "reasoning": "This is the rest",
    "content": None,
}
# In streaming, the parser cannot distinguish "thinking disabled" from
# "reasoning in progress" when no think tokens have appeared yet.
# It assumes reasoning. The serving layer handles the "thinking disabled"
# case by checking prompt_is_reasoning_end_arr before calling the parser.
WITHOUT_THINK_STREAM = {
    "output": "This is the rest",
    "reasoning": "This is the rest",
    "content": None,
}

# --- Edge cases ---
COMPLETE_REASONING = {
    "output": "<think>This is a reasoning section</think>",
    "reasoning": "This is a reasoning section",
    "content": None,
}
MULTILINE_REASONING = {
    "output": "<think>This is a reasoning\nsection</think>This is the rest\nThat",
    "reasoning": "This is a reasoning\nsection",
    "content": "This is the rest\nThat",
}
# Truncated output: <think> present but no </think> (thinking enabled).
# Everything is reasoning because the output was cut off mid-thought.
ONLY_OPEN_TAG = {
    "output": "<think>This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
}
ONLY_OPEN_TAG_STREAM = {
    "output": "<think>This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
}
# Truncated output without <think> prefix (Qwen3.5 style where <think>
# is in the prompt). No </think> means truncation — all is reasoning.
TRUNCATED_NO_START_TOKEN = {
    "output": "This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
}
TRUNCATED_NO_START_TOKEN_STREAM = {
    "output": "This is a reasoning section",
    "reasoning": "This is a reasoning section",
    "content": None,
}

# Parametrization pairs: (streaming flag, case dict). Each case runs in both
# non-streaming and streaming mode where applicable.
TEST_CASES = [
    pytest.param(
        False,
        WITHOUT_START_TOKEN,
        id="without_start_token",
    ),
    pytest.param(
        True,
        WITHOUT_START_TOKEN_STREAM,
        id="without_start_token_stream",
    ),
    pytest.param(
        False,
        WITHOUT_START_TOKEN_COMPLETE_REASONING,
        id="without_start_token_complete_reasoning",
    ),
    pytest.param(
        True,
        WITHOUT_START_TOKEN_COMPLETE_REASONING,
        id="without_start_token_complete_reasoning_stream",
    ),
    pytest.param(
        False,
        WITH_THINK,
        id="with_think",
    ),
    pytest.param(
        True,
        WITH_THINK_STREAM,
        id="with_think_stream",
    ),
    pytest.param(
        False,
        WITHOUT_THINK,
        id="without_think",
    ),
    pytest.param(
        True,
        WITHOUT_THINK_STREAM,
        id="without_think_stream",
    ),
    pytest.param(
        False,
        COMPLETE_REASONING,
        id="complete_reasoning",
    ),
    pytest.param(
        True,
        COMPLETE_REASONING,
        id="complete_reasoning_stream",
    ),
    pytest.param(
        False,
        MULTILINE_REASONING,
        id="multiline_reasoning",
    ),
    pytest.param(
        True,
        MULTILINE_REASONING,
        id="multiline_reasoning_stream",
    ),
    pytest.param(
        False,
        ONLY_OPEN_TAG,
        id="only_open_tag",
    ),
    pytest.param(
        True,
        ONLY_OPEN_TAG_STREAM,
        id="only_open_tag_stream",
    ),
    pytest.param(
        False,
        TRUNCATED_NO_START_TOKEN,
        id="truncated_no_start_token",
    ),
    pytest.param(
        True,
        TRUNCATED_NO_START_TOKEN_STREAM,
        id="truncated_no_start_token_stream",
    ),
]
@pytest.mark.parametrize("streaming, param_dict", TEST_CASES)
def test_reasoning(
    streaming: bool,
    param_dict: dict,
    qwen3_tokenizer,
):
    """Check reasoning/content splitting for each canned model output."""
    raw_tokens = qwen3_tokenizer.tokenize(param_dict["output"])
    # Convert token-by-token so the streaming path sees realistic deltas.
    token_strings: list[str] = []
    for tok in raw_tokens:
        token_strings.append(qwen3_tokenizer.convert_tokens_to_string([tok]))

    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser: ReasoningParser = parser_cls(qwen3_tokenizer)

    reasoning, content = run_reasoning_extraction(
        parser, token_strings, streaming=streaming
    )

    assert reasoning == param_dict["reasoning"]
    assert content == param_dict["content"]
# Multi-token delta tests: simulate real-world streaming where a single
# delta can contain multiple tokens (e.g., speculative decoding).
# Each param is (deltas, expected_reasoning, expected_content).
MULTI_TOKEN_DELTA_CASES = [
    pytest.param(
        # <think> grouped with following text in one delta
        ["<think>This is a reasoning section", "</think>", "This is the rest"],
        "This is a reasoning section",
        "This is the rest",
        id="start_token_grouped_with_text",
    ),
    pytest.param(
        # </think> grouped with following content in one delta
        ["reasoning section", "</think>This is the rest"],
        "reasoning section",
        "This is the rest",
        id="end_token_grouped_with_content",
    ),
    pytest.param(
        # <think> and </think> in the same delta, no content after
        ["<think>reasoning</think>"],
        "reasoning",
        None,
        id="start_and_end_in_one_delta_no_content",
    ),
    pytest.param(
        # No start token, end grouped with content (Qwen3.5 style)
        ["reasoning section", "</think>content"],
        "reasoning section",
        "content",
        id="no_start_end_grouped_with_content",
    ),
]
@pytest.mark.parametrize(
    "deltas, expected_reasoning, expected_content", MULTI_TOKEN_DELTA_CASES
)
def test_reasoning_streaming_multi_token_deltas(
    deltas: list[str],
    expected_reasoning: str | None,
    expected_content: str | None,
    qwen3_tokenizer,
):
    """Multi-token deltas must not leak <think> into the reasoning stream."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser: ReasoningParser = parser_cls(qwen3_tokenizer)

    reconstructor: StreamingReasoningReconstructor = run_reasoning_extraction_streaming(
        parser, deltas
    )

    assert reconstructor.reasoning == expected_reasoning
    # Normalize empty-string content to None before comparing.
    assert (reconstructor.other_content or None) == expected_content
# --- Tests for enable_thinking=False (thinking explicitly disabled) ---
# Each param is (model output, expected reasoning, expected content);
# with thinking disabled, all output should surface as plain content.
THINKING_DISABLED_CASES = [
    pytest.param(
        "This is plain content",
        None,
        "This is plain content",
        id="thinking_disabled_plain_content",
    ),
    pytest.param(
        "Some output without think tokens",
        None,
        "Some output without think tokens",
        id="thinking_disabled_no_think_tokens",
    ),
]
@pytest.mark.parametrize(
    "output, expected_reasoning, expected_content", THINKING_DISABLED_CASES
)
def test_reasoning_thinking_disabled(
    output: str,
    expected_reasoning: str | None,
    expected_content: str | None,
    qwen3_tokenizer,
):
    """When enable_thinking=False, output without </think> is all content."""
    parser_cls = ReasoningParserManager.get_reasoning_parser(parser_name)
    parser: ReasoningParser = parser_cls(
        qwen3_tokenizer,
        chat_template_kwargs={"enable_thinking": False},
    )

    reasoning, content = parser.extract_reasoning(
        model_output=output,
        request=ChatCompletionRequest(messages=[], model="test-model"),
    )

    assert reasoning == expected_reasoning
    assert content == expected_content
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/reasoning/test_qwen3_reasoning_parser.py",
"license": "Apache License 2.0",
"lines": 286,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/reasoning/qwen3_reasoning_parser.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Sequence
from vllm.entrypoints.openai.chat_completion.protocol import (
ChatCompletionRequest,
)
from vllm.entrypoints.openai.engine.protocol import DeltaMessage
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
)
from vllm.reasoning.basic_parsers import BaseThinkingReasoningParser
from vllm.tokenizers import TokenizerLike
class Qwen3ReasoningParser(BaseThinkingReasoningParser):
    """
    Reasoning parser for the Qwen3/Qwen3.5 model family.

    The Qwen3 model family uses <think>...</think> tokens to denote reasoning
    text. Starting with Qwen3.5, the chat template places <think> in the
    prompt so only </think> appears in the generated output. The model
    provides a strict switch to disable reasoning output via the
    'enable_thinking=False' parameter.

    When thinking is disabled, the template places <think>\\n\\n</think>\\n\\n
    in the prompt. The serving layer detects this via prompt_is_reasoning_end
    and routes deltas as content without calling the streaming parser.

    NOTE: Models up to the 2507 release (e.g., Qwen/Qwen3-235B-A22B-Instruct-2507)
    use an older chat template where the model generates <think> itself.
    This parser handles both styles: if <think> appears in the generated output
    it is stripped before extraction (non-streaming) or skipped (streaming).
    """

    def __init__(self, tokenizer: TokenizerLike, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)
        chat_kwargs = kwargs.get("chat_template_kwargs", {}) or {}
        # Qwen3 defaults to thinking enabled; only treat output as
        # pure content when the user explicitly disables it.
        self.thinking_enabled = chat_kwargs.get("enable_thinking", True)

    @property
    def start_token(self) -> str:
        """The token that starts reasoning content."""
        return "<think>"

    @property
    def end_token(self) -> str:
        """The token that ends reasoning content."""
        return "</think>"

    def extract_reasoning(
        self, model_output: str, request: ChatCompletionRequest | ResponsesRequest
    ) -> tuple[str | None, str | None]:
        """
        Extract reasoning content from the model output.

        The <think> token is placed in the prompt by the chat template,
        so typically only </think> appears in the generated output.
        If <think> is present (e.g. from a different template), it is
        stripped before extraction.

        When thinking is explicitly disabled and no </think> appears,
        returns (None, model_output) — all output is content.
        Otherwise (thinking enabled, default), a missing </think> means
        the output was truncated and everything is reasoning:
        returns (model_output, None).

        Returns:
            tuple[Optional[str], Optional[str]]: reasoning content and content
        """
        # Strip <think> if present in the generated output.
        # partition returns (before, sep, after); when the separator is found,
        # keep only the text after it, otherwise keep the whole output.
        model_output_parts = model_output.partition(self.start_token)
        model_output = (
            model_output_parts[2] if model_output_parts[1] else model_output_parts[0]
        )

        if self.end_token not in model_output:
            if not self.thinking_enabled:
                # Thinking explicitly disabled — treat everything as content.
                return None, model_output
            # Thinking enabled but no </think>: output was truncated.
            # Everything generated so far is reasoning.
            return model_output, None

        # Extract reasoning content from the model output.
        reasoning, _, content = model_output.partition(self.end_token)
        # Normalize empty content to None.
        final_content = content or None
        return reasoning, final_content

    def extract_reasoning_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        """
        Extract reasoning content from a streaming delta.

        Since <think> is placed in the prompt by the chat template, all
        generated tokens before </think> are reasoning and tokens after
        are content.

        NOTE: When thinking is disabled, no think tokens appear in the
        generated output. The serving layer detects this via
        prompt_is_reasoning_end and routes deltas as content without
        calling this method.
        """
        # Strip <think> from delta if present (old template / edge case
        # where the model generates <think> itself).
        if self.start_token_id in delta_token_ids:
            start_idx = delta_text.find(self.start_token)
            if start_idx >= 0:
                delta_text = delta_text[start_idx + len(self.start_token) :]

        if self.end_token_id in delta_token_ids:
            # End token in this delta: split reasoning from content.
            end_index = delta_text.find(self.end_token)
            if end_index >= 0:
                reasoning = delta_text[:end_index]
                content = delta_text[end_index + len(self.end_token) :]
                # Emit nothing when both sides of the split are empty.
                if not reasoning and not content:
                    return None
                return DeltaMessage(
                    reasoning=reasoning if reasoning else None,
                    content=content if content else None,
                )
            # end_token_id in IDs but not in text (already stripped)
            return None

        # No end token in this delta.
        if not delta_text:
            # Nothing left after stripping start token.
            return None
        elif self.end_token_id in previous_token_ids:
            # End token already passed: everything is content now.
            return DeltaMessage(content=delta_text)
        else:
            # No end token yet: still in reasoning phase.
            return DeltaMessage(reasoning=delta_text)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/reasoning/qwen3_reasoning_parser.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/multimodal/processing/test_minimax_vl_01.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from PIL import Image
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.parse import ImageSize
from vllm.multimodal.processing import BaseMultiModalProcessor
from ....conftest import ImageTestAssets
from ...utils import build_model_context
@pytest.mark.parametrize("model_id", ["MiniMaxAI/MiniMax-VL-01"])
@pytest.mark.parametrize("num_imgs", [1, 2])
def test_processor_override(
    image_assets: ImageTestAssets,
    model_id: str,
    num_imgs: int,
):
    """The processor must emit one placeholder entry per input image."""
    ctx = build_model_context(
        model_id,
        mm_processor_kwargs=None,
        limit_mm_per_prompt={"image": num_imgs},
    )
    processor = MULTIMODAL_REGISTRY.create_processor(ctx.model_config)

    dummy_image = Image.new("RGB", size=(364, 364))
    mm_data = {"image": [dummy_image] * num_imgs}
    outputs = processor(
        "<image>" * num_imgs,
        mm_items=processor.info.parse_mm_data(mm_data),
        hf_processor_mm_kwargs={},
    )

    assert len(outputs["mm_placeholders"]["image"]) == num_imgs
def _validate_image_prompt_replacements_one(
    processor: BaseMultiModalProcessor,
    num_imgs: int,
    failed_size_excs: list[tuple[ImageSize, Exception]],
    image_size: ImageSize,
) -> None:
    """Process one image size; append (size, exception) on any failure."""
    prompt = "<image>" * num_imgs
    mm_data = {"image": [Image.new("RGB", size=image_size)] * num_imgs}
    try:
        outputs = processor(
            prompt,
            mm_items=processor.info.parse_mm_data(mm_data),
            hf_processor_mm_kwargs={},
        )
        assert len(outputs["mm_placeholders"]["image"]) == num_imgs
    except Exception as exc:
        # Collect rather than raise so the caller can report all failures.
        failed_size_excs.append((image_size, exc))
def _test_image_prompt_replacements(
    processor,
    *,
    num_imgs: int,
    image_sizes: list[ImageSize],
) -> None:
    """Run the single-size validator over every size, then fail with a summary."""
    failures: list[tuple[ImageSize, Exception]] = []
    for image_size in image_sizes:
        _validate_image_prompt_replacements_one(
            processor, num_imgs, failures, image_size
        )

    if failures:
        details = "\n========\n".join(f"[{size}]\n{exc}" for size, exc in failures)
        raise AssertionError("Found failing image sizes:" + details)
@pytest.mark.parametrize("model_id", ["MiniMaxAI/MiniMax-VL-01"])
@pytest.mark.parametrize("num_imgs", [1, 2])
def test_processor_prompt_replacements_regression(model_id, num_imgs):
    """Regression sweep over aspect ratios that exercised prompt replacement."""
    ctx = build_model_context(
        model_id,
        mm_processor_kwargs=None,
        limit_mm_per_prompt={"image": num_imgs},
    )
    processor = MULTIMODAL_REGISTRY.create_processor(ctx.model_config)

    image_ratios = [
        (171, 152),
        (184, 161),
        (198, 176),
        (333, 296),
        (369, 328),
        (488, 183),
        (2560, 1669),
    ]
    # Try each ratio in both landscape and portrait orientation.
    image_sizes: list[ImageSize] = []
    for w, h in image_ratios:
        image_sizes.append(ImageSize(w, h))
        image_sizes.append(ImageSize(h, w))

    _test_image_prompt_replacements(
        processor,
        num_imgs=num_imgs,
        image_sizes=image_sizes,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/multimodal/processing/test_minimax_vl_01.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/minimax_vl_01.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable, Mapping
from typing import Annotated, Literal, TypeAlias
import torch
import torch.nn as nn
from transformers import BatchFeature, PretrainedConfig
from transformers.models.llava_next.modeling_llava_next import (
get_anyres_image_grid_shape,
unpad_image,
)
from vllm.config import VllmConfig
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.linear import ColumnParallelLinear, RowParallelLinear
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import MultiModalFieldConfig
from vllm.sequence import IntermediateTensors
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .clip import CLIPVisionModel
from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
from .llava import (
BaseLlavaMultiModalProcessor,
LlavaDummyInputsBuilder,
init_vision_tower_for_llava,
)
from .llava_next import LlavaNextProcessingInfo
from .pixtral import PixtralHFVisionModel
from .siglip import SiglipVisionModel
from .utils import (
AutoWeightsLoader,
init_vllm_registered_model,
maybe_prefix,
)
class MiniMaxVL01ImagePixelInputs(TensorSchema):
    """
    Pixel-value input schema for MiniMax-VL-01.

    Dimensions:
        - bn: Batch size * number of images
        - np: Number of patches + 1
        - c: Number of channels (3)
        - h: Height
        - w: Width

    Note that `num_patches` may be different per batch and image,
    in which case the data is passed as a list instead of a batched tensor.
    """

    # Discriminator distinguishing pixel inputs from embedding inputs.
    type: Literal["pixel_values"] = "pixel_values"

    pixel_values: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("bn", "np", 3, "h", "w", dynamic_dims={"np", "h", "w"}),
    ]

    # Per-image original size; this should be in `(height, width)` format.
    image_sizes: Annotated[torch.Tensor | None, TensorShape("bn", 2)]
class MiniMaxVL01ImageEmbeddingInputs(TensorSchema):
    """
    Precomputed image-embedding input schema for MiniMax-VL-01.

    Dimensions:
        - bn: Batch size * number of images
        - ifs: Image feature size
        - hs: Hidden size (must match language model backbone)
    """

    # Discriminator distinguishing embedding inputs from pixel inputs.
    type: Literal["image_embeds"] = "image_embeds"

    data: Annotated[torch.Tensor, TensorShape("bn", "ifs", "hs")]


# Union of the two accepted image input schemas.
MiniMaxVL01ImageInputs: TypeAlias = (
    MiniMaxVL01ImagePixelInputs | MiniMaxVL01ImageEmbeddingInputs
)
class MiniMaxVL01MultiModalProjector(nn.Module):
    """Two-layer MLP mapping vision features into the text embedding space."""

    def __init__(
        self,
        vision_hidden_size: int,
        text_hidden_size: int,
        projector_hidden_act: str,
        multimodal_projector_bias: bool,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        # vision hidden -> text hidden, activation, then text hidden -> text hidden.
        self.linear_1 = ColumnParallelLinear(
            vision_hidden_size,
            text_hidden_size,
            bias=multimodal_projector_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_1",
        )
        self.act = get_act_fn(projector_hidden_act)
        self.linear_2 = RowParallelLinear(
            text_hidden_size,
            text_hidden_size,
            bias=multimodal_projector_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_2",
        )

    def forward(self, image_features: torch.Tensor) -> torch.Tensor:
        projected, _ = self.linear_1(image_features)
        activated = self.act(projected)
        out, _ = self.linear_2(activated)
        return out
class MiniMaxVL01DummyInputsBuilder(LlavaDummyInputsBuilder):
    # MiniMax-VL-01 reuses LLaVA's dummy-input generation unchanged.
    pass
class MiniMaxVL01ProcessingInfo(LlavaNextProcessingInfo):
    """Processing info for MiniMax-VL-01, built on the LLaVA-NeXT logic."""

    def get_hf_config(self):  # Need to override the config type
        # Use the generic PretrainedConfig rather than the LLaVA-NeXT
        # config class expected by the parent.
        return self.ctx.get_hf_config(PretrainedConfig)

    def get_hf_processor(self, **kwargs: object):
        hf_processor = self.ctx.get_hf_processor(**kwargs)
        image_processor = hf_processor.image_processor
        # Route anyres preprocessing through the vLLM-specific variant.
        image_processor.anyres_preprocess = image_processor.anyres_for_vllm_preprocess
        return hf_processor

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # None means no hard cap on images per prompt.
        return {"image": None}
class MiniMaxVL01MultiModalProcessor(
    BaseLlavaMultiModalProcessor[MiniMaxVL01ProcessingInfo]
):
    """Multimodal processor for MiniMax-VL-01 with per-image pixel cropping."""

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )

        pixel_values = outputs.get("pixel_values")
        if pixel_values is not None:
            # Crop each image's patches back to its true size so the cached
            # output for one image is independent of the others in the batch.
            image_sizes = outputs["image_sizes"]
            assert len(pixel_values) == len(image_sizes)
            cropped = []
            for patches, (height, width) in zip(pixel_values, image_sizes):
                cropped.append(patches[:, :height, :width])
            outputs["pixel_values"] = cropped

        return outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        # Every image tensor is batched along the image dimension.
        batched_image = MultiModalFieldConfig.batched("image")
        return {
            "pixel_values": batched_image,
            "image_sizes": batched_image,
            "image_embeds": batched_image,
        }
@MULTIMODAL_REGISTRY.register_processor(
    MiniMaxVL01MultiModalProcessor,
    info=MiniMaxVL01ProcessingInfo,
    dummy_inputs=MiniMaxVL01DummyInputsBuilder,
)
class MiniMaxVL01ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
    """MiniMax-VL-01 vision-language model.

    A vision tower (CLIP / SigLIP / Pixtral, selected by the HF config) produces
    per-image patch features, a two-layer MLP projector maps them into the text
    hidden size, and the result is merged into the text backbone as multimodal
    embeddings.  Supports pipeline parallelism via ``SupportsPP``.
    """

    # Maps fused vLLM linear layers to the per-projection weights found in the
    # HF checkpoint, so the weight loader can stitch them together.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for the i-th item of *modality*.

        Only the image modality is supported; any other modality raises
        ``ValueError``.
        """
        if modality.startswith("image"):
            return "<image>"
        raise ValueError("Only image modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        """Build the vision tower, projector, and text backbone from *vllm_config*.

        *prefix* is the parameter-name prefix used when loading weights.
        """
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        # NOTE(review): the original indentation was ambiguous here; the tower
        # context is assumed to cover the vision tower, projector, and the
        # image_newline parameter — confirm against upstream.
        with self._mark_tower_model(vllm_config, "image"):
            self.vision_tower = init_vision_tower_for_llava(
                config,
                quant_config=quant_config,
                require_post_norm=False,
                prefix=maybe_prefix(prefix, "vision_tower"),
            )
            self.multi_modal_projector = MiniMaxVL01MultiModalProjector(
                vision_hidden_size=config.vision_config.hidden_size,
                text_hidden_size=config.text_config.hidden_size,
                projector_hidden_act=config.projector_hidden_act,
                multimodal_projector_bias=True,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "multi_modal_projector"),
            )
            # Learned separator embedding appended after each image-feature row
            # (LLaVA-NeXT style); filled in by the weight loader.
            self.image_newline = nn.Parameter(
                torch.empty(config.text_config.hidden_size)
            )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
            )
        self.vision_feature_layer = config.vision_feature_layer
        self.vocab_size = config.text_config.vocab_size
        # -1 sentinel when the text config declares no pad token.
        self.pad_token_id = -1
        if self.config.text_config.pad_token_id is not None:
            self.pad_token_id = self.config.text_config.pad_token_id
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _image_pixels_to_features(
        self,
        vision_tower: CLIPVisionModel | SiglipVisionModel | PixtralHFVisionModel,
        pixel_values: torch.Tensor | list[torch.Tensor],
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Run each image's pixel values through the vision tower.

        Returns one feature tensor per input image, as a tuple.
        """
        # NOTE: we skip the step to select the vision feature layer since
        # this is already done inside the vision tower
        feature_select_strategy = self.config.vision_feature_select_strategy
        return tuple(
            vision_tower(p, feature_select_strategy=feature_select_strategy)
            for p in pixel_values
        )

    # adapted from https://huggingface.co/MiniMaxAI/MiniMax-VL-01/blob/main/modeling_minimax_vl_01.py#L616-L631
    def pack_image_features(
        self, image_features: list[torch.Tensor], image_sizes: torch.Tensor
    ):
        """Flatten anyres multi-patch features into one sequence per image.

        For multi-patch images the base (whole-image) feature comes first,
        followed by the spatially re-assembled high-res patches with the
        learned ``image_newline`` embedding appended to each row.
        """
        new_image_features = []
        for image_idx, image_feature in enumerate(image_features):
            if image_feature.shape[0] > 1:
                # Patch 0 is the downscaled whole image; the rest tile it.
                base_image_feature = image_feature[0]
                image_feature = image_feature[1:]
                height = width = (
                    self.config.vision_config.image_size
                    // self.config.vision_config.patch_size
                )
                if height * width != base_image_feature.shape[0]:
                    raise ValueError(
                        "The number of patches is not consistent with the image size."
                    )
                num_patch_height, num_patch_width = get_anyres_image_grid_shape(
                    image_sizes[image_idx],
                    self.config.image_grid_pinpoints,
                    self.config.vision_config.image_size,
                )
                # Re-assemble the patch grid, drop padding introduced by the
                # anyres resize, then append the newline embedding per row.
                image_feature = image_feature.view(
                    num_patch_height, num_patch_width, height, width, -1
                )
                image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
                image_feature = image_feature.flatten(1, 2).flatten(2, 3)
                image_feature = unpad_image(image_feature, image_sizes[image_idx])
                image_feature = torch.cat(
                    (
                        image_feature,
                        self.image_newline[:, None, None]
                        .expand(*image_feature.shape[:-1], 1)
                        .to(image_feature.dtype),
                    ),
                    dim=-1,
                )
                image_feature = image_feature.flatten(1, 2).transpose(0, 1)
                image_feature = torch.cat((base_image_feature, image_feature), dim=0)
            else:
                # Single-patch image: just append one trailing newline token.
                image_feature = image_feature[0]
                image_feature = torch.cat(
                    (image_feature, self.image_newline[None].to(image_feature)), dim=0
                )
            new_image_features.append(image_feature)
        return new_image_features

    def _process_image_pixels(
        self,
        inputs: MiniMaxVL01ImagePixelInputs,
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Extract vision-tower features from validated pixel inputs."""
        pixel_values = inputs["pixel_values"]
        return self._image_pixels_to_features(self.vision_tower, pixel_values)

    def _process_image_input(
        self,
        image_input: MiniMaxVL01ImageInputs,
    ) -> torch.Tensor | tuple[torch.Tensor, ...]:
        """Turn validated image inputs into text-space embeddings.

        Precomputed embeddings pass through untouched; pixel inputs go
        through the tower and projector, then are packed per image.
        """
        if image_input["type"] == "image_embeds":
            return image_input["data"]
        image_features = self._process_image_pixels(image_input)
        if isinstance(image_features, torch.Tensor):
            return self.multi_modal_projector(image_features)
        # Project all images in one batched matmul, then split back per image.
        feature_sizes = [image_feature.shape[0] for image_feature in image_features]
        image_embeds = self.multi_modal_projector(torch.cat(image_features))
        image_embeds = torch.split(image_embeds, feature_sizes)
        image_sizes = image_input.get("image_sizes")
        return self.pack_image_features(image_embeds, image_sizes)

    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> MiniMaxVL01ImageInputs | None:
        """Pull image kwargs out of the multimodal inputs and classify them.

        Returns ``None`` when no image inputs are present.
        NOTE(review): ``pixel_values`` without ``image_sizes`` falls through
        to the AssertionError below — confirm that is intended.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        image_sizes = kwargs.pop("image_sizes", None)
        image_embeds = kwargs.pop("image_embeds", None)
        if pixel_values is None and image_embeds is None:
            return None
        if pixel_values is not None and image_sizes is not None:
            return MiniMaxVL01ImagePixelInputs(
                type="pixel_values",
                pixel_values=pixel_values,
                image_sizes=image_sizes,
            )
        if image_embeds is not None:
            return MiniMaxVL01ImageEmbeddingInputs(
                type="image_embeds",
                data=image_embeds,
            )
        raise AssertionError("This line should be unreachable.")

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Produce multimodal embeddings for the scheduler ([] when no images)."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []
        return self._process_image_input(image_input)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the text backbone.

        When pipeline-parallel intermediate tensors are supplied, the
        embeddings are ignored (this rank resumes mid-stack).
        """
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids, positions, intermediate_tensors, inputs_embeds=inputs_embeds
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Delegate logits computation to the text backbone's LM head."""
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights; returns the set of loaded parameter names."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/minimax_vl_01.py",
"license": "Apache License 2.0",
"lines": 322,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vnpy/vnpy:tests/test_alpha101.py | import pytest
import polars as pl
import numpy as np
from datetime import datetime, timedelta
from vnpy.alpha.dataset.utility import calculate_by_expression
def create_test_df(
    n_symbols: int = 50, n_days: int = 300, seed: int = 42
) -> pl.DataFrame:
    """
    Create a synthetic OHLCV DataFrame seeded with extreme values.

    Columns: datetime, vt_symbol, open, high, low, close, volume, vwap.

    Args:
        n_symbols: Number of distinct symbols to generate.
        n_days: Number of consecutive daily bars per symbol.
        seed: NumPy RNG seed. The default (42) reproduces the exact data
            the original hard-coded fixture generated.

    Returns:
        A polars DataFrame with n_symbols * n_days rows.
    """
    # Legacy global-state seeding is kept on purpose: switching to
    # np.random.default_rng would change the generated values.
    np.random.seed(seed)
    symbols = [f"SH.{600000 + i}" for i in range(n_symbols)]
    dates = [datetime(2023, 1, 1) + timedelta(days=i) for i in range(n_days)]
    data = []
    for sym_idx, symbol in enumerate(symbols):
        # Each symbol is an independent random walk from its own base price.
        base_price = 10 + np.random.rand() * 90
        for day_idx, dt in enumerate(dates):
            # Generate a daily bar around the walk's current level.
            change = (np.random.rand() - 0.5) * 0.1
            close = base_price * (1 + change)
            high = close * (1 + np.random.rand() * 0.03)
            low = close * (1 - np.random.rand() * 0.03)
            open_price = low + (high - low) * np.random.rand()
            volume = float(np.random.randint(100000, 10000000))
            vwap = (high + low + close) / 3
            # Inject extreme values (NaN close, zero volume, near-zero close)
            # at symbol-staggered dates to exercise factor robustness.
            if day_idx == 50 + sym_idx % 10:
                close = np.nan
            elif day_idx == 100 + sym_idx % 10:
                volume = 0.0
            elif day_idx == 150 + sym_idx % 10:
                close = 1e-10
            data.append({
                "datetime": dt,
                "vt_symbol": symbol,
                "open": open_price,
                "high": high,
                "low": low,
                "close": close,
                "volume": volume,
                "vwap": vwap
            })
            # Advance the walk only on valid closes so NaN days don't
            # poison every subsequent price.
            if not np.isnan(close):
                base_price = close
    return pl.DataFrame(data)
@pytest.fixture(scope="module")
def test_df() -> pl.DataFrame:
    """Module-scoped fixture: 50 symbols over 300 days, extreme values included."""
    return create_test_df(50, 300)
# Daily simple-return sub-expression, interpolated into several alpha formulas.
returns_expr = "(close / ts_delay(close, 1) - 1)"
class TestAlpha101:
    """Smoke tests for the Alpha101 factor expressions.

    Each test only verifies that the expression parses, evaluates over the
    fixture data (including NaN/zero/near-zero extremes), and produces a
    'data' column — not that the factor values are numerically correct.
    """
    def test_alpha1(self, test_df: pl.DataFrame) -> None:
        """Alpha#1 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"(cs_rank(ts_argmax(pow1(quesval(0, {returns_expr}, close, ts_std({returns_expr}, 20)), 2.0), 5)) - 0.5)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha2(self, test_df: pl.DataFrame) -> None:
        """Alpha#2 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_corr(cs_rank(ts_delta(log(volume), 2)), cs_rank((close - open) / open), 6)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha3(self, test_df: pl.DataFrame) -> None:
        """Alpha#3 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_corr(cs_rank(open), cs_rank(volume), 10) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha4(self, test_df: pl.DataFrame) -> None:
        """Alpha#4 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * ts_rank(cs_rank(low), 9)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha5(self, test_df: pl.DataFrame) -> None:
        """Alpha#5 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank((open - (ts_sum(vwap, 10) / 10))) * (-1 * abs(cs_rank((close - vwap))))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha6(self, test_df: pl.DataFrame) -> None:
        """Alpha#6 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_corr(open, volume, 10)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha7(self, test_df: pl.DataFrame) -> None:
        """Alpha#7 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(ts_mean(volume, 20), volume, (-1 * ts_rank(abs(close - ts_delay(close, 7)), 60)) * sign(ts_delta(close, 7)), -1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha8(self, test_df: pl.DataFrame) -> None:
        """Alpha#8 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"-1 * cs_rank(((ts_sum(open, 5) * ts_sum({returns_expr}, 5)) - ts_delay((ts_sum(open, 5) * ts_sum({returns_expr}, 5)), 10)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha9(self, test_df: pl.DataFrame) -> None:
        """Alpha#9 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(0, ts_min(ts_delta(close, 1), 5), ts_delta(close, 1), quesval(0, ts_max(ts_delta(close, 1), 5), (-1 * ts_delta(close, 1)), ts_delta(close, 1)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha10(self, test_df: pl.DataFrame) -> None:
        """Alpha#10 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank(quesval(0, ts_min(ts_delta(close, 1), 4), ts_delta(close, 1), quesval(0, ts_max(ts_delta(close, 1), 4), (-1 * ts_delta(close, 1)), ts_delta(close, 1))))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha11(self, test_df: pl.DataFrame) -> None:
        """Alpha#11 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(cs_rank(ts_max(vwap - close, 3)) + cs_rank(ts_min(vwap - close, 3))) * cs_rank(ts_delta(volume, 3))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha12(self, test_df: pl.DataFrame) -> None:
        """Alpha#12 smoke test: expression evaluates and emits a 'data' column."""
        expr = "sign(ts_delta(volume, 1)) * (-1 * ts_delta(close, 1))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha13(self, test_df: pl.DataFrame) -> None:
        """Alpha#13 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * cs_rank(ts_cov(cs_rank(close), cs_rank(volume), 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha14(self, test_df: pl.DataFrame) -> None:
        """Alpha#14 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"(-1 * cs_rank(({returns_expr}) - ts_delay({returns_expr}, 3))) * ts_corr(open, volume, 10)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha15(self, test_df: pl.DataFrame) -> None:
        """Alpha#15 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * ts_sum(cs_rank(ts_corr(cs_rank(high), cs_rank(volume), 3)), 3)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha16(self, test_df: pl.DataFrame) -> None:
        """Alpha#16 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * cs_rank(ts_cov(cs_rank(high), cs_rank(volume), 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha17(self, test_df: pl.DataFrame) -> None:
        """Alpha#17 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1 * cs_rank(ts_rank(close, 10))) * cs_rank(close - 2 * ts_delay(close, 1) + ts_delay(close, 2)) * cs_rank(ts_rank(volume / ts_mean(volume, 20), 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha18(self, test_df: pl.DataFrame) -> None:
        """Alpha#18 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * cs_rank((ts_std(abs(close - open), 5) + (close - open)) + ts_corr(close, open, 10))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha19(self, test_df: pl.DataFrame) -> None:
        """Alpha#19 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"(-1 * sign(ts_delta(close, 7) + (close - ts_delay(close, 7)))) * (cs_rank(ts_sum({returns_expr}, 250) + 1) + 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha20(self, test_df: pl.DataFrame) -> None:
        """Alpha#20 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1 * cs_rank(open - ts_delay(high, 1))) * cs_rank(open - ts_delay(close, 1)) * cs_rank(open - ts_delay(low, 1))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha21(self, test_df: pl.DataFrame) -> None:
        """Alpha#21 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2((ts_mean(close, 8) + ts_std(close, 8)), ts_mean(close, 2), -1, quesval2(ts_mean(close, 2), (ts_mean(close, 8) - ts_std(close, 8)), 1, quesval(1, (volume / ts_mean(volume, 20)), 1, -1)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha22(self, test_df: pl.DataFrame) -> None:
        """Alpha#22 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * ts_delta(ts_corr(high, volume, 5), 5) * cs_rank(ts_std(close, 20))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha23(self, test_df: pl.DataFrame) -> None:
        """Alpha#23 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(ts_mean(high, 20), high, -1 * ts_delta(high, 2), 0)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha24(self, test_df: pl.DataFrame) -> None:
        """Alpha#24 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(0.05, ts_delta(ts_sum(close, 100) / 100, 100) / ts_delay(close, 100), (-1 * ts_delta(close, 3)), (-1 * (close - ts_min(close, 100))))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha25(self, test_df: pl.DataFrame) -> None:
        """Alpha#25 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"cs_rank( (-1 * {returns_expr}) * ts_mean(volume, 20) * vwap * (high - close) )"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha26(self, test_df: pl.DataFrame) -> None:
        """Alpha#26 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * ts_max(ts_corr(ts_rank(volume, 5), ts_rank(high, 5), 5), 3)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha27(self, test_df: pl.DataFrame) -> None:
        """Alpha#27 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(0.5, cs_rank(ts_mean(ts_corr(cs_rank(volume), cs_rank(vwap), 6), 2)), -1, 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha28(self, test_df: pl.DataFrame) -> None:
        """Alpha#28 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_scale(ts_corr(ts_mean(volume, 20), low, 5) + (high + low) / 2 - close)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha29(self, test_df: pl.DataFrame) -> None:
        """Alpha#29 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"ts_min(ts_product(cs_rank(cs_rank(cs_scale(log(ts_sum(ts_min(cs_rank(cs_rank((-1 * cs_rank(ts_delta((close - 1), 5))))), 2), 1))))), 1), 5) + ts_rank(ts_delay((-1 * {returns_expr}), 6), 5)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha30(self, test_df: pl.DataFrame) -> None:
        """Alpha#30 smoke test: expression evaluates and emits a 'data' column."""
        expr = "((cs_rank(sign(close - ts_delay(close, 1)) + sign(ts_delay(close, 1) - ts_delay(close, 2)) + sign(ts_delay(close, 2) - ts_delay(close, 3))) * -1 + 1) * ts_sum(volume, 5)) / ts_sum(volume, 20)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha31(self, test_df: pl.DataFrame) -> None:
        """Alpha#31 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(cs_rank(cs_rank(cs_rank(ts_decay_linear((-1) * cs_rank(cs_rank(ts_delta(close, 10))), 10)))) + cs_rank((-1) * ts_delta(close, 3))) + sign(cs_scale(ts_corr(ts_mean(volume, 20), low, 12)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha32(self, test_df: pl.DataFrame) -> None:
        """Alpha#32 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_scale((ts_sum(close, 7) / 7 - close)) + (20 * cs_scale(ts_corr(vwap, ts_delay(close, 5), 230)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha33(self, test_df: pl.DataFrame) -> None:
        """Alpha#33 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank((-1) * (open / close * -1 + 1))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha34(self, test_df: pl.DataFrame) -> None:
        """Alpha#34 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"cs_rank((cs_rank(ts_std({returns_expr}, 2) / ts_std({returns_expr}, 5)) * -1 + 1) + (cs_rank(ts_delta(close, 1)) * -1 + 1))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha35(self, test_df: pl.DataFrame) -> None:
        """Alpha#35 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"(ts_rank(volume, 32) * (ts_rank((close + high - low), 16) * -1 + 1)) * (ts_rank({returns_expr}, 32) * -1 + 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha36(self, test_df: pl.DataFrame) -> None:
        """Alpha#36 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"((((2.21 * cs_rank(ts_corr((close - open), ts_delay(volume, 1), 15))) + (0.7 * cs_rank((open - close)))) + (0.73 * cs_rank(ts_rank(ts_delay((-1) * {returns_expr}, 6), 5)))) + cs_rank(abs(ts_corr(vwap, ts_mean(volume, 20), 6)))) + (0.6 * cs_rank(((ts_sum(close, 200) / 200 - open) * (close - open))))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha37(self, test_df: pl.DataFrame) -> None:
        """Alpha#37 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank(ts_corr(ts_delay((open - close), 1), close, 200)) + cs_rank((open - close))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha38(self, test_df: pl.DataFrame) -> None:
        """Alpha#38 smoke test: expression evaluates and emits a 'data' column."""
        expr = "((-1) * cs_rank(ts_rank(close, 10))) * cs_rank((close / open))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha39(self, test_df: pl.DataFrame) -> None:
        """Alpha#39 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"((-1) * cs_rank((ts_delta(close, 7) * (cs_rank(ts_decay_linear((volume / ts_mean(volume, 20)), 9)) * -1 + 1)))) * (cs_rank(ts_sum({returns_expr}, 250)) + 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha40(self, test_df: pl.DataFrame) -> None:
        """Alpha#40 smoke test: expression evaluates and emits a 'data' column."""
        expr = "((-1) * cs_rank(ts_std(high, 10))) * ts_corr(high, volume, 10)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha41(self, test_df: pl.DataFrame) -> None:
        """Alpha#41 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow1((high * low), 0.5) - vwap"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha42(self, test_df: pl.DataFrame) -> None:
        """Alpha#42 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank((vwap - close)) / cs_rank((vwap + close))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha43(self, test_df: pl.DataFrame) -> None:
        """Alpha#43 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_rank((volume / ts_mean(volume, 20)), 20) * ts_rank((-1) * ts_delta(close, 7), 8)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha44(self, test_df: pl.DataFrame) -> None:
        """Alpha#44 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_corr(high, cs_rank(volume), 5)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha45(self, test_df: pl.DataFrame) -> None:
        """Alpha#45 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * cs_rank(ts_sum(ts_delay(close, 5), 20) / 20) * ts_corr(close, volume, 2) * cs_rank(ts_corr(ts_sum(close, 5), ts_sum(close, 20), 2))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha46(self, test_df: pl.DataFrame) -> None:
        """Alpha#46 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(0.25, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), -1, quesval(0, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha47(self, test_df: pl.DataFrame) -> None:
        """Alpha#47 smoke test: expression evaluates and emits a 'data' column."""
        expr = "((cs_rank(pow1(close, -1)) * volume / ts_mean(volume, 20)) * (high * cs_rank(high - close)) / (ts_sum(high, 5) / 5)) - cs_rank(vwap - ts_delay(vwap, 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha48(self, test_df: pl.DataFrame) -> None:
        """Alpha#48 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(ts_corr(ts_delta(close, 1), ts_delta(ts_delay(close, 1), 1), 250) * ts_delta(close, 1)) / close / ts_sum(pow1((ts_delta(close, 1) / ts_delay(close, 1)), 2), 250)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha49(self, test_df: pl.DataFrame) -> None:
        """Alpha#49 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(-0.1, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha50(self, test_df: pl.DataFrame) -> None:
        """Alpha#50 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_max(cs_rank(ts_corr(cs_rank(volume), cs_rank(vwap), 5)), 5)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha51(self, test_df: pl.DataFrame) -> None:
        """Alpha#51 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval(-0.05, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha52(self, test_df: pl.DataFrame) -> None:
        """Alpha#52 smoke test: expression evaluates and emits a 'data' column."""
        expr = f"(((-1) * ts_min(low, 5)) + ts_delay(ts_min(low, 5), 5)) * cs_rank((ts_sum({returns_expr}, 240) - ts_sum({returns_expr}, 20)) / 220) * ts_rank(volume, 5)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha53(self, test_df: pl.DataFrame) -> None:
        """Alpha#53 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_delta(((close - low) - (high - close)) / (close - low), 9)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha54(self, test_df: pl.DataFrame) -> None:
        """Alpha#54 smoke test: expression evaluates and emits a 'data' column."""
        expr = "((-1) * ((low - close) * pow1(open, 5))) / ((low - high) * pow1(close, 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha55(self, test_df: pl.DataFrame) -> None:
        """Alpha#55 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_corr(cs_rank((close - ts_min(low, 12)) / (ts_max(high, 12) - ts_min(low, 12))), cs_rank(volume), 6)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    # Alpha#56 is skipped: it requires a market-cap ("cap") field that the
    # synthetic fixture does not provide.

    def test_alpha57(self, test_df: pl.DataFrame) -> None:
        """Alpha#57 smoke test: expression evaluates and emits a 'data' column."""
        expr = "-1 * ((close - vwap) / ts_decay_linear(cs_rank(ts_argmax(close, 30)), 2))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha58(self, test_df: pl.DataFrame) -> None:
        """Alpha#58 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_rank(ts_decay_linear(ts_corr(vwap, volume, 4), 8), 6)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha59(self, test_df: pl.DataFrame) -> None:
        """Alpha#59 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(-1) * ts_rank(ts_decay_linear(ts_corr(((vwap * 0.728317) + (vwap * (1 - 0.728317))), volume, 4), 16), 8)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha60(self, test_df: pl.DataFrame) -> None:
        """Alpha#60 smoke test: expression evaluates and emits a 'data' column."""
        expr = "- 1 * ((2 * cs_scale(cs_rank((((close - low) - (high - close)) / (high - low)) * volume))) - cs_scale(cs_rank(ts_argmax(close, 10))))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha61(self, test_df: pl.DataFrame) -> None:
        """Alpha#61 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(cs_rank(vwap - ts_min(vwap, 16)), cs_rank(ts_corr(vwap, ts_mean(volume, 180), 18)), 1, 0)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha62(self, test_df: pl.DataFrame) -> None:
        """Alpha#62 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cast_to_int(cs_rank(ts_corr(vwap, ts_sum(ts_mean(volume, 20), 22), 10)) < cs_rank(cast_to_int((cs_rank(open) + cs_rank(open)) < (cs_rank((high + low) / 2) + cs_rank(high))))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha63(self, test_df: pl.DataFrame) -> None:
        """Alpha#63 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(cs_rank(ts_decay_linear(ts_delta(close, 2), 8)) - cs_rank(ts_decay_linear(ts_corr(vwap * 0.318108 + open * 0.681892, ts_sum(ts_mean(volume, 180), 37), 14), 12))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha64(self, test_df: pl.DataFrame) -> None:
        """Alpha#64 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cast_to_int(cs_rank(ts_corr(ts_sum(((open * 0.178404) + (low * (1 - 0.178404))), 13), ts_sum(ts_mean(volume, 120), 13), 17)) < cs_rank(ts_delta((((high + low) / 2 * 0.178404) + (vwap * (1 - 0.178404))), 4))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha65(self, test_df: pl.DataFrame) -> None:
        """Alpha#65 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cast_to_int(cs_rank(ts_corr(((open * 0.00817205) + (vwap * (1 - 0.00817205))), ts_sum(ts_mean(volume, 60), 9), 6)) < cs_rank(open - ts_min(open, 14))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha66(self, test_df: pl.DataFrame) -> None:
        """Alpha#66 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(cs_rank(ts_decay_linear(ts_delta(vwap, 4), 7)) + ts_rank(ts_decay_linear((((low * 0.96633) + (low * (1 - 0.96633))) - vwap) / (open - ((high + low) / 2)), 11), 7)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha67(self, test_df: pl.DataFrame) -> None:
        """Alpha#67 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(high - ts_min(high, 2)), cs_rank(ts_corr(vwap, ts_mean(volume, 20), 6))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha68(self, test_df: pl.DataFrame) -> None:
        """Alpha#68 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cast_to_int(ts_rank(ts_corr(cs_rank(high), cs_rank(ts_mean(volume, 15)), 9), 14) < cs_rank(ts_delta((close * 0.518371 + low * (1 - 0.518371)), 1))) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha69(self, test_df: pl.DataFrame) -> None:
        """Alpha#69 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(ts_max(ts_delta(vwap, 3), 5)), ts_rank(ts_corr(close * 0.490655 + vwap * 0.509345, ts_mean(volume, 20), 5), 9)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha70(self, test_df: pl.DataFrame) -> None:
        """Alpha#70 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(ts_delta(vwap, 1)), ts_rank(ts_corr(close, ts_mean(volume, 50), 18), 18)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha71(self, test_df: pl.DataFrame) -> None:
        """Alpha#71 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_greater(ts_rank(ts_decay_linear(ts_corr(ts_rank(close, 3), ts_rank(ts_mean(volume, 180), 12), 18), 4), 16), ts_rank(ts_decay_linear(pow1(cs_rank((low + open) - (vwap + vwap)), 2), 16), 4))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha72(self, test_df: pl.DataFrame) -> None:
        """Alpha#72 smoke test: expression evaluates and emits a 'data' column."""
        expr = "cs_rank(ts_decay_linear(ts_corr((high + low) / 2, ts_mean(volume, 40), 9), 10)) / cs_rank(ts_decay_linear(ts_corr(ts_rank(vwap, 4), ts_rank(volume, 19), 7), 3))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha73(self, test_df: pl.DataFrame) -> None:
        """Alpha#73 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_greater(cs_rank(ts_decay_linear(ts_delta(vwap, 5), 3)), ts_rank(ts_decay_linear((ts_delta(open * 0.147155 + low * 0.852845, 2) / (open * 0.147155 + low * 0.852845)) * -1, 3), 17)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha74(self, test_df: pl.DataFrame) -> None:
        """Alpha#74 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(cs_rank(ts_corr(close, ts_sum(ts_mean(volume, 30), 37), 15)), cs_rank(ts_corr(cs_rank(high * 0.0261661 + vwap * 0.9738339), cs_rank(volume), 11)), 1, 0) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha75(self, test_df: pl.DataFrame) -> None:
        """Alpha#75 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(cs_rank(ts_corr(vwap, volume, 4)), cs_rank(ts_corr(cs_rank(low), cs_rank(ts_mean(volume, 50)), 12)), 1, 0)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha76(self, test_df: pl.DataFrame) -> None:
        """Alpha#76 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_greater(cs_rank(ts_decay_linear(ts_delta(vwap, 1), 12)), ts_rank(ts_decay_linear(ts_rank(ts_corr(low, ts_mean(volume, 81), 8), 20), 17), 19)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha77(self, test_df: pl.DataFrame) -> None:
        """Alpha#77 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_less(cs_rank(ts_decay_linear((((high + low) / 2 + high) - (vwap + high)), 20)), cs_rank(ts_decay_linear(ts_corr((high + low) / 2, ts_mean(volume, 40), 3), 6)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha78(self, test_df: pl.DataFrame) -> None:
        """Alpha#78 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(ts_corr(ts_sum((low * 0.352233) + (vwap * (1 - 0.352233)), 20), ts_sum(ts_mean(volume, 40), 20), 7)), cs_rank(ts_corr(cs_rank(vwap), cs_rank(volume), 6)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha79(self, test_df: pl.DataFrame) -> None:
        """Alpha#79 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(cs_rank(ts_delta(close * 0.60733 + open * 0.39267, 1)), cs_rank(ts_corr(ts_rank(vwap, 4), ts_rank(ts_mean(volume, 150), 9), 15)), 1, 0)"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha80(self, test_df: pl.DataFrame) -> None:
        """Alpha#80 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(sign(ts_delta(open * 0.868128 + high * 0.131872, 4))), ts_rank(ts_corr(high, ts_mean(volume, 10), 5), 6)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
    def test_alpha81(self, test_df: pl.DataFrame) -> None:
        """Alpha#81 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(cs_rank(log(ts_product(cs_rank(pow1(cs_rank(ts_corr(vwap, ts_sum(ts_mean(volume, 10), 50), 8)), 4)), 15))), cs_rank(ts_corr(cs_rank(vwap), cs_rank(volume), 5)), 1, 0) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha82(self, test_df: pl.DataFrame) -> None:
        """Alpha#82 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_less(cs_rank(ts_decay_linear(ts_delta(open, 1), 15)), ts_rank(ts_decay_linear(ts_corr(volume, open, 17), 7), 13)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha83(self, test_df: pl.DataFrame) -> None:
        """Alpha#83 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(cs_rank(ts_delay((high - low) / (ts_sum(close, 5) / 5), 2)) * cs_rank(cs_rank(volume))) / (((high - low) / (ts_sum(close, 5) / 5)) / (vwap - close))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha84(self, test_df: pl.DataFrame) -> None:
        """Alpha#84 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(ts_rank(vwap - ts_max(vwap, 15), 21), ts_delta(close, 5))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha85(self, test_df: pl.DataFrame) -> None:
        """Alpha#85 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(ts_corr(high * 0.876703 + close * 0.123297, ts_mean(volume, 30), 10)), cs_rank(ts_corr(ts_rank((high + low) / 2, 4), ts_rank(volume, 10), 7)))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha86(self, test_df: pl.DataFrame) -> None:
        """Alpha#86 smoke test: expression evaluates and emits a 'data' column."""
        expr = "quesval2(ts_rank(ts_corr(close, ts_sum(ts_mean(volume, 20), 15), 6), 20), cs_rank((open + close) - (vwap + open)), 1, 0) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha87(self, test_df: pl.DataFrame) -> None:
        """Alpha#87 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_greater(cs_rank(ts_decay_linear(ts_delta(close * 0.369701 + vwap * 0.630299, 2), 3)), ts_rank(ts_decay_linear(abs(ts_corr(ts_mean(volume, 81), close, 13)), 5), 14)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha88(self, test_df: pl.DataFrame) -> None:
        """Alpha#88 smoke test: expression evaluates and emits a 'data' column."""
        expr = "ts_less(cs_rank(ts_decay_linear((cs_rank(open) + cs_rank(low)) - (cs_rank(high) + cs_rank(close)), 8)), ts_rank(ts_decay_linear(ts_corr(ts_rank(close, 8), ts_rank(ts_mean(volume, 60), 21), 8), 7), 3))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha89(self, test_df: pl.DataFrame) -> None:
        """Alpha#89 smoke test: expression evaluates and emits a 'data' column."""
        expr = "(ts_rank(ts_decay_linear(ts_corr(low, ts_mean(volume, 10), 7), 6), 4) - ts_rank(ts_decay_linear(ts_delta(vwap, 3), 10), 15))"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns

    def test_alpha90(self, test_df: pl.DataFrame) -> None:
        """Alpha#90 smoke test: expression evaluates and emits a 'data' column."""
        expr = "pow2(cs_rank(close - ts_max(close, 5)), ts_rank(ts_corr(ts_mean(volume, 40), low, 5), 3)) * -1"
        result = calculate_by_expression(test_df, expr)
        assert "data" in result.columns
def test_alpha91(self, test_df: pl.DataFrame) -> None:
"""Test Alpha#91"""
expr = "(ts_rank(ts_decay_linear(ts_decay_linear(ts_corr(close, volume, 10), 16), 4), 5) - cs_rank(ts_decay_linear(ts_corr(vwap, ts_mean(volume, 30), 4), 3))) * -1"
result = calculate_by_expression(test_df, expr)
assert "data" in result.columns
def test_alpha92(self, test_df: pl.DataFrame) -> None:
    """Alpha#92 expression should evaluate to a frame containing a "data" column."""
    expression = "ts_less(ts_rank(ts_decay_linear(quesval2(((high + low) / 2 + close), (low + open), 1, 0), 15), 19), ts_rank(ts_decay_linear(ts_corr(cs_rank(low), cs_rank(ts_mean(volume, 30)), 8), 7), 7))"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha93(self, test_df: pl.DataFrame) -> None:
    """Alpha#93 expression should evaluate to a frame containing a "data" column."""
    expression = "ts_rank(ts_decay_linear(ts_corr(vwap, ts_mean(volume, 81), 17), 20), 8) / cs_rank(ts_decay_linear(ts_delta(close * 0.524434 + vwap * 0.475566, 3), 16))"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha94(self, test_df: pl.DataFrame) -> None:
    """Alpha#94 expression should evaluate to a frame containing a "data" column."""
    expression = "pow2(cs_rank(vwap - ts_min(vwap, 12)), ts_rank(ts_corr(ts_rank(vwap, 20), ts_rank(ts_mean(volume, 60), 4), 18), 3)) * -1"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha95(self, test_df: pl.DataFrame) -> None:
    """Alpha#95 expression should evaluate to a frame containing a "data" column."""
    expression = "quesval2(cs_rank(open - ts_min(open, 12)), ts_rank(pow1(cs_rank(ts_corr(ts_sum((high + low) / 2, 19), ts_sum(ts_mean(volume, 40), 19), 13)), 5), 12), 1, 0)"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha96(self, test_df: pl.DataFrame) -> None:
    """Alpha#96 expression should evaluate to a frame containing a "data" column."""
    expression = "ts_greater(ts_rank(ts_decay_linear(ts_corr(cs_rank(vwap), cs_rank(volume), 4), 4), 8), ts_rank(ts_decay_linear(ts_argmax(ts_corr(ts_rank(close, 7), ts_rank(ts_mean(volume, 60), 4), 4), 13), 14), 13)) * -1"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha97(self, test_df: pl.DataFrame) -> None:
    """Alpha#97 expression should evaluate to a frame containing a "data" column."""
    expression = "(cs_rank(ts_decay_linear(ts_delta(low * 0.721001 + vwap * 0.278999, 3), 20)) - ts_rank(ts_decay_linear(ts_rank(ts_corr(ts_rank(low, 8), ts_rank(ts_mean(volume, 60), 17), 5), 19), 16), 7)) * -1"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha98(self, test_df: pl.DataFrame) -> None:
    """Alpha#98 expression should evaluate to a frame containing a "data" column."""
    expression = "cs_rank(ts_decay_linear(ts_corr(vwap, ts_sum(ts_mean(volume, 5), 26), 5), 7)) - cs_rank(ts_decay_linear(ts_rank(ts_argmin(ts_corr(cs_rank(open), cs_rank(ts_mean(volume, 15)), 21), 9), 7), 8))"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha99(self, test_df: pl.DataFrame) -> None:
    """Alpha#99 expression should evaluate to a frame containing a "data" column."""
    expression = "quesval2(cs_rank(ts_corr(ts_sum((high + low) / 2, 20), ts_sum(ts_mean(volume, 60), 20), 9)), cs_rank(ts_corr(low, volume, 6)), 1, 0) * -1"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha100(self, test_df: pl.DataFrame) -> None:
    """Alpha#100 expression should evaluate to a frame containing a "data" column."""
    expression = "-1 * ((1.5 * cs_scale(cs_rank(((close - low) - (high - close)) / (high - low) * volume))) - cs_scale(ts_corr(close, cs_rank(ts_mean(volume, 20)), 5) - cs_rank(ts_argmin(close, 30)))) * (volume / ts_mean(volume, 20))"
    assert "data" in calculate_by_expression(test_df, expression).columns
def test_alpha101(self, test_df: pl.DataFrame) -> None:
    """Alpha#101 expression should evaluate to a frame containing a "data" column."""
    expression = "((close - open) / ((high - low) + 0.001))"
    assert "data" in calculate_by_expression(test_df, expression).columns
if __name__ == "__main__":
    # Allow running this module directly: verbose output, short tracebacks.
    pytest.main([__file__, "-v", "--tb=short"])
| {
"repo_id": "vnpy/vnpy",
"file_path": "tests/test_alpha101.py",
"license": "MIT License",
"lines": 556,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vnpy/vnpy:vnpy/alpha/dataset/datasets/alpha_101.py | import polars as pl
from vnpy.alpha import AlphaDataset
class Alpha101(AlphaDataset):
    """101 basic factors from WorldQuant.

    Each factor is registered as a named expression-string feature.
    Alphas that need the ``IndNeutralize`` operator, or a market-cap
    field that is not available here, are left commented out below.
    """

    def __init__(
        self,
        df: pl.DataFrame,
        train_period: tuple[str, str],
        valid_period: tuple[str, str],
        test_period: tuple[str, str]
    ) -> None:
        """Constructor.

        Args:
            df: bar data; the alpha expressions below reference the
                columns open / high / low / close / volume / vwap.
            train_period: (start, end) strings for the training split.
            valid_period: (start, end) strings for the validation split.
            test_period: (start, end) strings for the test split.
        """
        super().__init__(
            df=df,
            train_period=train_period,
            valid_period=valid_period,
            test_period=test_period,
        )

        # Daily simple return, substituted wherever the original formulas
        # use "returns".
        returns_expr: str = "(close / ts_delay(close, 1) - 1)"

        # Alpha1
        self.add_feature("alpha1", f"(cs_rank(ts_argmax(pow1(quesval(0, {returns_expr}, close, ts_std({returns_expr}, 20)), 2.0), 5)) - 0.5)")
        # Alpha2
        self.add_feature("alpha2", "(-1) * ts_corr(cs_rank(ts_delta(log(volume), 2)), cs_rank((close - open) / open), 6)")
        # Alpha3
        self.add_feature("alpha3", "ts_corr(cs_rank(open), cs_rank(volume), 10) * -1")
        # Alpha4
        self.add_feature("alpha4", "-1 * ts_rank(cs_rank(low), 9)")
        # Alpha5
        self.add_feature("alpha5", "cs_rank((open - (ts_sum(vwap, 10) / 10))) * (-1 * abs(cs_rank((close - vwap))))")
        # Alpha6
        self.add_feature("alpha6", "(-1) * ts_corr(open, volume, 10)")
        # Alpha7
        self.add_feature("alpha7", "quesval2(ts_mean(volume, 20), volume, (-1 * ts_rank(abs(close - ts_delay(close, 7)), 60)) * sign(ts_delta(close, 7)), -1)")
        # Alpha8
        self.add_feature("alpha8", f"-1 * cs_rank(((ts_sum(open, 5) * ts_sum({returns_expr}, 5)) - ts_delay((ts_sum(open, 5) * ts_sum({returns_expr}, 5)), 10)))")
        # Alpha9
        self.add_feature("alpha9", "quesval(0, ts_min(ts_delta(close, 1), 5), ts_delta(close, 1), quesval(0, ts_max(ts_delta(close, 1), 5), (-1 * ts_delta(close, 1)), ts_delta(close, 1)))")
        # Alpha10
        self.add_feature("alpha10", "cs_rank(quesval(0, ts_min(ts_delta(close, 1), 4), ts_delta(close, 1), quesval(0, ts_max(ts_delta(close, 1), 4), (-1 * ts_delta(close, 1)), ts_delta(close, 1))))")
        # Alpha11
        self.add_feature("alpha11", "(cs_rank(ts_max(vwap - close, 3)) + cs_rank(ts_min(vwap - close, 3))) * cs_rank(ts_delta(volume, 3))")
        # Alpha12
        self.add_feature("alpha12", "sign(ts_delta(volume, 1)) * (-1 * ts_delta(close, 1))")
        # Alpha13
        self.add_feature("alpha13", "-1 * cs_rank(ts_cov(cs_rank(close), cs_rank(volume), 5))")
        # Alpha14
        self.add_feature("alpha14", f"(-1 * cs_rank(({returns_expr}) - ts_delay({returns_expr}, 3))) * ts_corr(open, volume, 10)")
        # Alpha15
        self.add_feature("alpha15", "-1 * ts_sum(cs_rank(ts_corr(cs_rank(high), cs_rank(volume), 3)), 3)")
        # Alpha16
        self.add_feature("alpha16", "-1 * cs_rank(ts_cov(cs_rank(high), cs_rank(volume), 5))")
        # Alpha17
        self.add_feature("alpha17", "(-1 * cs_rank(ts_rank(close, 10))) * cs_rank(close - 2 * ts_delay(close, 1) + ts_delay(close, 2)) * cs_rank(ts_rank(volume / ts_mean(volume, 20), 5))")
        # Alpha18
        self.add_feature("alpha18", "-1 * cs_rank((ts_std(abs(close - open), 5) + (close - open)) + ts_corr(close, open, 10))")
        # Alpha19
        self.add_feature("alpha19", f"(-1 * sign(ts_delta(close, 7) + (close - ts_delay(close, 7)))) * (cs_rank(ts_sum({returns_expr}, 250) + 1) + 1)")
        # Alpha20
        self.add_feature("alpha20", "(-1 * cs_rank(open - ts_delay(high, 1))) * cs_rank(open - ts_delay(close, 1)) * cs_rank(open - ts_delay(low, 1))")
        # Alpha21 (the innermost original was >=1, implemented as >1)
        self.add_feature("alpha21", "quesval2((ts_mean(close, 8) + ts_std(close, 8)), ts_mean(close, 2), -1, quesval2(ts_mean(close, 2), (ts_mean(close, 8) - ts_std(close, 8)), 1, quesval(1, (volume / ts_mean(volume, 20)), 1, -1)))")
        # Alpha22
        self.add_feature("alpha22", "-1 * ts_delta(ts_corr(high, volume, 5), 5) * cs_rank(ts_std(close, 20))")
        # Alpha23
        self.add_feature("alpha23", "quesval2(ts_mean(high, 20), high, -1 * ts_delta(high, 2), 0)")
        # Alpha24 (the original condition was <=0.05, implemented as <0.05)
        self.add_feature("alpha24", "quesval(0.05, ts_delta(ts_sum(close, 100) / 100, 100) / ts_delay(close, 100), (-1 * ts_delta(close, 3)), (-1 * (close - ts_min(close, 100))))")
        # Alpha25
        self.add_feature("alpha25", f"cs_rank( (-1 * {returns_expr}) * ts_mean(volume, 20) * vwap * (high - close) )")
        # Alpha26
        self.add_feature("alpha26", "-1 * ts_max(ts_corr(ts_rank(volume, 5), ts_rank(high, 5), 5), 3)")
        # Alpha27
        self.add_feature("alpha27", "quesval(0.5, cs_rank(ts_mean(ts_corr(cs_rank(volume), cs_rank(vwap), 6), 2)), -1, 1)")
        # Alpha28
        self.add_feature("alpha28", "cs_scale(ts_corr(ts_mean(volume, 20), low, 5) + (high + low) / 2 - close)")
        # Alpha29
        self.add_feature("alpha29", f"ts_min(ts_product(cs_rank(cs_rank(cs_scale(log(ts_sum(ts_min(cs_rank(cs_rank((-1 * cs_rank(ts_delta((close - 1), 5))))), 2), 1))))), 1), 5) + ts_rank(ts_delay((-1 * {returns_expr}), 6), 5)")
        # Alpha30
        self.add_feature("alpha30", "((cs_rank(sign(close - ts_delay(close, 1)) + sign(ts_delay(close, 1) - ts_delay(close, 2)) + sign(ts_delay(close, 2) - ts_delay(close, 3))) * -1 + 1) * ts_sum(volume, 5)) / ts_sum(volume, 20)")
        # Alpha31
        self.add_feature("alpha31", "(cs_rank(cs_rank(cs_rank(ts_decay_linear((-1) * cs_rank(cs_rank(ts_delta(close, 10))), 10)))) + cs_rank((-1) * ts_delta(close, 3))) + sign(cs_scale(ts_corr(ts_mean(volume, 20), low, 12)))")
        # Alpha32
        self.add_feature("alpha32", "cs_scale((ts_sum(close, 7) / 7 - close)) + (20 * cs_scale(ts_corr(vwap, ts_delay(close, 5), 230)))")
        # Alpha33
        self.add_feature("alpha33", "cs_rank((-1) * (open / close * -1 + 1))")
        # Alpha34
        self.add_feature("alpha34", f"cs_rank((cs_rank(ts_std({returns_expr}, 2) / ts_std({returns_expr}, 5)) * -1 + 1) + (cs_rank(ts_delta(close, 1)) * -1 + 1))")
        # Alpha35
        self.add_feature("alpha35", f"(ts_rank(volume, 32) * (ts_rank((close + high - low), 16) * -1 + 1)) * (ts_rank({returns_expr}, 32) * -1 + 1)")
        # Alpha36
        self.add_feature("alpha36", f"((((2.21 * cs_rank(ts_corr((close - open), ts_delay(volume, 1), 15))) + (0.7 * cs_rank((open - close)))) + (0.73 * cs_rank(ts_rank(ts_delay((-1) * {returns_expr}, 6), 5)))) + cs_rank(abs(ts_corr(vwap, ts_mean(volume, 20), 6)))) + (0.6 * cs_rank(((ts_sum(close, 200) / 200 - open) * (close - open))))")
        # Alpha37
        self.add_feature("alpha37", "cs_rank(ts_corr(ts_delay((open - close), 1), close, 200)) + cs_rank((open - close))")
        # Alpha38
        self.add_feature("alpha38", "((-1) * cs_rank(ts_rank(close, 10))) * cs_rank((close / open))")
        # Alpha39
        self.add_feature("alpha39", f"((-1) * cs_rank((ts_delta(close, 7) * (cs_rank(ts_decay_linear((volume / ts_mean(volume, 20)), 9)) * -1 + 1)))) * (cs_rank(ts_sum({returns_expr}, 250)) + 1)")
        # Alpha40
        self.add_feature("alpha40", "((-1) * cs_rank(ts_std(high, 10))) * ts_corr(high, volume, 10)")
        # Alpha41
        self.add_feature("alpha41", "pow1((high * low), 0.5) - vwap")
        # Alpha42
        self.add_feature("alpha42", "cs_rank((vwap - close)) / cs_rank((vwap + close))")
        # Alpha43
        self.add_feature("alpha43", "ts_rank((volume / ts_mean(volume, 20)), 20) * ts_rank((-1) * ts_delta(close, 7), 8)")
        # Alpha44
        self.add_feature("alpha44", "(-1) * ts_corr(high, cs_rank(volume), 5)")
        # Alpha45
        self.add_feature("alpha45", "(-1) * cs_rank(ts_sum(ts_delay(close, 5), 20) / 20) * ts_corr(close, volume, 2) * cs_rank(ts_corr(ts_sum(close, 5), ts_sum(close, 20), 2))")
        # Alpha46
        self.add_feature("alpha46", "quesval(0.25, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), -1, quesval(0, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1))")
        # Alpha47
        self.add_feature("alpha47", "((cs_rank(pow1(close, -1)) * volume / ts_mean(volume, 20)) * (high * cs_rank(high - close)) / (ts_sum(high, 5) / 5)) - cs_rank(vwap - ts_delay(vwap, 5))")
        # Alpha48 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha48", "(ts_corr(ts_delta(close, 1), ts_delta(ts_delay(close, 1), 1), 250) * ts_delta(close, 1)) / close / ts_sum(pow1((ts_delta(close, 1) / ts_delay(close, 1)), 2), 250)")
        # Alpha49
        self.add_feature("alpha49", "quesval(-0.1, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1)")
        # Alpha50
        self.add_feature("alpha50", "(-1) * ts_max(cs_rank(ts_corr(cs_rank(volume), cs_rank(vwap), 5)), 5)")
        # Alpha51
        self.add_feature("alpha51", "quesval(-0.05, ((ts_delay(close, 20) - ts_delay(close, 10)) / 10 - (ts_delay(close, 10) - close) / 10), (-1) * (close - ts_delay(close, 1)), 1)")
        # Alpha52
        self.add_feature("alpha52", f"(((-1) * ts_min(low, 5)) + ts_delay(ts_min(low, 5), 5)) * cs_rank((ts_sum({returns_expr}, 240) - ts_sum({returns_expr}, 20)) / 220) * ts_rank(volume, 5)")
        # Alpha53
        self.add_feature("alpha53", "(-1) * ts_delta(((close - low) - (high - close)) / (close - low), 9)")
        # Alpha54
        self.add_feature("alpha54", "((-1) * ((low - close) * pow1(open, 5))) / ((low - high) * pow1(close, 5))")
        # Alpha55
        self.add_feature("alpha55", "(-1) * ts_corr(cs_rank((close - ts_min(low, 12)) / (ts_max(high, 12) - ts_min(low, 12))), cs_rank(volume), 6)")
        # Alpha56 (missing `cap` field, cannot be implemented)
        # original formula: (0 - (1 * (rank((sum(returns, 10) / sum(sum(returns, 2), 3))) * rank((returns * cap)))))
        # Alpha57
        self.add_feature("alpha57", "-1 * ((close - vwap) / ts_decay_linear(cs_rank(ts_argmax(close, 30)), 2))")
        # Alpha58 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha58", "(-1) * ts_rank(ts_decay_linear(ts_corr(vwap, volume, 4), 8), 6)")
        # Alpha59 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha59", "(-1) * ts_rank(ts_decay_linear(ts_corr(((vwap * 0.728317) + (vwap * (1 - 0.728317))), volume, 4), 16), 8)")
        # Alpha60
        self.add_feature("alpha60", "- 1 * ((2 * cs_scale(cs_rank((((close - low) - (high - close)) / (high - low)) * volume))) - cs_scale(cs_rank(ts_argmax(close, 10))))")
        # Alpha61
        self.add_feature("alpha61", "quesval2(cs_rank(vwap - ts_min(vwap, 16)), cs_rank(ts_corr(vwap, ts_mean(volume, 180), 18)), 1, 0)")
        # Alpha62
        self.add_feature("alpha62", "(cs_rank(ts_corr(vwap, ts_sum(ts_mean(volume, 20), 22), 10)) < cs_rank((cs_rank(open) + cs_rank(open)) < (cs_rank((high + low) / 2) + cs_rank(high)))) * -1")
        # Alpha63 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha63", "(cs_rank(ts_decay_linear(ts_delta(close, 2), 8)) - cs_rank(ts_decay_linear(ts_corr(vwap * 0.318108 + open * 0.681892, ts_sum(ts_mean(volume, 180), 37), 14), 12))) * -1")
        # Alpha64
        self.add_feature("alpha64", "(cs_rank(ts_corr(ts_sum(((open * 0.178404) + (low * (1 - 0.178404))), 13), ts_sum(ts_mean(volume, 120), 13), 17)) < cs_rank(ts_delta((((high + low) / 2 * 0.178404) + (vwap * (1 - 0.178404))), 4))) * -1")
        # Alpha65
        self.add_feature("alpha65", "(cs_rank(ts_corr(((open * 0.00817205) + (vwap * (1 - 0.00817205))), ts_sum(ts_mean(volume, 60), 9), 6)) < cs_rank(open - ts_min(open, 14))) * -1")
        # Alpha66
        self.add_feature("alpha66", "(cs_rank(ts_decay_linear(ts_delta(vwap, 4), 7)) + ts_rank(ts_decay_linear((((low * 0.96633) + (low * (1 - 0.96633))) - vwap) / (open - ((high + low) / 2)), 11), 7)) * -1")
        # Alpha67 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha67", "pow2(cs_rank(high - ts_min(high, 2)), cs_rank(ts_corr(vwap, ts_mean(volume, 20), 6))) * -1")
        # Alpha68
        self.add_feature("alpha68", "(ts_rank(ts_corr(cs_rank(high), cs_rank(ts_mean(volume, 15)), 9), 14) < cs_rank(ts_delta((close * 0.518371 + low * (1 - 0.518371)), 1))) * -1")
        # Alpha69 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha69", "pow2(cs_rank(ts_max(ts_delta(vwap, 3), 5)), ts_rank(ts_corr(close * 0.490655 + vwap * 0.509345, ts_mean(volume, 20), 5), 9)) * -1")
        # Alpha70 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha70", "pow2(cs_rank(ts_delta(vwap, 1)), ts_rank(ts_corr(close, ts_mean(volume, 50), 18), 18)) * -1")
        # Alpha71
        self.add_feature("alpha71", "ts_greater(ts_rank(ts_decay_linear(ts_corr(ts_rank(close, 3), ts_rank(ts_mean(volume, 180), 12), 18), 4), 16), ts_rank(ts_decay_linear(pow1(cs_rank((low + open) - (vwap + vwap)), 2), 16), 4))")
        # Alpha72
        self.add_feature("alpha72", "cs_rank(ts_decay_linear(ts_corr((high + low) / 2, ts_mean(volume, 40), 9), 10)) / cs_rank(ts_decay_linear(ts_corr(ts_rank(vwap, 4), ts_rank(volume, 19), 7), 3))")
        # Alpha73
        self.add_feature("alpha73", "ts_greater(cs_rank(ts_decay_linear(ts_delta(vwap, 5), 3)), ts_rank(ts_decay_linear((ts_delta(open * 0.147155 + low * 0.852845, 2) / (open * 0.147155 + low * 0.852845)) * -1, 3), 17)) * -1")
        # Alpha74
        self.add_feature("alpha74", "quesval2(cs_rank(ts_corr(close, ts_sum(ts_mean(volume, 30), 37), 15)), cs_rank(ts_corr(cs_rank(high * 0.0261661 + vwap * 0.9738339), cs_rank(volume), 11)), 1, 0) * -1")
        # Alpha75
        self.add_feature("alpha75", "quesval2(cs_rank(ts_corr(vwap, volume, 4)), cs_rank(ts_corr(cs_rank(low), cs_rank(ts_mean(volume, 50)), 12)), 1, 0)")
        # Alpha76 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha76", "ts_greater(cs_rank(ts_decay_linear(ts_delta(vwap, 1), 12)), ts_rank(ts_decay_linear(ts_rank(ts_corr(low, ts_mean(volume, 81), 8), 20), 17), 19)) * -1")
        # Alpha77
        self.add_feature("alpha77", "ts_less(cs_rank(ts_decay_linear((((high + low) / 2 + high) - (vwap + high)), 20)), cs_rank(ts_decay_linear(ts_corr((high + low) / 2, ts_mean(volume, 40), 3), 6)))")
        # Alpha78
        self.add_feature("alpha78", "pow2(cs_rank(ts_corr(ts_sum((low * 0.352233) + (vwap * (1 - 0.352233)), 20), ts_sum(ts_mean(volume, 40), 20), 7)), cs_rank(ts_corr(cs_rank(vwap), cs_rank(volume), 6)))")
        # Alpha79 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha79", "quesval2(cs_rank(ts_delta(close * 0.60733 + open * 0.39267, 1)), cs_rank(ts_corr(ts_rank(vwap, 4), ts_rank(ts_mean(volume, 150), 9), 15)), 1, 0)")
        # Alpha80 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha80", "pow2(cs_rank(sign(ts_delta(open * 0.868128 + high * 0.131872, 4))), ts_rank(ts_corr(high, ts_mean(volume, 10), 5), 6)) * -1")
        # Alpha81
        self.add_feature("alpha81", "quesval2(cs_rank(log(ts_product(cs_rank(pow1(cs_rank(ts_corr(vwap, ts_sum(ts_mean(volume, 10), 50), 8)), 4)), 15))), cs_rank(ts_corr(cs_rank(vwap), cs_rank(volume), 5)), 1, 0) * -1")
        # Alpha82 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha82", "ts_less(cs_rank(ts_decay_linear(ts_delta(open, 1), 15)), ts_rank(ts_decay_linear(ts_corr(volume, open, 17), 7), 13)) * -1")
        # Alpha83
        self.add_feature("alpha83", "(cs_rank(ts_delay((high - low) / (ts_sum(close, 5) / 5), 2)) * cs_rank(cs_rank(volume))) / (((high - low) / (ts_sum(close, 5) / 5)) / (vwap - close))")
        # Alpha84
        self.add_feature("alpha84", "pow2(ts_rank(vwap - ts_max(vwap, 15), 21), ts_delta(close, 5))")
        # Alpha85
        self.add_feature("alpha85", "pow2(cs_rank(ts_corr(high * 0.876703 + close * 0.123297, ts_mean(volume, 30), 10)), cs_rank(ts_corr(ts_rank((high + low) / 2, 4), ts_rank(volume, 10), 7)))")
        # Alpha86
        self.add_feature("alpha86", "quesval2(ts_rank(ts_corr(close, ts_sum(ts_mean(volume, 20), 15), 6), 20), cs_rank((open + close) - (vwap + open)), 1, 0) * -1")
        # Alpha87 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha87", "ts_greater(cs_rank(ts_decay_linear(ts_delta(close * 0.369701 + vwap * 0.630299, 2), 3)), ts_rank(ts_decay_linear(abs(ts_corr(ts_mean(volume, 81), close, 13)), 5), 14)) * -1")
        # Alpha88
        self.add_feature("alpha88", "ts_less(cs_rank(ts_decay_linear((cs_rank(open) + cs_rank(low)) - (cs_rank(high) + cs_rank(close)), 8)), ts_rank(ts_decay_linear(ts_corr(ts_rank(close, 8), ts_rank(ts_mean(volume, 60), 21), 8), 7), 3))")
        # Alpha89 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha89", "(ts_rank(ts_decay_linear(ts_corr(low, ts_mean(volume, 10), 7), 6), 4) - ts_rank(ts_decay_linear(ts_delta(vwap, 3), 10), 15))")
        # Alpha90 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha90", "pow2(cs_rank(close - ts_max(close, 5)), ts_rank(ts_corr(ts_mean(volume, 40), low, 5), 3)) * -1")
        # Alpha91 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha91", "(ts_rank(ts_decay_linear(ts_decay_linear(ts_corr(close, volume, 10), 16), 4), 5) - cs_rank(ts_decay_linear(ts_corr(vwap, ts_mean(volume, 30), 4), 3))) * -1")
        # Alpha92
        self.add_feature("alpha92", "ts_less(ts_rank(ts_decay_linear(quesval2(((high + low) / 2 + close), (low + open), 1, 0), 15), 19), ts_rank(ts_decay_linear(ts_corr(cs_rank(low), cs_rank(ts_mean(volume, 30)), 8), 7), 7))")
        # Alpha93 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha93", "ts_rank(ts_decay_linear(ts_corr(vwap, ts_mean(volume, 81), 17), 20), 8) / cs_rank(ts_decay_linear(ts_delta(close * 0.524434 + vwap * 0.475566, 3), 16))")
        # Alpha94
        self.add_feature("alpha94", "pow2(cs_rank(vwap - ts_min(vwap, 12)), ts_rank(ts_corr(ts_rank(vwap, 20), ts_rank(ts_mean(volume, 60), 4), 18), 3)) * -1")
        # Alpha95
        self.add_feature("alpha95", "quesval2(cs_rank(open - ts_min(open, 12)), ts_rank(pow1(cs_rank(ts_corr(ts_sum((high + low) / 2, 19), ts_sum(ts_mean(volume, 40), 19), 13)), 5), 12), 1, 0)")
        # Alpha96
        self.add_feature("alpha96", "ts_greater(ts_rank(ts_decay_linear(ts_corr(cs_rank(vwap), cs_rank(volume), 4), 4), 8), ts_rank(ts_decay_linear(ts_argmax(ts_corr(ts_rank(close, 7), ts_rank(ts_mean(volume, 60), 4), 4), 13), 14), 13)) * -1")
        # Alpha97 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha97", "(cs_rank(ts_decay_linear(ts_delta(low * 0.721001 + vwap * 0.278999, 3), 20)) - ts_rank(ts_decay_linear(ts_rank(ts_corr(ts_rank(low, 8), ts_rank(ts_mean(volume, 60), 17), 5), 19), 16), 7)) * -1")
        # Alpha98
        self.add_feature("alpha98", "cs_rank(ts_decay_linear(ts_corr(vwap, ts_sum(ts_mean(volume, 5), 26), 5), 7)) - cs_rank(ts_decay_linear(ts_rank(ts_argmin(ts_corr(cs_rank(open), cs_rank(ts_mean(volume, 15)), 21), 9), 7), 8))")
        # Alpha99
        self.add_feature("alpha99", "quesval2(cs_rank(ts_corr(ts_sum((high + low) / 2, 20), ts_sum(ts_mean(volume, 60), 20), 9)), cs_rank(ts_corr(low, volume, 6)), 1, 0) * -1")
        # Alpha100 (contains `IndNeutralize`, currently not implemented)
        # self.add_feature("alpha100", "-1 * ((1.5 * cs_scale(cs_rank(((close - low) - (high - close)) / (high - low) * volume))) - cs_scale(ts_corr(close, cs_rank(ts_mean(volume, 20)), 5) - cs_rank(ts_argmin(close, 30)))) * (volume / ts_mean(volume, 20))")
        # Alpha101
        self.add_feature("alpha101", "((close - open) / ((high - low) + 0.001))")

        # Set label: close(t+1) -> close(t+3) return ratio minus 1
        # (negative lags presumably shift forward in time — verify against
        # the ts_delay implementation).
        self.set_label("ts_delay(close, -3) / ts_delay(close, -1) - 1")
| {
"repo_id": "vnpy/vnpy",
"file_path": "vnpy/alpha/dataset/datasets/alpha_101.py",
"license": "MIT License",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
vnpy/vnpy:vnpy/alpha/dataset/math_function.py | """
Math Functions
"""
import polars as pl
from .utility import DataProxy
def less(feature1: DataProxy, feature2: DataProxy | float) -> DataProxy:
    """Element-wise minimum of feature1 and feature2 (feature or constant)."""
    if not isinstance(feature2, DataProxy):
        # Constant operand: broadcast it as a literal column.
        merged = feature1.df.with_columns(pl.lit(feature2).alias("data_right"))
    else:
        merged = feature1.df.join(feature2.df, on=["datetime", "vt_symbol"])

    result: pl.DataFrame = merged.select(
        pl.col("datetime"),
        pl.col("vt_symbol"),
        pl.min_horizontal("data", "data_right").over("vt_symbol").alias("data"),
    )
    return DataProxy(result)
def greater(feature1: DataProxy, feature2: DataProxy | float) -> DataProxy:
    """Element-wise maximum of feature1 and feature2 (feature or constant)."""
    if not isinstance(feature2, DataProxy):
        # Constant operand: broadcast it as a literal column.
        merged = feature1.df.with_columns(pl.lit(feature2).alias("data_right"))
    else:
        merged = feature1.df.join(feature2.df, on=["datetime", "vt_symbol"])

    result: pl.DataFrame = merged.select(
        pl.col("datetime"),
        pl.col("vt_symbol"),
        pl.max_horizontal("data", "data_right").over("vt_symbol").alias("data"),
    )
    return DataProxy(result)
def log(feature: DataProxy) -> DataProxy:
    """Natural logarithm of the feature, evaluated per instrument."""
    return DataProxy(
        feature.df.select(
            pl.col("datetime"),
            pl.col("vt_symbol"),
            pl.col("data").log().over("vt_symbol"),
        )
    )
def abs(feature: DataProxy) -> DataProxy:
    """Absolute value of the feature, evaluated per instrument.

    Note: intentionally shadows the builtin ``abs`` so that alpha
    expressions can call ``abs(...)`` directly.
    """
    return DataProxy(
        feature.df.select(
            pl.col("datetime"),
            pl.col("vt_symbol"),
            pl.col("data").abs().over("vt_symbol"),
        )
    )
def sign(feature: DataProxy) -> DataProxy:
    """Sign of the feature: 1 for positive, -1 for negative, 0 otherwise."""
    sign_expr = (
        pl.when(pl.col("data") > 0)
        .then(1)
        .when(pl.col("data") < 0)
        .then(-1)
        .otherwise(0)
        .alias("data")
    )
    return DataProxy(
        feature.df.select(pl.col("datetime"), pl.col("vt_symbol"), sign_expr)
    )
def quesval(threshold: float, feature1: DataProxy, feature2: DataProxy | float | int, feature3: DataProxy | float | int) -> DataProxy:
    """Conditional select: feature2 where threshold < feature1, else feature3."""
    merged = feature1.df

    # Attach both branch operands as columns: join DataProxy operands,
    # broadcast scalar constants as literals.
    for operand, label in ((feature2, "true"), (feature3, "false")):
        if isinstance(operand, DataProxy):
            merged = merged.join(operand.df, on=["datetime", "vt_symbol"], suffix=f"_{label}")
        else:
            merged = merged.with_columns(pl.lit(operand).alias(f"data_{label}"))

    result: pl.DataFrame = merged.with_columns(
        pl.when(threshold < pl.col("data"))
        .then(pl.col("data_true"))
        .otherwise(pl.col("data_false"))
        .alias("data")
    ).select(["datetime", "vt_symbol", "data"])
    return DataProxy(result)
def quesval2(threshold: DataProxy, feature1: DataProxy, feature2: DataProxy | float | int, feature3: DataProxy | float | int) -> DataProxy:
    """Return feature2 where threshold < feature1, otherwise feature3.

    Per-row (DataProxy) threshold version of ``quesval``.
    """
    # After this join, "data" holds the threshold values (left frame) and
    # "data_cond" holds feature1's values — polars appends the suffix to the
    # RIGHT frame's clashing columns.
    df_merged: pl.DataFrame = threshold.df.join(feature1.df, on=["datetime", "vt_symbol"], suffix="_cond")

    if isinstance(feature2, DataProxy):
        df_merged = df_merged.join(feature2.df, on=["datetime", "vt_symbol"], suffix="_true")
    else:
        df_merged = df_merged.with_columns(pl.lit(feature2).alias("data_true"))

    if isinstance(feature3, DataProxy):
        df_merged = df_merged.join(feature3.df, on=["datetime", "vt_symbol"], suffix="_false")
    else:
        df_merged = df_merged.with_columns(pl.lit(feature3).alias("data_false"))

    df: pl.DataFrame = df_merged.with_columns(
        # BUG FIX: the original compared data_cond < data, i.e.
        # feature1 < threshold — the inverse of the documented contract and
        # of quesval (scalar threshold version). Compare the threshold
        # column ("data") against feature1 ("data_cond") instead.
        pl.when(pl.col("data") < pl.col("data_cond"))
        .then(pl.col("data_true"))
        .otherwise(pl.col("data_false"))
        .alias("data")
    ).select(["datetime", "vt_symbol", "data"])
    return DataProxy(df)
def pow1(base: DataProxy, exponent: float) -> DataProxy:
    """Sign-preserving power: |x| ** exponent carrying x's sign; 0 stays 0."""
    data = pl.col("data")
    powered = (
        pl.when(data > 0)
        .then(data.pow(exponent))
        .when(data < 0)
        .then(pl.lit(-1) * data.abs().pow(exponent))
        .otherwise(0)
        .alias("data")
    )
    return DataProxy(base.df.with_columns(powered))
def pow2(base: DataProxy, exponent: DataProxy) -> DataProxy:
    """Element-wise base ** exponent between two DataProxy objects.

    Rules:
    - positive base: base ** exponent
    - negative base with an integer exponent: -(|base| ** exponent)
    - everything else (zero base, NaN exponent, negative base with a
      fractional exponent): 0

    Integer-ness is detected via floor(e) == e rather than a cast to
    Int64, because casting NaN to an integer raises an error.
    """
    lhs = base.df.rename({"data": "base_data"})
    rhs = exponent.df.rename({"data": "exp_data"})
    joined: pl.DataFrame = lhs.join(rhs, on=["datetime", "vt_symbol"], how="left")

    negative_integer_case = (
        (pl.col("base_data") < 0) &
        (~pl.col("exp_data").is_nan()) &
        (pl.col("exp_data").floor() == pl.col("exp_data"))
    )
    powered = (
        pl.when(pl.col("base_data") > 0)
        .then(pl.col("base_data").pow(pl.col("exp_data")))
        .when(negative_integer_case)
        .then((-1) * pl.col("base_data").abs().pow(pl.col("exp_data")))
        .otherwise(pl.lit(None))
        .fill_nan(None)
        .fill_null(0)
        .alias("data")
    )
    result = joined.with_columns(powered).select(["datetime", "vt_symbol", "data"])
    return DataProxy(result)
| {
"repo_id": "vnpy/vnpy",
"file_path": "vnpy/alpha/dataset/math_function.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
wagtail/wagtail:wagtail/admin/tests/test_chooser.py | import json
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, override_settings
from django.urls import reverse
from wagtail.test.utils import WagtailTestUtils
class TestChooserUploadHidden(WagtailTestUtils, TestCase):
    """Test upload tab visibility for generic chooser"""

    def setUp(self):
        self.login()

    @override_settings(WAGTAILDOCS_EXTENSIONS=["pdf"])
    def test_upload_tab_visible_on_validation_error(self):
        """Test upload tab visibility on validation error"""
        # A .txt upload violates the pdf-only extensions setting, so the
        # creation form is re-shown with a validation error.
        invalid_file = SimpleUploadedFile("test.txt", b"Test File")
        response = self.client.post(
            reverse("wagtaildocs_chooser:create"),
            {"title": "Test", "file": invalid_file},
        )
        self.assertEqual(response.status_code, 200)

        payload = json.loads(response.content.decode("utf-8"))
        self.assertEqual(payload["step"], "reshow_creation_form")

        fragment = payload.get("htmlFragment")
        self.assertIsNotNone(fragment)

        tab = self.get_soup(fragment).find(id="tab-upload")
        self.assertIsNotNone(tab)
        self.assertNotIn("hidden", tab.attrs)

    def test_upload_tab_hidden_on_initial_load(self):
        """Test upload tab hidden on initial load"""
        response = self.client.get(reverse("wagtaildocs_chooser:choose"))
        payload = json.loads(response.content.decode("utf-8"))

        markup = payload.get("html", "")
        self.assertIsNotNone(markup)

        tab = self.get_soup(markup).find(id="tab-upload")
        self.assertIsNotNone(tab)
        self.assertIn("hidden", tab.attrs)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/admin/tests/test_chooser.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_chooser_block.py | from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.test import TestCase
from wagtail.blocks.field_block import FieldBlockAdapter
from wagtail.models import Locale
from wagtail.snippets.blocks import SnippetChooserBlock
from wagtail.snippets.widgets import AdminSnippetChooser
from wagtail.test.testapp.models import Advert, AdvertWithCustomPrimaryKey
class TestSnippetChooserBlock(TestCase):
fixtures = ["test.json"]
def test_serialize(self):
"""The value of a SnippetChooserBlock (a snippet instance) should serialize to an ID"""
block = SnippetChooserBlock(Advert)
test_advert = Advert.objects.get(text="test_advert")
self.assertEqual(block.get_prep_value(test_advert), test_advert.id)
# None should serialize to None
self.assertIsNone(block.get_prep_value(None))
def test_deserialize(self):
"""The serialized value of a SnippetChooserBlock (an ID) should deserialize to a snippet instance"""
block = SnippetChooserBlock(Advert)
test_advert = Advert.objects.get(text="test_advert")
self.assertEqual(block.to_python(test_advert.id), test_advert)
# None should deserialize to None
self.assertIsNone(block.to_python(None))
def test_reference_model_by_string(self):
block = SnippetChooserBlock("tests.Advert")
test_advert = Advert.objects.get(text="test_advert")
self.assertEqual(block.to_python(test_advert.id), test_advert)
def test_adapt(self):
block = SnippetChooserBlock(
Advert,
help_text="pick an advert, any advert",
description="An advert to be displayed on the sidebar.",
)
block.set_name("test_snippetchooserblock")
js_args = FieldBlockAdapter().js_args(block)
self.assertEqual(js_args[0], "test_snippetchooserblock")
self.assertIsInstance(js_args[1], AdminSnippetChooser)
self.assertEqual(js_args[1].model, Advert)
self.assertEqual(
js_args[2],
{
"label": "Test snippetchooserblock",
"description": "An advert to be displayed on the sidebar.",
"required": True,
"icon": "snippet",
"blockDefId": block.definition_prefix,
"isPreviewable": block.is_previewable,
"helpText": "pick an advert, any advert",
"classname": "w-field w-field--model_choice_field w-field--admin_snippet_chooser",
"attrs": {},
"showAddCommentButton": True,
"strings": {"ADD_COMMENT": "Add Comment"},
},
)
def test_form_response(self):
block = SnippetChooserBlock(Advert)
test_advert = Advert.objects.get(text="test_advert")
value = block.value_from_datadict({"advert": str(test_advert.id)}, {}, "advert")
self.assertEqual(value, test_advert)
empty_value = block.value_from_datadict({"advert": ""}, {}, "advert")
self.assertIsNone(empty_value)
def test_clean(self):
    """clean() should enforce `required` while passing valid values through."""
    advert = Advert.objects.get(text="test_advert")

    required = SnippetChooserBlock(Advert)
    self.assertEqual(required.clean(advert), advert)
    with self.assertRaises(ValidationError):
        required.clean(None)

    optional = SnippetChooserBlock(Advert, required=False)
    self.assertEqual(optional.clean(advert), advert)
    self.assertIsNone(optional.clean(None))
def test_deconstruct(self):
    """deconstruct() should emit the import path plus the model arg and kwargs."""
    path, args, kwargs = SnippetChooserBlock(Advert, required=False).deconstruct()
    self.assertEqual(path, "wagtail.snippets.blocks.SnippetChooserBlock")
    self.assertEqual(args, (Advert,))
    self.assertEqual(kwargs, {"required": False})
def test_extract_references(self):
    """extract_references() should report the chosen snippet as a reference."""
    chooser_block = SnippetChooserBlock(Advert)
    advert = Advert.objects.get(text="test_advert")

    refs = list(chooser_block.extract_references(advert))
    self.assertListEqual(refs, [(Advert, str(advert.id), "", "")])

    # No value means no references.
    self.assertListEqual(list(chooser_block.extract_references(None)), [])
def test_exception_on_non_snippet_model(self):
    """Pointing the block at a non-snippet model should fail once the widget is built."""
    with self.assertRaises(ImproperlyConfigured):
        SnippetChooserBlock(Locale).widget
class TestSnippetChooserBlockWithCustomPrimaryKey(TestCase):
    """SnippetChooserBlock behaviour for a snippet model with a non-integer primary key."""

    fixtures = ["test.json"]

    def _advert(self):
        # Fixture snippet with the custom string primary key "advert/01".
        return AdvertWithCustomPrimaryKey.objects.get(pk="advert/01")

    def test_serialize(self):
        """A snippet instance serializes to its (custom) primary key."""
        chooser_block = SnippetChooserBlock(AdvertWithCustomPrimaryKey)
        advert = self._advert()
        self.assertEqual(chooser_block.get_prep_value(advert), advert.pk)
        # None serializes to None.
        self.assertIsNone(chooser_block.get_prep_value(None))

    def test_deserialize(self):
        """A stored primary key deserializes back to the snippet instance."""
        chooser_block = SnippetChooserBlock(AdvertWithCustomPrimaryKey)
        advert = self._advert()
        self.assertEqual(chooser_block.to_python(advert.pk), advert)
        # None deserializes to None.
        self.assertIsNone(chooser_block.to_python(None))

    def test_adapt(self):
        """FieldBlockAdapter should expose the block's name, widget and option dict."""
        chooser_block = SnippetChooserBlock(
            AdvertWithCustomPrimaryKey,
            help_text="pick an advert, any advert",
            description="An advert to be displayed on the footer.",
        )
        chooser_block.set_name("test_snippetchooserblock")

        adapted = FieldBlockAdapter().js_args(chooser_block)

        self.assertEqual(adapted[0], "test_snippetchooserblock")
        self.assertIsInstance(adapted[1], AdminSnippetChooser)
        self.assertEqual(adapted[1].model, AdvertWithCustomPrimaryKey)
        expected_options = {
            "label": "Test snippetchooserblock",
            "description": "An advert to be displayed on the footer.",
            "required": True,
            "icon": "snippet",
            "blockDefId": chooser_block.definition_prefix,
            "isPreviewable": chooser_block.is_previewable,
            "helpText": "pick an advert, any advert",
            "classname": "w-field w-field--model_choice_field w-field--admin_snippet_chooser",
            "attrs": {},
            "showAddCommentButton": True,
            "strings": {"ADD_COMMENT": "Add Comment"},
        }
        self.assertEqual(adapted[2], expected_options)

    def test_form_response(self):
        """value_from_datadict should resolve posted form data to the snippet."""
        chooser_block = SnippetChooserBlock(AdvertWithCustomPrimaryKey)
        advert = self._advert()

        chosen = chooser_block.value_from_datadict(
            {"advertwithcustomprimarykey": str(advert.pk)},
            {},
            "advertwithcustomprimarykey",
        )
        self.assertEqual(chosen, advert)

        # An empty submission maps to None.
        self.assertIsNone(
            chooser_block.value_from_datadict(
                {"advertwithcustomprimarykey": ""}, {}, "advertwithcustomprimarykey"
            )
        )

    def test_clean(self):
        """clean() should enforce `required` while passing valid values through."""
        advert = self._advert()

        required = SnippetChooserBlock(AdvertWithCustomPrimaryKey)
        self.assertEqual(required.clean(advert), advert)
        with self.assertRaises(ValidationError):
            required.clean(None)

        optional = SnippetChooserBlock(AdvertWithCustomPrimaryKey, required=False)
        self.assertEqual(optional.clean(advert), advert)
        self.assertIsNone(optional.clean(None))
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_chooser_block.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_chooser_panel.py | from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory, TestCase
from wagtail.admin.panels import FieldPanel, ObjectList, get_edit_handler
from wagtail.snippets.widgets import AdminSnippetChooser
from wagtail.test.testapp.models import (
Advert,
AdvertWithCustomPrimaryKey,
SnippetChooserModel,
SnippetChooserModelWithCustomPrimaryKey,
)
from wagtail.test.utils import WagtailTestUtils
class TestSnippetChooserPanel(WagtailTestUtils, TestCase):
    """Rendering behaviour of a FieldPanel bound to a snippet chooser field."""

    fixtures = ["test.json"]

    def setUp(self):
        self.request = RequestFactory().get("/")
        # Anonymous users cannot really reach the admin; the panel only needs
        # *a* user object on the request.
        self.request.user = AnonymousUser()

        self.advert_text = "Test advert text"
        instance = SnippetChooserModel.objects.create(
            advert=Advert.objects.create(text=self.advert_text)
        )

        self.edit_handler = get_edit_handler(SnippetChooserModel)
        self.form_class = self.edit_handler.get_form_class()
        bound = self.edit_handler.get_bound_panel(
            instance=instance,
            form=self.form_class(instance=instance),
            request=self.request,
        )
        self.snippet_chooser_panel = next(
            panel
            for panel in bound.children
            if getattr(panel, "field_name", None) == "advert"
        )

    def test_render_html(self):
        html = self.snippet_chooser_panel.render_html()
        self.assertIn(self.advert_text, html)
        self.assertIn("Choose advert", html)
        self.assertIn("Choose another advert", html)
        self.assertIn("icon icon-snippet icon", html)

    def test_render_as_empty_field(self):
        blank_instance = SnippetChooserModel()
        bound = self.edit_handler.get_bound_panel(
            instance=blank_instance,
            form=self.form_class(instance=blank_instance),
            request=self.request,
        )
        panel = next(
            child
            for child in bound.children
            if getattr(child, "field_name", None) == "advert"
        )
        html = panel.render_html()
        self.assertIn("Choose advert", html)
        self.assertIn("Choose another advert", html)

    def test_render_js(self):
        self.assertIn(
            'new SnippetChooser("id_advert", {"modalUrl": "/admin/snippets/choose/tests/advert/"});',
            self.snippet_chooser_panel.render_html(),
        )

    def test_target_model_autodetected(self):
        handler = ObjectList([FieldPanel("advert")]).bind_to_model(SnippetChooserModel)
        widget = handler.get_form_class()().fields["advert"].widget
        self.assertIsInstance(widget, AdminSnippetChooser)
        self.assertEqual(widget.model, Advert)
class TestSnippetChooserPanelWithCustomPrimaryKey(WagtailTestUtils, TestCase):
    """FieldPanel rendering for a chooser targeting a custom-PK snippet."""

    fixtures = ["test.json"]

    def setUp(self):
        self.request = RequestFactory().get("/")
        # Any user object on the request will do for panel rendering.
        self.request.user = AnonymousUser()

        self.advert_text = "Test advert text"
        instance = SnippetChooserModelWithCustomPrimaryKey.objects.create(
            advertwithcustomprimarykey=AdvertWithCustomPrimaryKey.objects.create(
                advert_id="advert/02", text=self.advert_text
            )
        )

        self.edit_handler = get_edit_handler(SnippetChooserModelWithCustomPrimaryKey)
        self.form_class = self.edit_handler.get_form_class()
        bound = self.edit_handler.get_bound_panel(
            instance=instance,
            form=self.form_class(instance=instance),
            request=self.request,
        )
        self.snippet_chooser_panel = next(
            panel
            for panel in bound.children
            if getattr(panel, "field_name", None) == "advertwithcustomprimarykey"
        )

    def test_render_html(self):
        html = self.snippet_chooser_panel.render_html()
        self.assertIn(self.advert_text, html)
        self.assertIn("Choose advert with custom primary key", html)
        self.assertIn("Choose another advert with custom primary key", html)

    def test_render_as_empty_field(self):
        blank_instance = SnippetChooserModelWithCustomPrimaryKey()
        bound = self.edit_handler.get_bound_panel(
            instance=blank_instance,
            form=self.form_class(instance=blank_instance),
            request=self.request,
        )
        panel = next(
            child
            for child in bound.children
            if getattr(child, "field_name", None) == "advertwithcustomprimarykey"
        )
        html = panel.render_html()
        self.assertIn("Choose advert with custom primary key", html)
        self.assertIn("Choose another advert with custom primary key", html)

    def test_render_js(self):
        self.assertIn(
            'new SnippetChooser("id_advertwithcustomprimarykey", {"modalUrl": "/admin/snippets/choose/tests/advertwithcustomprimarykey/"});',
            self.snippet_chooser_panel.render_html(),
        )

    def test_target_model_autodetected(self):
        handler = ObjectList(
            [FieldPanel("advertwithcustomprimarykey")]
        ).bind_to_model(SnippetChooserModelWithCustomPrimaryKey)
        widget = handler.get_form_class()().fields["advertwithcustomprimarykey"].widget
        self.assertIsInstance(widget, AdminSnippetChooser)
        self.assertEqual(widget.model, AdvertWithCustomPrimaryKey)
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_chooser_panel.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_chooser_views.py | import json
from django.contrib.admin.utils import quote
from django.test import TestCase, TransactionTestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.models import Locale
from wagtail.test.snippets.models import (
NonAutocompleteSearchableSnippet,
SearchableSnippet,
TranslatableSnippet,
)
from wagtail.test.testapp.models import (
Advert,
AdvertWithCustomPrimaryKey,
AdvertWithCustomUUIDPrimaryKey,
DraftStateModel,
)
from wagtail.test.utils import WagtailTestUtils
class TestSnippetChoose(WagtailTestUtils, TestCase):
    """The modal "choose" view for snippet choosers."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()
        # (app_label, model_name) used to build the chooser URL; individual
        # tests override this to target other snippet models.
        self.url_args = ["tests", "advert"]

    def get(self, params=None):
        app_label, model_name = self.url_args
        url = reverse(f"wagtailsnippetchoosers_{app_label}_{model_name}:choose")
        return self.client.get(url, params or {})

    def test_simple(self):
        response = self.get()
        self.assertTemplateUsed(response, "wagtailadmin/generic/chooser/chooser.html")
        # No locale filter is rendered unless i18n is switched on.
        self.assertNotIn(
            '<select data-chooser-modal-search-filter name="lang">',
            response.json()["html"],
        )

    def test_no_results(self):
        Advert.objects.all().delete()
        response = self.get()
        self.assertTemplateUsed(response, "wagtailadmin/generic/chooser/chooser.html")
        # The empty state links to the snippet's "add" view.
        self.assertIn(
            'href="/admin/snippets/tests/advert/add/"', response.json()["html"]
        )

    def test_ordering(self):
        """With no Meta ordering on the model, the listing falls back to PK order."""
        Advert.objects.all().delete()
        for pk in range(10, 0, -1):
            Advert.objects.create(pk=pk, text="advert %d" % pk)
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["results"][0].text, "advert 1")

    def test_simple_pagination(self):
        # An in-range page number is served normally...
        response = self.get({"p": 1})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "wagtailadmin/generic/chooser/chooser.html")
        # ...while an out-of-range one is a 404.
        self.assertEqual(self.get({"p": 9999}).status_code, 404)

    def test_not_searchable(self):
        # A non-searchable model gets no "q" field on the filter form.
        self.assertFalse(self.get().context["filter_form"].fields.get("q"))

    @override_settings(WAGTAIL_I18N_ENABLED=False)
    def test_locale_filter_requires_i18n_enabled(self):
        self.url_args = ["snippetstests", "translatablesnippet"]
        fr_locale = Locale.objects.create(language_code="fr")
        TranslatableSnippet.objects.create(text="English snippet")
        TranslatableSnippet.objects.create(text="French snippet", locale=fr_locale)

        html = self.get().json()["html"]
        # With i18n disabled, the locale filter must be absent.
        self.assertNotIn("data-chooser-modal-search-filter", html)
        self.assertNotIn('name="locale"', html)

    @override_settings(WAGTAIL_I18N_ENABLED=True)
    def test_filter_by_locale(self):
        self.url_args = ["snippetstests", "translatablesnippet"]
        fr_locale = Locale.objects.create(language_code="fr")
        TranslatableSnippet.objects.create(text="English snippet")
        TranslatableSnippet.objects.create(text="French snippet", locale=fr_locale)

        response = self.get()
        html = response.json()["html"]
        # The filter widget is rendered...
        self.assertIn("data-chooser-modal-search-filter", html)
        self.assertIn('name="locale"', html)
        # ...and with no selection, snippets from every locale are listed.
        self.assertEqual(len(response.context["results"]), 2)
        self.assertEqual(response.context["results"][0].text, "English snippet")
        self.assertEqual(response.context["results"][1].text, "French snippet")

        # Selecting a locale narrows the listing to it.
        response = self.get({"locale": "en"})
        self.assertEqual(len(response.context["results"]), 1)
        self.assertEqual(response.context["results"][0].text, "English snippet")
class TestSnippetChooseResults(WagtailTestUtils, TestCase):
    """The AJAX results partial of the advert chooser."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()

    def get(self, params=None):
        return self.client.get(
            reverse("wagtailsnippetchoosers_tests_advert:choose_results"), params or {}
        )

    def test_simple(self):
        self.assertTemplateUsed(self.get(), "wagtailsnippets/chooser/results.html")

    def test_no_results(self):
        Advert.objects.all().delete()
        response = self.get()
        self.assertTemplateUsed(response, "wagtailsnippets/chooser/results.html")
        # The empty state links to the snippet's "add" view.
        self.assertContains(response, 'href="/admin/snippets/tests/advert/add/"')
class TestSnippetChooseStatus(WagtailTestUtils, TestCase):
    """Choosers for a DraftStateModel should surface each object's live/draft status."""

    @classmethod
    def setUpTestData(cls):
        # One object in each state: pure draft, pure live, and live with
        # unpublished changes.
        cls.draft = DraftStateModel.objects.create(text="foo", live=False)
        cls.live = DraftStateModel.objects.create(text="bar", live=True)
        cls.live_draft = DraftStateModel.objects.create(text="baz", live=True)
        cls.live_draft.save_revision()

    def setUp(self):
        self.login()

    def get(self, view_name, params=None):
        url = reverse(f"wagtailsnippetchoosers_tests_draftstatemodel:{view_name}")
        return self.client.get(url, params or {})

    def test_choose_view_shows_status_column(self):
        html = self.get("choose").json()["html"]
        self.assertTagInHTML("<th>Status</th>", html)
        self.assertTagInHTML('<span class="w-status">draft</span>', html)
        self.assertTagInHTML(
            '<span class="w-status w-status--primary">live</span>', html
        )
        self.assertTagInHTML(
            '<span class="w-status w-status--primary">live + draft</span>', html
        )

    def test_choose_results_view_shows_status_column(self):
        response = self.get("choose_results")
        for fragment in (
            "<th>Status</th>",
            '<span class="w-status">draft</span>',
            '<span class="w-status w-status--primary">live</span>',
            '<span class="w-status w-status--primary">live + draft</span>',
        ):
            self.assertContains(response, fragment, html=True)
class TestSnippetChooseWithSearchableSnippet(WagtailTestUtils, TransactionTestCase):
    """Search behaviour of the chooser for a snippet with search fields."""

    def setUp(self):
        self.login()
        # Three snippets whose texts overlap, to exercise matching.
        self.snippet_a = SearchableSnippet.objects.create(text="Hello")
        self.snippet_b = SearchableSnippet.objects.create(text="World")
        self.snippet_c = SearchableSnippet.objects.create(text="Hello World")

    def get(self, params=None):
        return self.client.get(
            reverse("wagtailsnippetchoosers_snippetstests_searchablesnippet:choose"),
            params or {},
        )

    def _results(self, params=None):
        # The object list backing the rendered listing.
        return list(self.get(params).context["results"].object_list)

    def test_simple(self):
        response = self.get()
        self.assertTemplateUsed(response, "wagtailadmin/generic/chooser/chooser.html")
        # Without a query, every snippet is listed.
        items = list(response.context["results"].object_list)
        for snippet in (self.snippet_a, self.snippet_b, self.snippet_c):
            self.assertIn(snippet, items)

    def test_is_searchable(self):
        # A searchable model gets a "q" field on the filter form.
        self.assertTrue(self.get().context["filter_form"].fields.get("q"))

    def test_search_hello(self):
        items = self._results({"q": "Hello"})
        self.assertIn(self.snippet_a, items)
        self.assertNotIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_search_world(self):
        items = self._results({"q": "World"})
        self.assertNotIn(self.snippet_a, items)
        self.assertIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)

    def test_partial_match(self):
        # Autocomplete search matches word prefixes, so "hello wo" finds "Hello World".
        items = self._results({"q": "hello wo"})
        self.assertNotIn(self.snippet_a, items)
        self.assertNotIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)
class TestSnippetChooseWithNonAutocompleteSearchableSnippet(
    WagtailTestUtils, TransactionTestCase
):
    """
    A searchable snippet with no AutocompleteFields defined can still be searched
    using full words; a RuntimeWarning flags the missing autocomplete config.
    """

    def setUp(self):
        self.login()
        # Three snippets whose texts overlap, to exercise matching.
        self.snippet_a = NonAutocompleteSearchableSnippet.objects.create(text="Hello")
        self.snippet_b = NonAutocompleteSearchableSnippet.objects.create(text="World")
        self.snippet_c = NonAutocompleteSearchableSnippet.objects.create(
            text="Hello World"
        )

    def get(self, params=None):
        url = reverse(
            "wagtailsnippetchoosers_snippetstests_nonautocompletesearchablesnippet:choose"
        )
        return self.client.get(url, params or {})

    def test_search_hello(self):
        with self.assertWarnsRegex(
            RuntimeWarning, "does not specify any AutocompleteFields"
        ):
            response = self.get({"q": "Hello"})
        # Only snippets containing the full word "Hello" are listed.
        items = list(response.context["results"].object_list)
        self.assertIn(self.snippet_a, items)
        self.assertNotIn(self.snippet_b, items)
        self.assertIn(self.snippet_c, items)
class TestSnippetChosen(WagtailTestUtils, TestCase):
    """The "chosen" endpoint that reports a selection back to the modal."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()

    def get(self, pk, params=None):
        return self.client.get(
            reverse("wagtailsnippetchoosers_tests_advert:chosen", args=(pk,)),
            params or {},
        )

    def test_choose_a_page(self):
        response = self.get(pk=Advert.objects.all()[0].pk)
        self.assertEqual(json.loads(response.content.decode())["step"], "chosen")

    def test_choose_a_non_existing_page(self):
        # An unknown PK yields a 404 rather than an error page.
        self.assertEqual(self.get(999999).status_code, 404)
class TestSnippetChooseWithCustomPrimaryKey(WagtailTestUtils, TestCase):
    """Chooser views for a snippet model with a custom primary key."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()

    def get(self, params=None):
        return self.client.get(
            reverse("wagtailsnippetchoosers_tests_advertwithcustomprimarykey:choose"),
            params or {},
        )

    def test_simple(self):
        response = self.get()
        self.assertTemplateUsed(response, "wagtailadmin/generic/chooser/chooser.html")
        self.assertEqual(response.context["header_icon"], "snippet")
        self.assertEqual(response.context["icon"], "snippet")

    def test_ordering(self):
        """With no Meta ordering on the model, the listing falls back to PK order."""
        AdvertWithCustomPrimaryKey.objects.all().delete()
        for pk in range(10, 0, -1):
            AdvertWithCustomPrimaryKey.objects.create(pk=pk, text="advert %d" % pk)
        response = self.get()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["results"][0].text, "advert 1")
class TestSnippetChosenWithCustomPrimaryKey(WagtailTestUtils, TestCase):
    """The "chosen" endpoint for a snippet model with a custom string PK."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()

    def get(self, pk, params=None):
        # The PK may contain URL-special characters, so quote it for the URL.
        url = reverse(
            "wagtailsnippetchoosers_tests_advertwithcustomprimarykey:chosen",
            args=(quote(pk),),
        )
        return self.client.get(url, params or {})

    def test_choose_a_page(self):
        response = self.get(pk=AdvertWithCustomPrimaryKey.objects.all()[0].pk)
        self.assertEqual(json.loads(response.content.decode())["step"], "chosen")
class TestSnippetChosenWithCustomUUIDPrimaryKey(WagtailTestUtils, TestCase):
    """The "chosen" endpoint for a snippet model keyed by a UUID."""

    fixtures = ["test.json"]

    def setUp(self):
        self.login()

    def get(self, pk, params=None):
        # Quote the UUID so it is safe to embed in the URL.
        url = reverse(
            "wagtailsnippetchoosers_tests_advertwithcustomuuidprimarykey:chosen",
            args=(quote(pk),),
        )
        return self.client.get(url, params or {})

    def test_choose_a_page(self):
        response = self.get(pk=AdvertWithCustomUUIDPrimaryKey.objects.all()[0].pk)
        self.assertEqual(json.loads(response.content.decode())["step"], "chosen")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_chooser_views.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_chooser_widget.py | from django.test import TestCase
from wagtail.snippets.widgets import (
AdminSnippetChooser,
SnippetChooserAdapter,
)
from wagtail.test.testapp.models import Advert
from wagtail.test.utils import WagtailTestUtils
class TestAdminSnippetChooserWidget(WagtailTestUtils, TestCase):
    def test_adapt(self):
        """SnippetChooserAdapter should pack the widget into (html, id, opts)."""
        adapted = SnippetChooserAdapter().js_args(AdminSnippetChooser(Advert))

        self.assertEqual(len(adapted), 3)
        # The rendered HTML carries placeholder name/id tokens...
        self.assertInHTML(
            '<input type="hidden" name="__NAME__" id="__ID__">', adapted[0]
        )
        self.assertIn("Choose advert", adapted[0])
        # ...and the id placeholder is also passed separately.
        self.assertEqual(adapted[1], "__ID__")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_chooser_widget.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
wagtail/wagtail:wagtail/snippets/tests/test_compare_revisions_view.py | import datetime
from django.contrib.admin.utils import quote
from django.contrib.auth.models import Permission
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import make_aware
from wagtail.test.testapp.models import (
RevisableChildModel,
RevisableModel,
)
from wagtail.test.utils import WagtailTestUtils
from wagtail.test.utils.template_tests import AdminTemplateTestUtils
class TestCompareRevisions(AdminTemplateTestUtils, WagtailTestUtils, TestCase):
    """The snippet revisions_compare view.

    The comparison machinery itself is covered in test_compare.py; these tests
    exercise the view wiring, URL shortcuts and chrome around the diff.
    """

    def _pin_created_at(self, revision, day):
        # Give the revision a deterministic May 2022 timestamp for ordering.
        revision.created_at = make_aware(datetime.datetime(2022, 5, day))
        revision.save()

    def setUp(self):
        self.snippet = RevisableModel.objects.create(text="Initial revision")
        self.initial_revision = self.snippet.save_revision()
        self._pin_created_at(self.initial_revision, 10)

        self.snippet.text = "First edit"
        self.snippet.save()
        self.edit_revision = self.snippet.save_revision()
        self._pin_created_at(self.edit_revision, 11)

        self.snippet.text = "Final revision"
        self.snippet.save()
        self.final_revision = self.snippet.save_revision()
        self._pin_created_at(self.final_revision, 12)

        self.login()

    def get(self, revision_a_id, revision_b_id):
        return self.client.get(
            reverse(
                "wagtailsnippets_tests_revisablemodel:revisions_compare",
                args=(quote(self.snippet.pk), revision_a_id, revision_b_id),
            )
        )

    def test_compare_revisions(self):
        response = self.get(self.initial_revision.pk, self.edit_revision.pk)
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<span class="deletion">Initial revision</span><span class="addition">First edit</span>',
            html=True,
        )

        index_url = reverse("wagtailsnippets_tests_revisablemodel:list", args=[])
        edit_url = reverse(
            "wagtailsnippets_tests_revisablemodel:edit", args=(self.snippet.id,)
        )
        history_url = reverse(
            "wagtailsnippets_tests_revisablemodel:history", args=(self.snippet.id,)
        )
        self.assertBreadcrumbsItemsRendered(
            [
                # "Snippets" index link is omitted as RevisableModel has its own menu item
                {"url": index_url, "label": "Revisable models"},
                {"url": edit_url, "label": str(self.snippet)},
                {"url": history_url, "label": "History"},
                {"url": "", "label": "Compare", "sublabel": str(self.snippet)},
            ],
            response.content,
        )

        # The header carries an Edit button pointing at the edit view.
        soup = self.get_soup(response.content)
        edit_button = soup.select_one(f"a.w-header-button[href='{edit_url}']")
        self.assertIsNotNone(edit_button)
        self.assertEqual(edit_button.text.strip(), "Edit")

    def test_compare_revisions_earliest(self):
        # The "earliest" shortcut resolves to the oldest revision.
        response = self.get("earliest", self.edit_revision.pk)
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<span class="deletion">Initial revision</span><span class="addition">First edit</span>',
            html=True,
        )

    def test_compare_revisions_latest(self):
        # The "latest" shortcut resolves to the newest revision.
        response = self.get(self.edit_revision.id, "latest")
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<span class="deletion">First edit</span><span class="addition">Final revision</span>',
            html=True,
        )

    def test_compare_revisions_live(self):
        # Edit the live object directly, bypassing revisions, then diff against it.
        self.snippet.text = "Live edited"
        self.snippet.save(update_fields=["text"])

        response = self.get(self.final_revision.id, "live")
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            '<span class="deletion">Final revision</span><span class="addition">Live edited</span>',
            html=True,
        )
class TestCompareRevisionsWithPerUserPanels(WagtailTestUtils, TestCase):
    """Fields hidden from a user by per-user panels must not leak into diffs."""

    def setUp(self):
        self.snippet = RevisableChildModel.objects.create(
            text="Foo bar", secret_text="Secret text"
        )
        self.old_revision = self.snippet.save_revision()
        self.snippet.text = "Foo baz"
        self.snippet.secret_text = "Secret unseen note"
        self.new_revision = self.snippet.save_revision()
        self.compare_url = reverse(
            "wagtailsnippets_tests_revisablechildmodel:revisions_compare",
            args=(quote(self.snippet.pk), self.old_revision.pk, self.new_revision.pk),
        )

    def test_comparison_as_superuser(self):
        self.login()
        response = self.client.get(self.compare_url)
        self.assertEqual(response.status_code, 200)
        # A superuser sees diffs for both the public and the restricted field.
        self.assertContains(
            response,
            'Foo <span class="deletion">bar</span><span class="addition">baz</span>',
            html=True,
        )
        self.assertContains(
            response,
            'Secret <span class="deletion">text</span><span class="addition">unseen note</span>',
            html=True,
        )

    def test_comparison_as_ordinary_user(self):
        user = self.create_user(username="editor", password="password")
        user.user_permissions.add(
            Permission.objects.get(
                content_type__app_label="tests",
                codename="change_revisablechildmodel",
            ),
            Permission.objects.get(
                content_type__app_label="wagtailadmin", codename="access_admin"
            ),
        )
        self.login(username="editor", password="password")

        response = self.client.get(self.compare_url)
        self.assertEqual(response.status_code, 200)
        # The public field's diff is shown...
        self.assertContains(
            response,
            'Foo <span class="deletion">bar</span><span class="addition">baz</span>',
            html=True,
        )
        # ...but the restricted field's content stays hidden.
        self.assertNotContains(response, "unseen note")
| {
"repo_id": "wagtail/wagtail",
"file_path": "wagtail/snippets/tests/test_compare_revisions_view.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.