sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
vllm-project/vllm:vllm/model_executor/model_loader/reload/sanitize.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from types import MethodType
import torch
__all__ = ["sanitize_layer_refs", "restore_layer_refs"]
layer_ref_sentinel = object()


def sanitize_layer_refs(tensor: torch.Tensor, layer: torch.nn.Module) -> torch.Tensor:
    """
    Strip references to ``layer`` from method attributes attached to ``tensor``.

    Bound methods whose ``__self__`` is ``layer`` (e.g. weight loaders) are
    rebound to a module-level sentinel. This breaks the tensor -> layer cycle
    so that weak-keyed caches such as ``LAYERWISE_INFO`` can evict their
    entries once the model is deleted.

    :param tensor: tensor to be sanitized
    :param layer: layer whose references should be removed
    :return: the same tensor, sanitized in place
    """
    attrs = tensor.__dict__
    for name in list(attrs):
        attr = attrs[name]
        if isinstance(attr, MethodType) and attr.__self__ is layer:
            # Rebind the underlying function onto the sentinel instead of
            # the layer, preserving the callable attribute's shape.
            attrs[name] = attr.__func__.__get__(layer_ref_sentinel)
    return tensor
def restore_layer_refs(tensor: torch.Tensor, layer: torch.nn.Module) -> torch.Tensor:
    """
    Re-attach ``layer`` to method attributes previously sanitized on ``tensor``.

    Inverse of ``sanitize_layer_refs``: any method bound to the sentinel is
    rebound to ``layer`` so that weight loading works again.

    :param tensor: tensor whose attributes should be restored
    :param layer: layer to rebind the method attributes to
    :return: the same tensor, restored in place
    """
    attrs = tensor.__dict__
    for name in list(attrs):
        attr = attrs[name]
        if isinstance(attr, MethodType) and attr.__self__ is layer_ref_sentinel:
            attrs[name] = attr.__func__.__get__(layer)
    return tensor
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/sanitize.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/reload/torchao_decorator.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Iterable
from functools import wraps
from types import FunctionType
from typing import TYPE_CHECKING
import torch
from vllm.config import ModelConfig
from .layerwise import (
finalize_layerwise_reload,
initialize_layerwise_reload,
)
if TYPE_CHECKING:
from vllm.model_executor.models.utils import AutoWeightsLoader
__all__ = ["set_torchao_reload_attrs", "support_quantized_model_reload_from_hp_weights"]
def set_torchao_reload_attrs(model: torch.nn.Module, model_config: ModelConfig):
    """Flag ``model`` for a torchao reload and stash its config.

    Both attributes are consumed by the patched ``load_weights`` produced by
    ``support_quantized_model_reload_from_hp_weights``.
    """
    setattr(model, "_do_torchao_reload", True)
    setattr(model, "_model_config", model_config)
def support_quantized_model_reload_from_hp_weights(original_load_weights: FunctionType):
    """
    Wrap ``AutoWeightsLoader.load_weights`` so an already-quantized torchao
    model can be reloaded from high precision (bfloat16/float16/float32)
    weights: the layer state is restored to high precision, the original
    loader runs, then the weights are re-quantized online.

    Only activates for models flagged via ``set_torchao_reload_attrs``;
    otherwise the wrapper is a transparent pass-through. Assumes all model
    weights arrive in a single weights iterator (no batched updates).
    """

    @wraps(original_load_weights)
    def patched_model_load_weights(
        self: "AutoWeightsLoader",
        weights: Iterable[tuple[str, torch.Tensor]],
        *args,
        **kwargs,
    ):
        target_model = self.module
        # Not flagged for torchao reload: behave exactly like the original.
        if not getattr(target_model, "_do_torchao_reload", False):
            return original_load_weights(self, weights, *args, **kwargs)
        initialize_layerwise_reload(target_model)
        result = original_load_weights(self, weights, *args, **kwargs)
        finalize_layerwise_reload(target_model, target_model._model_config)
        return result

    return patched_model_load_weights
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/torchao_decorator.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/reload/types.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass, field
from inspect import BoundArguments
import torch
__all__ = ["LayerTensors", "LayerReloadingInfo"]
# encodes both parameters and buffers separately
LayerTensors = tuple[dict[str, torch.Tensor], dict[str, torch.Tensor]]
@dataclass
class LayerReloadingInfo:
# model format (meta), populated by `record_metadata_for_reloading`
restore_metadata: LayerTensors = field(default_factory=lambda: ({}, {}))
# kernel format (device)
kernel_tensors: LayerTensors = field(default_factory=lambda: ({}, {}))
# track how many restored elements are ready for loading
load_numel: int = 0
load_numel_total: int | None = None
# stores arguments and tensors ready for loading
loaded_weights: list[tuple[str, BoundArguments]] = field(default_factory=list)
def reset(self):
self.__init__(restore_metadata=self.restore_metadata) # type: ignore[misc]
def can_process(self) -> bool:
return self.load_numel_total is not None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/types.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/model_loader/reload/utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
from .types import LayerTensors
__all__ = [
"get_layer_tensors",
"get_layer_params_buffers",
"get_layer_size",
]
def get_layer_tensors(layer: torch.nn.Module) -> dict[str, torch.Tensor]:
    """Collect every parameter and buffer of ``layer`` into one flat dict."""
    params, buffers = get_layer_params_buffers(layer)
    merged = dict(params)
    merged.update(buffers)
    return merged
def get_layer_params_buffers(layer: torch.nn.Module) -> LayerTensors:
    """Return ``layer``'s (parameters, buffers) as two name->tensor dicts,
    skipping entries registered as None."""
    params = {name: p for name, p in layer._parameters.items() if p is not None}
    buffers = {name: b for name, b in layer._buffers.items() if b is not None}
    return params, buffers
def get_layer_size(layer: torch.nn.Module) -> int:
    """Calculate total number of elements across all tensors in a layer."""
    total = 0
    for tensor in get_layer_tensors(layer).values():
        total += tensor.numel()
    return total
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/model_loader/reload/utils.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/openai_realtime_client.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script demonstrates how to use the vLLM Realtime WebSocket API to perform
audio transcription by uploading an audio file.
Before running this script, you must start the vLLM server with a realtime-capable
model, for example:
vllm serve mistralai/Voxtral-Mini-4B-Realtime-2602 --enforce-eager
Requirements:
- vllm with audio support
- websockets
- librosa
- numpy
The script:
1. Connects to the Realtime WebSocket endpoint
2. Converts an audio file to PCM16 @ 16kHz
3. Sends audio chunks to the server
4. Receives and prints transcription as it streams
"""
import argparse
import asyncio
import base64
import json
import librosa
import numpy as np
import websockets
from vllm.assets.audio import AudioAsset
def audio_to_pcm16_base64(audio_path: str) -> str:
    """
    Load an audio file and return it as base64-encoded PCM16 @ 16kHz mono.
    """
    # Decode and resample to 16 kHz mono in a single call.
    samples, _ = librosa.load(audio_path, sr=16000, mono=True)
    # Scale float samples in [-1, 1] to signed 16-bit PCM.
    pcm_samples = (samples * 32767).astype(np.int16)
    raw_bytes = pcm_samples.tobytes()
    return base64.b64encode(raw_bytes).decode("utf-8")
async def realtime_transcribe(audio_path: str, host: str, port: int, model: str):
    """
    Connect to the Realtime API and transcribe an audio file.

    The protocol steps below are order-sensitive: the server emits
    ``session.created`` first; the client then validates the model with
    ``session.update``, opens the turn with a non-final
    ``input_audio_buffer.commit``, streams base64 PCM16 chunks, and closes
    with a final commit before reading ``transcription.*`` events.

    :param audio_path: path of the audio file to transcribe
    :param host: vLLM server host
    :param port: vLLM server port
    :param model: served model name the session should be validated against
    """
    uri = f"ws://{host}:{port}/v1/realtime"
    async with websockets.connect(uri) as ws:
        # Wait for session.created
        response = json.loads(await ws.recv())
        if response["type"] == "session.created":
            print(f"Session created: {response['id']}")
        else:
            print(f"Unexpected response: {response}")
            return
        # Validate model
        await ws.send(json.dumps({"type": "session.update", "model": model}))
        # Signal ready to start (a non-final commit opens the turn)
        await ws.send(json.dumps({"type": "input_audio_buffer.commit"}))
        # Convert audio file to base64 PCM16
        print(f"Loading audio from: {audio_path}")
        audio_base64 = audio_to_pcm16_base64(audio_path)
        # Send audio in chunks (4KB of raw audio = ~8KB base64)
        chunk_size = 4096
        audio_bytes = base64.b64decode(audio_base64)
        # Ceiling division so a trailing partial chunk is counted.
        total_chunks = (len(audio_bytes) + chunk_size - 1) // chunk_size
        print(f"Sending {total_chunks} audio chunks...")
        for i in range(0, len(audio_bytes), chunk_size):
            chunk = audio_bytes[i : i + chunk_size]
            await ws.send(
                json.dumps(
                    {
                        "type": "input_audio_buffer.append",
                        "audio": base64.b64encode(chunk).decode("utf-8"),
                    }
                )
            )
        # Signal all audio is sent (final commit ends the turn)
        await ws.send(json.dumps({"type": "input_audio_buffer.commit", "final": True}))
        print("Audio sent. Waiting for transcription...\n")
        # Receive transcription
        print("Transcription: ", end="", flush=True)
        while True:
            response = json.loads(await ws.recv())
            if response["type"] == "transcription.delta":
                # Incremental text: print in place without a newline.
                print(response["delta"], end="", flush=True)
            elif response["type"] == "transcription.done":
                print(f"\n\nFinal transcription: {response['text']}")
                if response.get("usage"):
                    print(f"Usage: {response['usage']}")
                break
            elif response["type"] == "error":
                print(f"\nError: {response['error']}")
                break
def main(args):
    """Resolve the audio path (falling back to a bundled sample) and run the client."""
    audio_path = args.audio_path
    if not audio_path:
        # No path given: use the packaged sample clip.
        audio_path = str(AudioAsset("mary_had_lamb").get_local_path())
        print(f"No audio path provided, using default: {audio_path}")
    asyncio.run(realtime_transcribe(audio_path, args.host, args.port, args.model))
if __name__ == "__main__":
    # CLI entry point: options mirror the example invocation in the module
    # docstring.
    parser = argparse.ArgumentParser(
        description="Realtime WebSocket Transcription Client"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="mistralai/Voxtral-Mini-4B-Realtime-2602",
        help="Model that is served and should be pinged.",
    )
    parser.add_argument(
        "--audio_path",
        type=str,
        default=None,
        help="Path to the audio file to transcribe.",
    )
    parser.add_argument(
        "--host",
        type=str,
        default="localhost",
        help="vLLM server host (default: localhost)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="vLLM server port (default: 8000)",
    )
    args = parser.parse_args()
    main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/openai_realtime_client.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/online_serving/openai_realtime_microphone_client.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Minimal Gradio demo for real-time speech transcription using the vLLM Realtime API.
Start the vLLM server first:
vllm serve mistralai/Voxtral-Mini-4B-Realtime-2602 --enforce-eager
Then run this script:
python openai_realtime_microphone_client.py --host localhost --port 8000
Use --share to create a public Gradio link.
Requirements: websockets, numpy, gradio
"""
import argparse
import asyncio
import base64
import json
import queue
import threading
import gradio as gr
import numpy as np
import websockets
SAMPLE_RATE = 16_000  # wire format is PCM16 mono @ 16 kHz
# Global state shared between the Gradio callbacks and the WebSocket thread
audio_queue: queue.Queue = queue.Queue()  # pending base64 PCM16 chunks
transcription_text = ""  # transcript accumulated by receive_transcription
is_running = False  # toggled by start/stop handlers; polled by the send loop
ws_url = ""  # filled in from CLI args in __main__
model = ""  # filled in from CLI args in __main__
async def websocket_handler():
    """Connect to WebSocket and handle audio streaming + transcription.

    Runs two coroutines concurrently: one drains ``audio_queue`` (filled by
    the Gradio ``process_audio`` callback) into the socket, the other appends
    ``transcription.delta`` text onto the global ``transcription_text``.
    """
    global transcription_text, is_running
    async with websockets.connect(ws_url) as ws:
        # Wait for session.created
        await ws.recv()
        # Validate model
        await ws.send(json.dumps({"type": "session.update", "model": model}))
        # Signal ready
        await ws.send(json.dumps({"type": "input_audio_buffer.commit"}))
        async def send_audio():
            while is_running:
                try:
                    # Blocking queue.get runs in a worker thread so the event
                    # loop stays responsive; the short timeout lets the loop
                    # re-check `is_running` regularly.
                    chunk = await asyncio.get_event_loop().run_in_executor(
                        None, lambda: audio_queue.get(timeout=0.1)
                    )
                    await ws.send(
                        json.dumps(
                            {"type": "input_audio_buffer.append", "audio": chunk}
                        )
                    )
                except queue.Empty:
                    continue
        async def receive_transcription():
            global transcription_text
            async for message in ws:
                data = json.loads(message)
                if data.get("type") == "transcription.delta":
                    transcription_text += data["delta"]
        await asyncio.gather(send_audio(), receive_transcription())
def start_websocket():
    """Run the realtime WebSocket handler on a fresh event loop.

    Intended to be executed in a daemon background thread (see
    `start_recording`), so it owns a private asyncio event loop.
    """
    global is_running
    is_running = True
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(websocket_handler())
    except Exception as e:
        # Surface connection problems without killing the Gradio app.
        print(f"WebSocket error: {e}")
    finally:
        # Fix: the original leaked the event loop. Close it so its selector
        # and internal pipes are released when the worker thread exits.
        loop.close()
def start_recording():
    """Reset the transcript and launch the WebSocket worker thread."""
    global transcription_text
    transcription_text = ""
    worker = threading.Thread(target=start_websocket, daemon=True)
    worker.start()
    # Disable Start, enable Stop, and clear the transcript box.
    return gr.update(interactive=False), gr.update(interactive=True), ""
def stop_recording():
    """Signal the sender loop to exit and re-enable the Start button."""
    global is_running
    is_running = False
    # Enable Start, disable Stop, and show the final transcript.
    return (
        gr.update(interactive=True),
        gr.update(interactive=False),
        transcription_text,
    )
def process_audio(audio):
    """Convert one Gradio microphone chunk to base64 PCM16 and enqueue it.

    Returns the transcript accumulated so far so Gradio can refresh the
    textbox after every streamed chunk.
    """
    global transcription_text
    if audio is None or not is_running:
        return transcription_text
    sample_rate, samples = audio
    # Stereo -> mono by averaging the channels.
    if samples.ndim > 1:
        samples = samples.mean(axis=1)
    # Normalize integer PCM to float32 in [-1, 1].
    if samples.dtype == np.int16:
        float_samples = samples.astype(np.float32) / 32767.0
    else:
        float_samples = samples.astype(np.float32)
    # Linearly resample to 16 kHz if the browser delivered another rate.
    if sample_rate != SAMPLE_RATE:
        target_len = int(len(float_samples) * SAMPLE_RATE / sample_rate)
        float_samples = np.interp(
            np.linspace(0, len(float_samples) - 1, target_len),
            np.arange(len(float_samples)),
            float_samples,
        )
    # Back to PCM16, then base64 for the wire format.
    pcm16 = (float_samples * 32767).astype(np.int16)
    audio_queue.put(base64.b64encode(pcm16.tobytes()).decode("utf-8"))
    return transcription_text
# Gradio interface: wires the three callbacks above to the UI controls.
with gr.Blocks(title="Real-time Speech Transcription") as demo:
    gr.Markdown("# Real-time Speech Transcription")
    gr.Markdown("Click **Start** and speak into your microphone.")
    with gr.Row():
        start_btn = gr.Button("Start", variant="primary")
        stop_btn = gr.Button("Stop", variant="stop", interactive=False)
    # streaming=True delivers microphone chunks to process_audio continuously.
    audio_input = gr.Audio(sources=["microphone"], streaming=True, type="numpy")
    transcription_output = gr.Textbox(label="Transcription", lines=5)
    start_btn.click(
        start_recording, outputs=[start_btn, stop_btn, transcription_output]
    )
    stop_btn.click(stop_recording, outputs=[start_btn, stop_btn, transcription_output])
    audio_input.stream(
        process_audio, inputs=[audio_input], outputs=[transcription_output]
    )
if __name__ == "__main__":
    # CLI entry point: parse connection options, then launch the Gradio UI.
    parser = argparse.ArgumentParser(
        description="Realtime WebSocket Transcription with Gradio"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="mistralai/Voxtral-Mini-4B-Realtime-2602",
        help="Model that is served and should be pinged.",
    )
    parser.add_argument(
        "--host", type=str, default="localhost", help="vLLM server host"
    )
    parser.add_argument("--port", type=int, default=8000, help="vLLM server port")
    parser.add_argument(
        "--share", action="store_true", help="Create public Gradio link"
    )
    args = parser.parse_args()
    # websocket_handler reads these module globals when a session starts.
    ws_url = f"ws://{args.host}:{args.port}/v1/realtime"
    model = args.model
    demo.launch(share=args.share)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/online_serving/openai_realtime_microphone_client.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/test_realtime_validation.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import base64
import json
import warnings
import librosa
import numpy as np
import pytest
import websockets
from vllm.assets.audio import AudioAsset
from ...utils import RemoteOpenAIServer
from .conftest import add_attention_backend
# Extra server CLI args required to load Mistral-format checkpoints
# (tokenizer, config, and weights all ship in Mistral's own format).
MISTRAL_FORMAT_ARGS = [
    "--tokenizer_mode",
    "mistral",
    "--config_format",
    "mistral",
    "--load_format",
    "mistral",
]
# Realtime-capable model under test.
MODEL_NAME = "mistralai/Voxtral-Mini-4B-Realtime-2602"
def _get_websocket_url(server: RemoteOpenAIServer) -> str:
    """Convert HTTP URL to WebSocket URL for realtime endpoint."""
    base = server.url_root.replace("http://", "ws://")
    return f"{base}/v1/realtime"
async def receive_event(ws, timeout: float = 60.0) -> dict:
    """Receive one JSON event from the WebSocket, failing after `timeout` seconds."""
    raw = await asyncio.wait_for(ws.recv(), timeout=timeout)
    return json.loads(raw)
async def send_event(ws, event: dict) -> None:
    """Serialize `event` to JSON and send it over the WebSocket."""
    payload = json.dumps(event)
    await ws.send(payload)
@pytest.fixture
def mary_had_lamb_audio_chunks() -> list[str]:
    """Audio split into ~0.1 second chunks (1600 samples @ 16 kHz) for streaming.

    Each chunk is converted to PCM16 and base64-encoded, matching the
    ``input_audio_buffer.append`` wire format. (The previous docstring said
    "~1 second", but 1600 samples at 16 kHz is 0.1 s per chunk.)
    """
    path = AudioAsset("mary_had_lamb").get_local_path()
    audio, _ = librosa.load(str(path), sr=16000, mono=True)
    # Split into ~0.1 second chunks (1600 samples at 16kHz)
    chunk_size = 1600
    chunks = []
    for i in range(0, len(audio), chunk_size):
        chunk = audio[i : i + chunk_size]
        chunk_int16 = (chunk * 32767).astype(np.int16)
        chunk_bytes = chunk_int16.tobytes()
        chunks.append(base64.b64encode(chunk_bytes).decode("utf-8"))
    return chunks
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_multi_chunk_streaming(
    model_name, mary_had_lamb_audio_chunks, rocm_aiter_fa_attention
):
    """Test streaming multiple audio chunks before committing.

    End-to-end: starts a real server, performs the realtime handshake
    (session.created -> session.update), runs a small warm-up turn, then
    streams all fixture chunks in a second turn and checks the concatenated
    deltas equal the final transcription text.
    """
    server_args = ["--enforce-eager", "--max-model-len", "2048"]
    if model_name.startswith("mistralai"):
        server_args += MISTRAL_FORMAT_ARGS
    add_attention_backend(server_args, rocm_aiter_fa_attention)
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        ws_url = _get_websocket_url(remote_server)
        async with websockets.connect(ws_url) as ws:
            # Receive session.created
            event = await receive_event(ws, timeout=30.0)
            assert event["type"] == "session.created"
            await send_event(ws, {"type": "session.update", "model": model_name})
            # Wait for the server to acknowledge the session update.
            try:
                while True:
                    event = await receive_event(ws, timeout=5.0)
                    if event["type"] == "session.updated":
                        break
            except TimeoutError:
                warnings.warn(
                    f"session.updated not received within {5.0}s after "
                    "session.update. The server may not implement this event.",
                    stacklevel=2,
                )
            # (ROCm) Warm-up: send a non-final commit (required to start
            # transcription) with a small audio chunk to trigger aiter
            # compilation on first use.
            await send_event(ws, {"type": "input_audio_buffer.commit"})
            await send_event(
                ws,
                {
                    "type": "input_audio_buffer.append",
                    "audio": mary_had_lamb_audio_chunks[0],
                },
            )
            await send_event(ws, {"type": "input_audio_buffer.commit", "final": True})
            # (ROCm) Drain all warm-up responses with generous timeout for
            # JIT compilation
            warmup_done = False
            while not warmup_done:
                event = await receive_event(ws, timeout=360.0)
                if event["type"] in ("transcription.done", "error"):
                    warmup_done = True
            # Now send the real test audio
            await send_event(ws, {"type": "input_audio_buffer.commit"})
            # Send multiple audio chunks
            for chunk in mary_had_lamb_audio_chunks:
                await send_event(
                    ws, {"type": "input_audio_buffer.append", "audio": chunk}
                )
            # Send commit to end
            await send_event(ws, {"type": "input_audio_buffer.commit", "final": True})
            # Collect transcription deltas
            full_text = ""
            done_received = False
            while not done_received:
                event = await receive_event(ws, timeout=60.0)
                if event["type"] == "transcription.delta":
                    full_text += event["delta"]
                elif event["type"] == "transcription.done":
                    done_received = True
                    assert "text" in event
                elif event["type"] == "error":
                    pytest.fail(f"Received error: {event}")
            # Verify transcription contains expected content
            assert event["type"] == "transcription.done"
            assert event["text"] == full_text
            assert full_text == (
                " First words I spoke in the original phonograph."
                " A little piece of practical poetry. Mary had a little lamb,"
                " it sleeps with quite a flow, and everywhere that Mary went,"
                " the lamb was sure to go."
            )
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_empty_commit_does_not_crash_engine(
    model_name, mary_had_lamb_audio_chunks, rocm_aiter_fa_attention
):
    """Test that committing without audio does not crash the engine.

    Regression test for https://github.com/vllm-project/vllm/issues/34532.
    An empty commit (no prior input_audio_buffer.append) used to trigger
    ``AssertionError: For realtime you must provide a multimodal_embedding
    at every step`` which killed the entire engine process, disconnecting
    every connected client.

    Two connections are made against the same server: the first provokes the
    empty commit; the second runs a normal transcription to prove the engine
    survived.
    """
    server_args = ["--enforce-eager", "--max-model-len", "2048"]
    if model_name.startswith("mistralai"):
        server_args += MISTRAL_FORMAT_ARGS
    add_attention_backend(server_args, rocm_aiter_fa_attention)
    with RemoteOpenAIServer(model_name, server_args) as remote_server:
        ws_url = _get_websocket_url(remote_server)
        # --- First connection: empty commit (no audio appended) ----------
        async with websockets.connect(ws_url) as ws:
            event = await receive_event(ws, timeout=30.0)
            assert event["type"] == "session.created"
            await send_event(ws, {"type": "session.update", "model": model_name})
            try:
                while True:
                    event = await receive_event(ws, timeout=5.0)
                    if event["type"] == "session.updated":
                        break
            except TimeoutError:
                warnings.warn(
                    f"session.updated not received within {5.0}s after "
                    "session.update. The server may not implement this event.",
                    stacklevel=2,
                )
            # Start generation without sending any audio
            await send_event(ws, {"type": "input_audio_buffer.commit"})
            # Immediately signal end-of-audio
            await send_event(ws, {"type": "input_audio_buffer.commit", "final": True})
            # We should get *some* response (error or empty transcription),
            # but the engine must NOT crash.
            # (ROCm) Use generous timeout for first request (aiter JIT compilation)
            event = await receive_event(ws, timeout=360.0)
            assert event["type"] in (
                "error",
                "transcription.done",
                "transcription.delta",
            )
        # --- Second connection: normal transcription ---------------------
        # Verifies the engine is still alive after the empty commit above.
        async with websockets.connect(ws_url) as ws:
            event = await receive_event(ws, timeout=30.0)
            assert event["type"] == "session.created"
            await send_event(ws, {"type": "session.update", "model": model_name})
            try:
                while True:
                    event = await receive_event(ws, timeout=5.0)
                    if event["type"] == "session.updated":
                        break
            except TimeoutError:
                warnings.warn(
                    f"session.updated not received within {5.0}s after "
                    "session.update. The server may not implement this event.",
                    stacklevel=2,
                )
            # Start transcription
            await send_event(ws, {"type": "input_audio_buffer.commit"})
            for chunk in mary_had_lamb_audio_chunks:
                await send_event(
                    ws, {"type": "input_audio_buffer.append", "audio": chunk}
                )
            await send_event(ws, {"type": "input_audio_buffer.commit", "final": True})
            done_received = False
            while not done_received:
                event = await receive_event(ws, timeout=60.0)
                if event["type"] == "transcription.done":
                    done_received = True
                elif event["type"] == "error":
                    pytest.fail(f"Engine error after empty commit: {event}")
            assert done_received
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_realtime_validation.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/entrypoints/openai/realtime/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING
from fastapi import APIRouter, FastAPI, WebSocket
from vllm.entrypoints.openai.realtime.connection import RealtimeConnection
from vllm.entrypoints.openai.realtime.serving import OpenAIServingRealtime
from vllm.logger import init_logger
logger = init_logger(__name__)
if TYPE_CHECKING:
from argparse import Namespace
from starlette.datastructures import State
from vllm.engine.protocol import EngineClient
from vllm.entrypoints.logger import RequestLogger
from vllm.tasks import SupportedTask
else:
RequestLogger = object
router = APIRouter()


@router.websocket("/v1/realtime")
async def realtime_endpoint(websocket: WebSocket):
    """WebSocket endpoint for realtime audio transcription.

    Protocol:
    1. Client connects to ws://host/v1/realtime
    2. Server sends session.created event
    3. Client optionally sends session.update with model/params
    4. Client sends input_audio_buffer.commit when ready
    5. Client sends input_audio_buffer.append events with base64 PCM16 chunks
    6. Server processes and sends transcription.delta events
    7. Server sends transcription.done with final text + usage
    8. Repeat from step 5 for next utterance
    9. Optionally, client sends input_audio_buffer.commit with final=True
       to signal audio input is finished. Useful when streaming audio files

    Audio format: PCM16, 16kHz, mono, base64-encoded
    """
    app = websocket.app
    # Populated once at startup by `init_realtime_state`.
    # NOTE(review): this is None when "realtime" is not a supported task —
    # confirm RealtimeConnection tolerates a None serving object.
    serving = app.state.openai_serving_realtime
    connection = RealtimeConnection(websocket, serving)
    # Blocks for the entire lifetime of the client connection.
    await connection.handle_connection()
def attach_router(app: FastAPI):
    """Attach the realtime router to the FastAPI app.

    Registers the /v1/realtime websocket route defined above; called during
    application construction.
    """
    app.include_router(router)
    logger.info("Realtime API router attached")
def init_realtime_state(
    engine_client: "EngineClient",
    state: "State",
    args: "Namespace",
    request_logger: RequestLogger | None,
    supported_tasks: tuple["SupportedTask", ...],
):
    """Populate `state.openai_serving_realtime`.

    Builds an OpenAIServingRealtime handler when the engine supports the
    "realtime" task; otherwise stores None so the endpoint can detect the
    missing capability.
    """
    if "realtime" in supported_tasks:
        serving = OpenAIServingRealtime(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            log_error_stack=args.log_error_stack,
        )
    else:
        serving = None
    state.openai_serving_realtime = serving
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/realtime/api_router.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/realtime/connection.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import base64
import json
from collections.abc import AsyncGenerator
from http import HTTPStatus
from uuid import uuid4
import numpy as np
from fastapi import WebSocket
from starlette.websockets import WebSocketDisconnect
from vllm import envs
from vllm.entrypoints.openai.engine.protocol import ErrorResponse, UsageInfo
from vllm.entrypoints.openai.realtime.protocol import (
ErrorEvent,
InputAudioBufferAppend,
InputAudioBufferCommit,
SessionCreated,
TranscriptionDelta,
TranscriptionDone,
)
from vllm.entrypoints.openai.realtime.serving import OpenAIServingRealtime
from vllm.exceptions import VLLMValidationError
from vllm.logger import init_logger
logger = init_logger(__name__)
class RealtimeConnection:
"""Manages WebSocket lifecycle and state for realtime transcription.
This class handles:
- WebSocket connection lifecycle (accept, receive, send, close)
- Event routing (session.update, append, commit)
- Audio buffering via asyncio.Queue
- Generation task management
- Error handling and cleanup
"""
    def __init__(self, websocket: WebSocket, serving: OpenAIServingRealtime):
        self.websocket = websocket
        # Unique id used in logs and to build per-request ids.
        self.connection_id = f"ws-{uuid4()}"
        self.serving = serving
        # Decoded float32 audio chunks; `None` is the end-of-audio sentinel.
        self.audio_queue: asyncio.Queue[np.ndarray | None] = asyncio.Queue()
        # In-flight transcription task, if any (see `start_generation`).
        self.generation_task: asyncio.Task | None = None
        self._is_connected = False
        # Set after a session.update names a model; checked before commits.
        self._is_model_validated = False
        self._max_audio_filesize_mb = envs.VLLM_MAX_AUDIO_CLIP_FILESIZE_MB
    async def handle_connection(self):
        """Main connection loop.

        Accepts the socket, emits `session.created`, then dispatches each
        incoming text frame to `handle_event` until the client disconnects.
        Per-event failures are reported to the client and the loop continues;
        only a disconnect or an unexpected connection-level error ends it.
        """
        await self.websocket.accept()
        logger.debug("WebSocket connection accepted: %s", self.connection_id)
        self._is_connected = True
        # Send session created event
        await self.send(SessionCreated())
        try:
            while True:
                message = await self.websocket.receive_text()
                try:
                    event = json.loads(message)
                    await self.handle_event(event)
                except json.JSONDecodeError:
                    await self.send_error("Invalid JSON", "invalid_json")
                except Exception as e:
                    logger.exception("Error handling event: %s", e)
                    await self.send_error(str(e), "processing_error")
        except WebSocketDisconnect:
            logger.debug("WebSocket disconnected: %s", self.connection_id)
            self._is_connected = False
        except Exception as e:
            logger.exception("Unexpected error in connection: %s", e)
        finally:
            # Always runs, including on disconnect.
            await self.cleanup()
def _check_model(self, model: str | None) -> None | ErrorResponse:
if self.serving._is_model_supported(model):
return None
return self.serving.create_error_response(
message=f"The model `{model}` does not exist.",
err_type="NotFoundError",
status_code=HTTPStatus.NOT_FOUND,
param="model",
)
    async def handle_event(self, event: dict):
        """Route events to handlers.

        Supported event types:
        - session.update: Configure model
        - input_audio_buffer.append: Add audio chunk to queue
        - input_audio_buffer.commit: Start transcription generation
        """
        event_type = event.get("type")
        if event_type == "session.update":
            logger.debug("Session updated: %s", event)
            # NOTE(review): the ErrorResponse returned by _check_model is
            # discarded and the session is marked validated regardless —
            # confirm whether an unsupported model should be rejected here.
            self._check_model(event["model"])
            self._is_model_validated = True
        elif event_type == "input_audio_buffer.append":
            append_event = InputAudioBufferAppend(**event)
            try:
                audio_bytes = base64.b64decode(append_event.audio)
                # Convert PCM16 bytes to float32 numpy array
                audio_array = (
                    np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32)
                    / 32768.0
                )
                # NOTE(review): this compares the *sample count* divided by
                # 1024**2 against a megabyte limit, not the byte size of the
                # clip — confirm the intended units.
                if len(audio_array) / 1024**2 > self._max_audio_filesize_mb:
                    raise VLLMValidationError(
                        "Maximum file size exceeded",
                        parameter="audio_filesize_mb",
                        value=len(audio_array) / 1024**2,
                    )
                if len(audio_array) == 0:
                    raise VLLMValidationError("Can't process empty audio.")
                # Put audio chunk in queue
                self.audio_queue.put_nowait(audio_array)
            except Exception as e:
                logger.error("Failed to decode audio: %s", e)
                await self.send_error("Invalid audio data", "invalid_audio")
        elif event_type == "input_audio_buffer.commit":
            if not self._is_model_validated:
                err_msg = (
                    "Model not validated. Make sure to validate the"
                    " model by sending a session.update event."
                )
                # NOTE(review): there is no early return here, so the commit
                # below is still processed after reporting this error —
                # confirm that is intended.
                await self.send_error(
                    err_msg,
                    "model_not_validated",
                )
            commit_event = InputAudioBufferCommit(**event)
            # final signals that the audio is finished
            if commit_event.final:
                self.audio_queue.put_nowait(None)
            else:
                await self.start_generation()
        else:
            await self.send_error(f"Unknown event type: {event_type}", "unknown_event")
async def audio_stream_generator(self) -> AsyncGenerator[np.ndarray, None]:
"""Generator that yields audio chunks from the queue."""
while True:
audio_chunk = await self.audio_queue.get()
if audio_chunk is None: # Sentinel value to stop
break
yield audio_chunk
    async def start_generation(self):
        """Start the transcription generation task.

        No-op (with a warning) while a previous generation task is still
        running, so repeated non-final commits cannot spawn concurrent
        generations for the same connection.
        """
        if self.generation_task is not None and not self.generation_task.done():
            logger.warning("Generation already in progress, ignoring commit")
            return
        # Create audio stream generator
        audio_stream = self.audio_stream_generator()
        # Queue used to feed generated token ids back into the prompt.
        input_stream = asyncio.Queue[list[int]]()
        # Transform to StreamingInput generator
        streaming_input_gen = self.serving.transcribe_realtime(
            audio_stream, input_stream
        )
        # Start generation task
        self.generation_task = asyncio.create_task(
            self._run_generation(streaming_input_gen, input_stream)
        )
async def _run_generation(
    self,
    streaming_input_gen: AsyncGenerator,
    input_stream: asyncio.Queue[list[int]],
):
    """Run the generation and stream results back to the client.

    This method:
    1. Creates sampling parameters from session config
    2. Passes the streaming input generator to engine.generate()
    3. Streams transcription.delta events as text is generated
    4. Sends final transcription.done event with usage stats
    5. Feeds generated token IDs back to input_stream for next iteration
    6. Cleans up the audio queue

    :param streaming_input_gen: generator of engine inputs built from the
        incoming audio (created by ``start_generation``)
    :param input_stream: queue receiving each output's token IDs so they can
        serve as context for subsequent iterations
    """
    request_id = f"rt-{self.connection_id}-{uuid4()}"
    full_text = ""
    prompt_token_ids_len: int = 0
    completion_tokens_len: int = 0
    try:
        # Create sampling params. Imported lazily; greedy decoding
        # (temperature=0.0) with DELTA outputs so each engine result
        # carries only the newly generated tokens.
        from vllm.sampling_params import RequestOutputKind, SamplingParams

        sampling_params = SamplingParams.from_optional(
            temperature=0.0,
            max_tokens=self.serving.model_cls.realtime_max_tokens,
            output_kind=RequestOutputKind.DELTA,
            skip_clone=True,
        )
        # Pass the streaming input generator to the engine.
        # The engine will consume audio chunks as they arrive and
        # stream back transcription results incrementally.
        result_gen = self.serving.engine_client.generate(
            prompt=streaming_input_gen,
            sampling_params=sampling_params,
            request_id=request_id,
        )
        # Stream results back to client as they're generated.
        async for output in result_gen:
            if output.outputs and len(output.outputs) > 0:
                # Record the prompt length once, from the first output
                # that carries prompt_token_ids.
                if not prompt_token_ids_len and output.prompt_token_ids:
                    prompt_token_ids_len = len(output.prompt_token_ids)
                delta = output.outputs[0].text
                full_text += delta
                # Append output tokens to the input queue: they become
                # context for the next iteration.
                input_stream.put_nowait(list(output.outputs[0].token_ids))
                await self.send(TranscriptionDelta(delta=delta))
                completion_tokens_len += len(output.outputs[0].token_ids)
            if not self._is_connected:
                # Finish because the websocket connection was killed.
                break
        usage = UsageInfo(
            prompt_tokens=prompt_token_ids_len,
            completion_tokens=completion_tokens_len,
            total_tokens=prompt_token_ids_len + completion_tokens_len,
        )
        # Send final completion event.
        await self.send(TranscriptionDone(text=full_text, usage=usage))
        # Clear queue for next utterance.
        while not self.audio_queue.empty():
            self.audio_queue.get_nowait()
    except Exception as e:
        logger.exception("Error in generation: %s", e)
        await self.send_error(str(e), "processing_error")
async def send(
    self, event: SessionCreated | TranscriptionDelta | TranscriptionDone
):
    """Serialize an event to JSON and push it over the websocket."""
    await self.websocket.send_text(event.model_dump_json())
async def send_error(self, message: str, code: str | None = None):
    """Send error event to client as a serialized ErrorEvent."""
    payload = ErrorEvent(error=message, code=code).model_dump_json()
    await self.websocket.send_text(payload)
async def cleanup(self):
    """Release per-connection resources on shutdown."""
    # Unblock the audio stream generator with the stop sentinel.
    self.audio_queue.put_nowait(None)
    # Cancel any in-flight generation task.
    task = self.generation_task
    if task and not task.done():
        task.cancel()
    logger.debug("Connection cleanup complete: %s", self.connection_id)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/realtime/connection.py",
"license": "Apache License 2.0",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/realtime/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from typing import Literal
from pydantic import Field
from vllm.entrypoints.openai.engine.protocol import (
OpenAIBaseModel,
UsageInfo,
)
from vllm.utils import random_uuid
# Client -> Server Events
class InputAudioBufferAppend(OpenAIBaseModel):
    """Append audio chunk to buffer (client -> server)."""

    type: Literal["input_audio_buffer.append"] = "input_audio_buffer.append"
    # base64-encoded PCM16 @ 16kHz
    audio: str
class InputAudioBufferCommit(OpenAIBaseModel):
    """Process accumulated audio buffer (client -> server)."""

    type: Literal["input_audio_buffer.commit"] = "input_audio_buffer.commit"
    # True signals that no more audio will follow for this session.
    final: bool = False
# Server -> Client Events
class SessionUpdate(OpenAIBaseModel):
    """Configure session parameters (client -> server)."""

    type: Literal["session.update"] = "session.update"
    # Model name to validate the session against; optional.
    model: str | None = None
class SessionCreated(OpenAIBaseModel):
    """Connection established notification (server -> client)."""

    type: Literal["session.created"] = "session.created"
    # Unique session identifier, generated per connection.
    id: str = Field(default_factory=lambda: f"sess-{random_uuid()}")
    # Unix timestamp (seconds) at session creation.
    created: int = Field(default_factory=lambda: int(time.time()))
class TranscriptionDelta(OpenAIBaseModel):
    """Incremental transcription text (server -> client)."""

    type: Literal["transcription.delta"] = "transcription.delta"
    # Incremental text: only the newly generated portion.
    delta: str
class TranscriptionDone(OpenAIBaseModel):
    """Final transcription with usage stats (server -> client)."""

    type: Literal["transcription.done"] = "transcription.done"
    # Complete accumulated transcription.
    text: str
    # Token usage for the whole request; may be omitted.
    usage: UsageInfo | None = None
class ErrorEvent(OpenAIBaseModel):
    """Error notification (server -> client)."""

    type: Literal["error"] = "error"
    # Human-readable error message.
    error: str
    # Machine-readable error code (e.g. "invalid_audio"); optional.
    code: str | None = None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/realtime/protocol.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/realtime/serving.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
from collections.abc import AsyncGenerator
from functools import cached_property
from typing import Literal, cast
import numpy as np
from vllm.engine.protocol import EngineClient, StreamingInput
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.engine.serving import OpenAIServing
from vllm.entrypoints.openai.models.serving import OpenAIServingModels
from vllm.inputs.data import PromptType
from vllm.logger import init_logger
from vllm.model_executor.models.interfaces import SupportsRealtime
from vllm.renderers.inputs.preprocess import parse_model_prompt
logger = init_logger(__name__)
class OpenAIServingRealtime(OpenAIServing):
    """Realtime audio transcription service via WebSocket streaming.

    Provides streaming audio-to-text transcription by transforming audio chunks
    into StreamingInput objects that can be consumed by the engine.
    """

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        log_error_stack: bool = False,
    ):
        """
        :param engine_client: client used to submit generation requests
        :param models: model registry of the OpenAI-compatible server
        :param request_logger: optional request logger
        :param log_error_stack: whether to log stack traces on errors
        """
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            log_error_stack=log_error_stack,
        )
        # Fixed task type for this serving class.
        self.task_type: Literal["realtime"] = "realtime"
        logger.info("OpenAIServingRealtime initialized for task: %s", self.task_type)

    @cached_property
    def model_cls(self) -> type[SupportsRealtime]:
        """Get the model class that supports transcription (resolved once)."""
        from vllm.model_executor.model_loader import get_model_cls

        model_cls = get_model_cls(self.model_config)
        # The realtime entrypoint requires a SupportsRealtime model; the cast
        # records that expectation for the type checker.
        return cast(type[SupportsRealtime], model_cls)

    async def transcribe_realtime(
        self,
        audio_stream: AsyncGenerator[np.ndarray, None],
        input_stream: asyncio.Queue[list[int]],
    ) -> AsyncGenerator[StreamingInput, None]:
        """Transform audio stream into StreamingInput for engine.generate().

        Args:
            audio_stream: Async generator yielding float32 numpy audio arrays
            input_stream: Queue containing context token IDs from previous
                generation outputs. Used for autoregressive multi-turn
                processing where each generation's output becomes the context
                for the next iteration.

        Yields:
            StreamingInput objects containing audio prompts for the engine
        """
        model_config = self.model_config
        renderer = self.renderer
        # Cast needed: mypy cannot infer the async-generator element type of
        # buffer_realtime_audio here.
        # TODO(Patrick) - fix this
        stream_input_iter = cast(
            AsyncGenerator[PromptType, None],
            self.model_cls.buffer_realtime_audio(
                audio_stream, input_stream, model_config
            ),
        )
        async for prompt in stream_input_iter:
            # Render each buffered prompt into an engine prompt and wrap it.
            parsed_prompt = parse_model_prompt(model_config, prompt)
            (engine_prompt,) = await renderer.render_cmpl_async([parsed_prompt])
            yield StreamingInput(prompt=engine_prompt)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/realtime/serving.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/openpangu_vl.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Adapted from vllm/model_executor/models/qwen2_5_vl.py
# Copyright 2025 The vLLM team.
# Copyright 2025 The Qwen Team.
#
# This file is a part of the vllm-ascend project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Callable, Iterable, Iterator, Mapping, Sequence
from functools import lru_cache, partial
from typing import Annotated, Literal, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torchvision.transforms import v2
from transformers.utils import logging
from vllm.config import VllmConfig
from vllm.distributed import parallel_state
from vllm.distributed import utils as dist_utils
from vllm.model_executor.layers.activation import _ACTIVATION_REGISTRY
from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.quantization.gptq import GPTQConfig
from vllm.model_executor.layers.quantization.gptq_marlin import GPTQMarlinConfig
from vllm.model_executor.layers.rotary_embedding.common import ApplyRotaryEmb
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMRoPE,
SupportsMultiModal,
SupportsPP,
)
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.qwen2_5_vl import (
Qwen2_5_VLDummyInputsBuilder,
Qwen2_5_VLMultiModalProcessor,
Qwen2_5_VLProcessingInfo,
)
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalFeatureSpec,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import MultiModalDataItems
from vllm.multimodal.processing import (
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.sequence import IntermediateTensors
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from vllm.v1.attention.backends.registry import AttentionBackendEnum
from .vision import get_vit_attn_backend
logger = logging.get_logger(__name__)
class OpenPanguVisionAttention(nn.Module):
    """Tensor-parallel self-attention for the Pangu-VL vision encoder.

    QKV are computed by a fused column-parallel projection, rotary position
    embeddings are applied to Q and K, and variable-length attention is run
    via MMEncoderAttention using ``cu_seqlens`` boundaries.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        projection_size: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        """
        :param embed_dim: input/output hidden size
        :param num_heads: total number of attention heads (before TP split)
        :param projection_size: total Q/K/V projection width
        :param quant_config: optional quantization configuration
        :param prefix: parameter-name prefix used for weight loading
        """
        super().__init__()
        # Per-head dimension, derived from the *total* projection width.
        self.hidden_size_per_attention_head = dist_utils.divide(
            projection_size, num_heads
        )
        self.tp_size = parallel_state.get_tensor_model_parallel_world_size()
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        # Heads handled by this TP rank.
        self.num_attention_heads_per_partition = dist_utils.divide(
            num_heads, self.tp_size
        )
        self.qkv = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=num_heads,
            total_num_kv_heads=num_heads,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv",
        )
        self.proj = RowParallelLinear(
            input_size=projection_size,
            output_size=embed_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.proj",
        )
        self.attn = MMEncoderAttention(
            num_heads=self.num_attention_heads_per_partition,
            head_size=self.hidden_size_per_attention_head,
            scale=self.hidden_size_per_attention_head**-0.5,
            prefix=f"{prefix}.attn",
        )
        # Rotary application is always enabled for this encoder.
        self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
    ) -> torch.Tensor:
        """Attend over packed patch sequences.

        :param x: packed tokens, shape (seq, hidden)
        :param cu_seqlens: cumulative per-sequence boundaries (leading 0)
        :param cos: rotary cosine table
        :param sin: rotary sine table
        :return: attended tokens, shape (seq, hidden)
        """
        seq_length, _ = x.size()
        x, bias = self.qkv(x)
        if bias is not None:
            x = x + bias
        # Split fused QKV along the hidden dimension.
        q, k, v = x.chunk(3, dim=1)
        # Reshape each to (batch=1, seq, heads, head_dim).
        q, k, v = (
            rearrange(
                x, "s (b n d) -> b s n d", d=self.hidden_size_per_attention_head, b=1
            ).contiguous()
            for x in (q, k, v)
        )
        # Rotate Q and K in a single call by stacking them along the batch dim.
        qk_concat = torch.cat([q, k], dim=0)
        qk_rotated = self.apply_rotary_emb(qk_concat, cos, sin)
        q, k = torch.chunk(qk_rotated, 2, dim=0)
        # Longest packed sequence; required by varlen attention kernels.
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
        context_layer = self.attn(
            query=q,
            key=k,
            value=v,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        # Back to packed (seq, hidden) layout for the output projection.
        context_layer = rearrange(
            context_layer, "b s h d -> s (b h d)", b=1
        ).contiguous()
        output, bias = self.proj(context_layer)
        if bias is not None:
            output = output + bias
        return output
class OpenPanguVisionMLP(nn.Module):
    """Feed-forward block of the vision encoder.

    Uses a merged gate/up projection when ``vision_config.hidden_act`` is
    ``"silu"``; otherwise a plain up-projection -> activation -> down-projection.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: int,
        bias: bool = False,
        act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
        vision_config=None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        """
        :param in_features: input/output feature size
        :param hidden_features: intermediate size (may be padded, see below)
        :param bias: whether the linear layers carry bias
        :param act_fn: activation applied between up and down projections
        :param vision_config: HF vision config (``hidden_act`` is read here)
        :param quant_config: optional quantization configuration
        :param prefix: parameter-name prefix for weight loading
        """
        super().__init__()
        self.hidden_act = vision_config.hidden_act
        if self.hidden_act == "silu":
            tp_size = parallel_state.get_tensor_model_parallel_world_size()
            # Pad the intermediate size up to a multiple of the TP degree so
            # the merged gate/up projection splits evenly across ranks
            # (load_weights zero-pads checkpoint weights to match).
            if hidden_features % tp_size != 0:
                hidden_features = (hidden_features + tp_size - 1) // tp_size * tp_size
            self.gate_up_proj = MergedColumnParallelLinear(
                input_size=in_features,
                output_sizes=[hidden_features] * 2,
                bias=bias,
                quant_config=quant_config,
                prefix=f"{prefix}.gate_up_proj",
            )
        else:
            self.up_proj = ColumnParallelLinear(
                in_features,
                hidden_features,
                bias=bias,
                quant_config=quant_config,
                prefix=f"{prefix}.up_proj",
            )
        self.down_proj = RowParallelLinear(
            hidden_features,
            in_features,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        self.act_fn = act_fn

    def forward(self, x: torch.Tensor):
        if self.hidden_act == "silu":
            # Gated path: act_fn is expected to consume the fused [gate, up]
            # output (presumably a SiLU-and-mul op from _ACTIVATION_REGISTRY
            # — TODO confirm against the registry).
            x, _ = self.gate_up_proj(x)
        else:
            x, _ = self.up_proj(x)
        x = self.act_fn(x)
        x, _ = self.down_proj(x)
        return x
class OpenPanguVisionBlock(nn.Module):
    """Single pre-norm transformer block: residual attention + residual MLP."""

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_hidden_dim: int,
        act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
        norm_layer: Callable[[int], nn.Module] | None = None,
        vision_config=None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        """
        :param dim: hidden size of the block
        :param num_heads: number of attention heads
        :param mlp_hidden_dim: intermediate size of the MLP
        :param act_fn: activation callable forwarded to the MLP
        :param norm_layer: norm factory; defaults to LayerNorm(eps=1e-6)
        :param vision_config: HF vision config forwarded to the MLP
        :param quant_config: optional quantization configuration
        :param prefix: parameter-name prefix for weight loading
        """
        super().__init__()
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
        self.attn = OpenPanguVisionAttention(
            embed_dim=dim,
            num_heads=num_heads,
            projection_size=dim,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        self.mlp = OpenPanguVisionMLP(
            dim,
            mlp_hidden_dim,
            act_fn=act_fn,
            bias=True,
            vision_config=vision_config,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        cos: torch.Tensor,
        sin: torch.Tensor,
    ) -> torch.Tensor:
        """Apply residual attention, then residual MLP (pre-norm order)."""
        hidden_states = hidden_states + self.attn(
            self.norm1(hidden_states), cu_seqlens=cu_seqlens, cos=cos, sin=sin
        )
        hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
        return hidden_states
class OpenPanguVisionRotaryEmbedding(nn.Module):
    """Rotary position-angle table for the vision encoder.

    Lazily computes ``outer(positions, inv_freq)`` and caches it; the cache
    is grown geometrically (doubled) whenever a longer sequence is requested
    so repeated calls with growing lengths rarely rebuild it.

    Fix over the original: ``forward(0)`` before any cache fill used to
    return ``None`` (the guard skipped the build and the ternary fell
    through), which would crash downstream slicing; a tensor is now always
    returned. Values for ``seqlen > 0`` are unchanged.
    """

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        """
        :param dim: number of rotary frequency pairs produced per position;
            output rows have ``dim // 2`` angles (theta**(-2i/dim))
        :param theta: rotary frequency base
        """
        super().__init__()
        # Plain attribute (not a registered buffer) — matches the original;
        # NOTE(review): it will not follow .to(device), confirm callers
        # always use it on the construction device.
        self.inv_freq = 1.0 / (
            theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)
        )
        self._seq_len_cached = 0
        self._freqs_cached: torch.Tensor | None = None

    def update_freqs_cache(self, seqlen: int) -> None:
        """Ensure the cached angle table covers at least ``seqlen`` rows."""
        if self._freqs_cached is None or seqlen > self._seq_len_cached:
            # Over-allocate 2x so slightly longer future requests hit cache;
            # clamp to >= 1 so the table is materialized even for seqlen=0.
            seqlen = max(seqlen * 2, 1)
            self._seq_len_cached = seqlen
            seq = torch.arange(
                seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype
            )
            self._freqs_cached = torch.outer(seq, self.inv_freq)

    def forward(self, seqlen: int) -> torch.Tensor:
        """Return the ``(seqlen, dim // 2)`` angle table for positions 0..seqlen-1."""
        self.update_freqs_cache(seqlen)
        return self._freqs_cached[:seqlen]
class OpenPanguVisionPatchEmbed(nn.Module):
    """Embed flattened image patches with the (flattened) Conv3d projection.

    The projection is stored as a Conv3d so checkpoint weights load
    unchanged, but it is applied as a single matmul over pre-flattened
    patch vectors.
    """

    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        in_channels: int = 3,
        hidden_size: int = 1152,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.hidden_size = hidden_size
        # Flattened length of one (temporal, channel, h, w) patch.
        self.input_size = (
            self.patch_size * self.patch_size * in_channels * self.temporal_patch_size
        )
        kernel_size = (temporal_patch_size, patch_size, patch_size)
        self.proj = nn.Conv3d(
            in_channels,
            hidden_size,
            kernel_size=kernel_size,
            stride=kernel_size,
            bias=False,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if x.shape[-1] != self.input_size:
            # Shorter-than-expected patches are duplicated along the feature
            # axis until they match input_size.
            # NOTE(review): presumably this fills in a missing temporal copy
            # for single-frame inputs — confirm against the caller.
            spatial = x.reshape(-1, self.patch_size * self.patch_size)
            x = torch.cat([spatial, spatial], dim=-1).reshape(-1, self.input_size)
        flat_weight = self.proj.weight.data.view(self.hidden_size, -1)
        return x.matmul(flat_weight.transpose(0, 1))
class OpenPanguVisionPatchMerger(nn.Module):
    """Merge each group of ``spatial_merge_size**2`` patch features into one token.

    Normalizes with ``ln_q``, flattens each merge group into a vector of size
    ``context_dim * spatial_merge_size**2``, then maps it to ``d_model`` via a
    Linear -> GELU -> Linear MLP.
    """

    def __init__(
        self,
        d_model: int,
        context_dim: int,
        norm_layer: Callable[[int], nn.Module] | None = None,
        spatial_merge_size: int = 2,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        """
        :param d_model: output feature size
        :param context_dim: per-patch feature size before merging
        :param norm_layer: norm factory; defaults to LayerNorm(eps=1e-6)
        :param spatial_merge_size: side length of the merge window
        :param quant_config: optional quantization configuration
        :param prefix: parameter-name prefix for weight loading
        """
        super().__init__()
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        # Size of one flattened merge group.
        self.hidden_size = context_dim * (spatial_merge_size**2)
        self.ln_q = norm_layer(context_dim)
        self.mlp = nn.Sequential(
            ColumnParallelLinear(
                self.hidden_size,
                self.hidden_size,
                bias=True,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp.0",
                return_bias=False,
            ),
            nn.GELU(),
            RowParallelLinear(
                self.hidden_size,
                d_model,
                bias=True,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp.2",
                return_bias=False,
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (n_patches, context_dim) -> (n_tokens, merge_group) -> (n_tokens, d_model)
        return self.mlp(self.ln_q(x).view(-1, self.hidden_size))
class OpenPanguVisionTransformer(nn.Module):
    """Pangu-VL vision encoder.

    Qwen2.5-VL-style windowed ViT: patch embedding, 2D rotary position
    embeddings, window-reordered blocks (full attention only at
    ``fullatt_block_indexes``), multi-layer feature selection feeding one
    patch merger per selected layer, and a final projection into the
    language-model embedding space.
    """

    def __init__(
        self,
        vision_config,
        out_hidden_size,
        hidden_size,
        norm_eps: float = 1e-6,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        interleaved=False,
    ) -> None:
        """
        :param vision_config: HF vision config (sizes, depth, window, ...)
        :param out_hidden_size: input width of the final vision projection
        :param hidden_size: output width of the final vision projection
        :param norm_eps: epsilon for the RMSNorm layers
        :param quant_config: optional quantization configuration
        :param prefix: parameter-name prefix for weight loading
        :param interleaved: stored but not used in this class —
            NOTE(review): confirm intended use
        """
        super().__init__()
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.window_size = vision_config.window_size
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        # Blocks listed here use full attention; all others attend per window.
        self.fullatt_block_indexes = vision_config.fullatt_block_indexes
        # Number of patches merged into one output token.
        self.spatial_merge_unit = self.spatial_merge_size**2
        norm_layer = partial(RMSNorm, eps=norm_eps)
        self.interleaved = interleaved
        self.out_hidden_size = vision_config.out_hidden_size
        self.hidden_act = vision_config.hidden_act
        head_dim = self.hidden_size // self.num_heads
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
        )
        # Only FlashAttention is supported for this encoder.
        if self.attn_backend not in {
            AttentionBackendEnum.FLASH_ATTN,
        }:
            raise RuntimeError(
                f"Pangu-VL does not support {self.attn_backend} backend now."
            )
        # Half of head_dim: 2D rope splits frequencies between h and w axes.
        self.rotary_pos_emb = OpenPanguVisionRotaryEmbedding(head_dim // 2)
        self.patch_embed = OpenPanguVisionPatchEmbed(
            patch_size=vision_config.patch_size,
            temporal_patch_size=vision_config.temporal_patch_size,
            in_channels=vision_config.in_channels,
            hidden_size=self.hidden_size,
        )
        self.blocks = nn.ModuleList(
            [
                OpenPanguVisionBlock(
                    dim=self.hidden_size,
                    num_heads=self.num_heads,
                    mlp_hidden_dim=vision_config.intermediate_size,
                    act_fn=_ACTIVATION_REGISTRY[vision_config.hidden_act],
                    vision_config=vision_config,
                    norm_layer=norm_layer,
                    quant_config=quant_config,
                    prefix=f"{prefix}.blocks.{layer_idx}",
                )
                for layer_idx in range(vision_config.depth)
            ]
        )
        self.tp_size = parallel_state.get_tensor_model_parallel_world_size()
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        self.hidden_size_per_attention_head = dist_utils.divide(
            self.hidden_size, self.num_heads
        )
        # Which encoder layers feed the mergers, as negative offsets from the
        # last layer (default: last and third-from-last).
        self.select_layer = getattr(
            vision_config, "mm_unit_vision_select_layer", [-1, -3]
        )
        # Convert to absolute layer indices, reverse to deepest-first, then
        # rebuild the negative offsets used to index `intermediates` in
        # forward(); take_indices drives collection during the block loop.
        self.select_index = [vision_config.depth + i for i in self.select_layer]
        self.select_index = self.select_index[::-1]
        self.select_layer = [-1 * (i + 1) for i in range(len(self.select_index))]
        self.take_indices = self.select_index
        self.final_layernorm = RMSNorm(self.hidden_size, eps=norm_eps)
        # One merger per selected layer; their outputs are summed in forward().
        self.merger = nn.ModuleList(
            [
                OpenPanguVisionPatchMerger(
                    d_model=vision_config.out_hidden_size,
                    context_dim=self.hidden_size,
                    norm_layer=norm_layer,
                    spatial_merge_size=self.spatial_merge_size,
                    quant_config=quant_config,
                    prefix=f"{prefix}.merger.{i}",
                )
                for i in range(len(self.select_layer))
            ]
        )
        # Maps merged vision features into the LM embedding space.
        self.vision_projection = ProjectionSingle(out_hidden_size, hidden_size)

    @property
    def dtype(self) -> torch.dtype:
        """Dtype of the patch-embedding weights (the encoder's working dtype)."""
        return self.patch_embed.proj.weight.dtype

    @property
    def device(self) -> torch.device:
        """Device hosting the patch-embedding weights."""
        return self.patch_embed.proj.weight.device

    def cal_cos_sin(self, rotary_pos_emb):
        """Split the rotary angle table into cosine and sine components."""
        cos = rotary_pos_emb.cos()
        sin = rotary_pos_emb.sin()
        return cos, sin

    def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
        """Build per-patch 2D rotary embeddings for every (t, h, w) grid.

        Positions are laid out in spatial-merge-group order so they match the
        patch ordering produced by the processor.
        """
        # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py for details. #L209 # noqa: E501
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            # Reorder row/col ids into merge-group-major order.
            hpos_ids = (
                hpos_ids.reshape(
                    h // self.spatial_merge_size,
                    self.spatial_merge_size,
                    w // self.spatial_merge_size,
                    self.spatial_merge_size,
                )
                .permute(0, 2, 1, 3)
                .flatten()
            )
            wpos_ids = (
                wpos_ids.reshape(
                    h // self.spatial_merge_size,
                    self.spatial_merge_size,
                    w // self.spatial_merge_size,
                    self.spatial_merge_size,
                )
                .permute(0, 2, 1, 3)
                .flatten()
            )
            # Repeat the same spatial layout for every temporal frame.
            pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        # Gather per-axis angles and concatenate (h-angles then w-angles).
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb

    def get_window_index(self, grid_thw):
        """Compute the window-major patch permutation and window boundaries.

        Returns the permutation (at spatial-merge-unit granularity) that
        groups patches by attention window, plus cumulative per-window
        sequence lengths (patch granularity) for the window-attention blocks.
        """
        # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py for details. #L238 # noqa: E501
        window_index: list = []
        cu_window_seqlens: list = [0]
        window_index_id = 0
        # Window side length measured in merged-token units.
        vit_merger_window_size = (
            self.window_size // self.spatial_merge_size // self.patch_size
        )
        for grid_t, grid_h, grid_w in grid_thw:
            llm_grid_h = grid_h // self.spatial_merge_size
            llm_grid_w = grid_w // self.spatial_merge_size
            index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(
                grid_t, llm_grid_h, llm_grid_w
            )
            # Pad the merged grid up to whole windows; -100 marks padding.
            pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
            pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
            num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
            num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
            index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
            index_padded = index_padded.reshape(
                grid_t,
                num_windows_h,
                vit_merger_window_size,
                num_windows_w,
                vit_merger_window_size,
            )
            # Bring the two window axes together so each window is contiguous.
            index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
                grid_t,
                num_windows_h * num_windows_w,
                vit_merger_window_size,
                vit_merger_window_size,
            )
            # Real (non-padding) token count per window.
            seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
            index_padded = index_padded.reshape(-1)
            index_new = index_padded[index_padded != -100]
            window_index.append(index_new + window_index_id)
            # Scale to patch granularity and offset by the running total.
            cu_seqlens_tmp = (
                seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1]
            )
            cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
            window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
        window_index = torch.cat(window_index, dim=0)
        return window_index, cu_window_seqlens

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: torch.Tensor,
    ) -> torch.Tensor:
        """Encode packed patches into merged, projected vision embeddings.

        :param x: flattened patches, shape (total_patches, patch_features)
        :param grid_thw: per-item (t, h, w) patch grids
        :return: merged vision embeddings in the original patch order
        """
        # compute cu_seqlens: each (t, h, w) item contributes t full-attention
        # sequences of h*w patches each.
        cu_seqlens = (
            torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0])
            .to(torch.int32)
            .to(x.device)
        )
        cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)
        x = self.patch_embed(x)
        rotary_pos_emb = self.rot_pos_emb(grid_thw)
        window_index, cu_window_seqlens = self.get_window_index(grid_thw)
        cu_window_seqlens = torch.tensor(
            cu_window_seqlens,
            device=x.device,
            dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
        )
        # Padding-only windows produce repeated boundaries; drop them.
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
        seq_len, _ = x.size()
        # Permute tokens (and their rotary angles) into window-major order,
        # moving whole spatial-merge units together.
        x = x.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        x = x[window_index, :, :]
        x = x.reshape(seq_len, -1)
        rotary_pos_emb = rotary_pos_emb.reshape(
            seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1
        )
        rotary_pos_emb = rotary_pos_emb[window_index, :, :]
        rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
        cos, sin = self.cal_cos_sin(rotary_pos_emb.to(x.dtype))
        intermediates = []
        for layer_num, blk in enumerate(self.blocks):
            # Full attention for designated layers; window attention otherwise.
            if layer_num in self.fullatt_block_indexes:
                cu_seqlens_now = cu_seqlens
            else:
                cu_seqlens_now = cu_window_seqlens
            x = blk(x, cu_seqlens=cu_seqlens_now, cos=cos, sin=sin)
            # Collect (normalized) hidden states of the selected layers.
            if layer_num in self.take_indices:
                ln_hs = self.final_layernorm(x)
                intermediates.append(ln_hs)
        # One merger per selected layer; sum their outputs.
        image_embeddings_list = []
        for idx, sl in enumerate(self.select_layer):
            image_embeddings_list.append(self.merger[idx](intermediates[sl]))
        x = sum(image_embeddings_list)
        # Undo the window permutation to restore the original patch order.
        reverse_indices = torch.argsort(window_index)
        x = x[reverse_indices, :]
        x = self.vision_projection(x)
        return x

    def load_weights(self, weights) -> set[str]:
        """Load checkpoint weights, fusing QKV and (for SiLU) gate/up shards.

        :param weights: iterable of (name, tensor) pairs
        :return: names of all parameters that were loaded
        """

        def _padding_weight(name: str, w: torch.Tensor) -> torch.Tensor:
            # Zero-pad MLP weights so their TP-sharded dimension divides
            # tp_size, mirroring the padded sizes in OpenPanguVisionMLP.
            if "gate_proj" in name or "up_proj" in name:
                dim, size = 0, w.size(0)
            elif "down_proj" in name:
                dim, size = 1, w.size(-1)
            else:
                return w
            pad_len = -size % self.tp_size
            if pad_len == 0:
                return w
            pad = [0] * (w.ndim * 2)
            pad[-(dim + 1) * 2 + 1] = pad_len
            return F.pad(w, pad, mode="constant", value=0)

        # Map separate checkpoint shards onto the fused vLLM parameters.
        stacked_params_mapping = [
            ("attn.qkv.", "attn.q.", "q"),
            ("attn.qkv.", "attn.k.", "k"),
            ("attn.qkv.", "attn.v.", "v"),
        ]
        if self.hidden_act == "silu":
            stacked_params_mapping.extend(
                [
                    ("gate_up_proj", "gate_proj", 0),
                    ("gate_up_proj", "up_proj", 1),
                ]
            )
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if self.hidden_act == "silu":
                loaded_weight = _padding_weight(name, loaded_weight)
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not a stacked shard: load directly.
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class ProjectionSingle(nn.Module):
    """Single-layer projection: SiLU activation followed by a Linear map."""

    def __init__(self, i_hidden_size: int, t_hidden_size: int):
        super().__init__()
        self.act = F.silu
        self.fc1 = nn.Linear(i_hidden_size, t_hidden_size, bias=True)

    def forward(self, hidden_states):
        """Activate first, then project."""
        return self.fc1(self.act(hidden_states))
class OpenPanguVLProcessingInfo(Qwen2_5_VLProcessingInfo):
    """Processing info for Pangu-VL, reusing the Qwen2.5-VL implementation."""

    def get_hf_config(self):
        """Return the raw HF config without subclass narrowing."""
        return self.ctx.model_config.hf_config

    def get_hf_processor(
        self,
        *,
        min_pixels: int | None = None,
        max_pixels: int | None = None,
        size: dict[str, int] | None = None,
        fps: float | list[float] | None = None,
        **kwargs: object,
    ):
        """Build the HF processor.

        NOTE(review): ``min_pixels``/``max_pixels``/``size`` are accepted for
        signature compatibility but are not forwarded; only ``fps`` is.
        """
        if fps is not None:
            kwargs["fps"] = fps
        return self.ctx.get_hf_processor(
            use_fast=kwargs.pop("use_fast", True),
            **kwargs,
        )
class OpenPanguVLImagePixelInputs(TensorSchema):
    """Raw pixel-patch inputs for images, validated by TensorSchema."""

    type: Literal["pixel_values"]
    # Flattened patches: ("np", "cps") symbolic dims — presumably total patch
    # count and per-patch feature length; TODO confirm against the processor.
    pixel_values: Annotated[
        torch.Tensor,
        TensorShape("np", "cps"),
    ]
    # One (t, h, w) patch grid per image ("ni" images).
    image_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]
class OpenPanguVLImageEmbeddingInputs(TensorSchema):
    """Precomputed image embeddings, validated by TensorSchema."""

    type: Literal["image_embeds"]
    # ("nf", "hs") symbolic dims — presumably feature count and hidden size.
    image_embeds: Annotated[
        torch.Tensor,
        TensorShape("nf", "hs"),
    ]
    # One (t, h, w) patch grid per image ("ni" images).
    image_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]
class OpenPanguVLVideoPixelInputs(TensorSchema):
    """Raw pixel-patch inputs for videos, validated by TensorSchema."""

    type: Literal["pixel_values_videos"]
    # Flattened video patches: ("np", "ctps") symbolic dims — presumably
    # total patch count and per-patch feature length; TODO confirm.
    pixel_values_videos: Annotated[
        torch.Tensor,
        TensorShape("np", "ctps"),
    ]
    # One (t, h, w) patch grid per video ("nv" videos).
    video_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("nv", 3),
    ]
class OpenPanguVLVideoEmbeddingInputs(TensorSchema):
    """Precomputed video embeddings, validated by TensorSchema."""

    type: Literal["video_embeds"]
    # ("nf", "hs") symbolic dims — presumably feature count and hidden size.
    video_embeds: Annotated[
        torch.Tensor,
        TensorShape("nf", "hs"),
    ]
    # One (t, h, w) patch grid per video ("nv" videos).
    video_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("nv", 3),
    ]
class OpenPanguVLMultiModalProcessor(Qwen2_5_VLMultiModalProcessor):
    """Prompt-update logic for Pangu-VL image/video placeholder expansion."""

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        # Annotation fixed from builtin `any` (a function, not a type).
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Build replacements that expand each media placeholder token into
        the full run of per-patch tokens expected by the model.
        """
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_processor = self.info.get_image_processor(**hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()
        image_token = hf_processor.image_token
        video_token = hf_processor.video_token
        vision_start_token = hf_processor.vision_start_token
        vision_end_token = hf_processor.vision_end_token
        image_token_id = vocab[image_token]
        video_token_id = vocab[video_token]
        vision_start_token_id = vocab[vision_start_token]
        vision_end_token_id = vocab[vision_end_token]
        placeholder = {
            "image": image_token_id,
            "video": video_token_id,
        }
        # Patches collapsed into one LM token by the spatial merger.
        merge_length = image_processor.merge_size**2

        def get_replacement_openpangu_vision(item_idx: int, modality: str):
            # Expansion for one media item, sized by its (t, h, w) grid.
            out_item = out_mm_kwargs[modality][item_idx]
            grid_thw = out_item[f"{modality}_grid_thw"].data
            if not isinstance(grid_thw, torch.Tensor):
                raise TypeError("Expected 'grid_thw' to be a Tensor")
            if modality == "image":
                # One image token per merged patch group.
                image_token_id_total = [image_token_id] * (
                    int(grid_thw.prod()) // merge_length
                )
                return image_token_id_total
            else:
                # When modality is video: each frame is wrapped in
                # vision_start/end, then the outermost pair is stripped
                # (it already surrounds the placeholder in the prompt).
                grid_t, grid_h, grid_w = grid_thw
                video_seq_length_per_time = (grid_h * grid_w).item() // merge_length
                video_token_id_per_time = (
                    [vision_start_token_id]
                    + [video_token_id] * video_seq_length_per_time
                    + [vision_end_token_id]
                )
                video_token_id_total = video_token_id_per_time * grid_t.item()
                video_token_id_middle = video_token_id_total[1:-1]
                return PromptUpdateDetails.select_token_id(
                    video_token_id_middle,
                    embed_token_id=video_token_id,
                )

        return [
            PromptReplacement(
                modality=modality,
                target=[placeholder[modality]],
                replacement=partial(
                    get_replacement_openpangu_vision, modality=modality
                ),
            )
            for modality in ("image", "video")
        ]
class OpenPanguVLDummyInputsBuilder(Qwen2_5_VLDummyInputsBuilder):
    """Dummy-input builder; inherits Qwen2.5-VL behavior unchanged."""

    pass
@MULTIMODAL_REGISTRY.register_processor(
    OpenPanguVLMultiModalProcessor,
    info=OpenPanguVLProcessingInfo,
    dummy_inputs=OpenPanguVLDummyInputsBuilder,
)
class OpenPanguVLForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsLoRA, SupportsPP, SupportsMRoPE
):
    """OpenPangu vision-language model.

    A vision transformer tower (`visual`) encodes image/video patches into
    embeddings that are merged into the token stream of a PanguEmbedded
    causal language model (`language_model`). Supports M-RoPE positions,
    LoRA, and pipeline parallelism.
    """

    # Remaps HF checkpoint weight prefixes onto this module's attribute layout.
    # The specific "model.language_model." / "model.visual." prefixes are listed
    # before the catch-all "model." entry.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "model.language_model.": "language_model.model.",
            "model.visual.": "visual.",
            "lm_head.": "language_model.lm_head.",
            "model.": "language_model.model.",
        }
    )
    # Fused projections and the per-checkpoint weights they are packed from.
    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        """Build the vision tower and the wrapped language model."""
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.config = config
        self.vllm_config = vllm_config
        quant_config = vllm_config.quant_config
        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.visual = OpenPanguVisionTransformer(
                vision_config=config.vision_config,
                out_hidden_size=config.vision_config.out_hidden_size,
                hidden_size=config.hidden_size,
                norm_eps=getattr(config.vision_config, "rms_norm_eps", 1e-6),
                # GPTQ-style quantization is not applied to the vision tower.
                quant_config=self._maybe_ignore_quant_config(quant_config),
                prefix=maybe_prefix(prefix, "visual"),
            )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                # NOTE(review): the literal "openpangu" is passed here instead
                # of the incoming `prefix` — confirm this is intentional.
                prefix=maybe_prefix("openpangu", "language_model"),
                architectures=["PanguEmbeddedForCausalLM"],
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        self._parse_preprocess_params(config.vision_config)

    def _parse_preprocess_params(self, vision_config):
        """Cache image preprocessing parameters (rescale/normalize settings)
        from the HF image processor so pixel values can be normalized on-model.
        """
        self.channel = vision_config.in_channels
        self.patch_size = vision_config.patch_size
        # Local import to avoid a circular import at module load time.
        from vllm.multimodal import MULTIMODAL_REGISTRY

        image_processor = (
            MULTIMODAL_REGISTRY.create_processor(self.vllm_config.model_config)
            .info.get_hf_processor()
            .image_processor
        )
        self.do_rescale = image_processor.do_rescale
        self.rescale_factor = image_processor.rescale_factor
        self.do_normalize = image_processor.do_normalize
        # Stored as tuples so they are hashable (rescale_and_normalize caches
        # on them via lru_cache).
        self.image_mean = tuple(image_processor.image_mean)
        self.image_std = tuple(image_processor.image_std)

    def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig):
        """Return None for GPTQ-style configs (vision tower stays unquantized)."""
        if isinstance(quant_config, (GPTQConfig, GPTQMarlinConfig)):
            return None
        return quant_config

    def _validate_and_reshape_mm_tensor(
        self, mm_input: object, name: str
    ) -> torch.Tensor:
        """Validate a multimodal input and flatten it to a 2D tensor.

        Accepts a 2D tensor (returned as-is), a batched 3D tensor, or a list
        of tensors; the latter two are concatenated along dim 0.
        Raises ValueError for any other type or rank.
        """
        if not isinstance(mm_input, (torch.Tensor, list)):
            raise ValueError(f"Incorrect type of {name}. Got type: {type(mm_input)}")
        if isinstance(mm_input, torch.Tensor):
            if mm_input.ndim == 2:
                return mm_input
            if mm_input.ndim != 3:
                raise ValueError(
                    f"{name} should be 2D or batched 3D tensor. "
                    f"Got ndim: {mm_input.ndim} "
                    f"(shape={mm_input.shape})"
                )
            return torch.concat(list(mm_input))
        else:
            return torch.concat(mm_input)

    def _parse_and_validate_image_input(self, **kwargs: object):
        """Build image inputs (pixel values or precomputed embeddings) from
        processor kwargs; returns None when no image input is present.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        image_grid_thw = kwargs.pop("image_grid_thw", None)
        if pixel_values is None and image_embeds is None:
            return None
        if pixel_values is not None:
            pixel_values = self._validate_and_reshape_mm_tensor(
                pixel_values, "image pixel values"
            )
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw"
            )
            if not isinstance(pixel_values, (torch.Tensor, list)):
                raise ValueError(
                    "Incorrect type of image pixel values. "
                    f"Got type: {type(pixel_values)}"
                )
            return OpenPanguVLImagePixelInputs(
                type="pixel_values",
                pixel_values=pixel_values,
                image_grid_thw=image_grid_thw,
            )
        if image_embeds is not None:
            image_embeds = self._validate_and_reshape_mm_tensor(
                image_embeds, "image embeds"
            )
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw"
            )
            if not isinstance(image_embeds, torch.Tensor):
                raise ValueError(
                    "Incorrect type of image embeddings. "
                    f"Got type: {type(image_embeds)}"
                )
            return OpenPanguVLImageEmbeddingInputs(
                type="image_embeds",
                image_embeds=image_embeds,
                image_grid_thw=image_grid_thw,
            )

    def _parse_and_validate_video_input(self, **kwargs: object):
        """Build video inputs (pixel values or precomputed embeddings) from
        processor kwargs; returns None when no video input is present.
        """
        pixel_values_videos = kwargs.pop("pixel_values_videos", None)
        video_embeds = kwargs.pop("video_embeds", None)
        video_grid_thw = kwargs.pop("video_grid_thw", None)
        if pixel_values_videos is None and video_embeds is None:
            return None
        if pixel_values_videos is not None:
            pixel_values_videos = self._validate_and_reshape_mm_tensor(
                pixel_values_videos, "video pixel values"
            )
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw"
            )
            return OpenPanguVLVideoPixelInputs(
                type="pixel_values_videos",
                pixel_values_videos=pixel_values_videos,
                video_grid_thw=video_grid_thw,
            )
        if video_embeds is not None:
            video_embeds = self._validate_and_reshape_mm_tensor(
                video_embeds, "video embeds"
            )
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw"
            )
            if not isinstance(video_embeds, torch.Tensor):
                raise ValueError(
                    "Incorrect type of video embeddings. "
                    f"Got type: {type(video_embeds)}"
                )
            return OpenPanguVLVideoEmbeddingInputs(
                type="video_embeds",
                video_embeds=video_embeds,
                video_grid_thw=video_grid_thw,
            )

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        """Collect per-modality inputs ("image"/"video"), parsing each modality
        at most once regardless of how many of its keys appear in kwargs.
        """
        mm_input_by_modality = {}
        for input_key in kwargs:
            if (
                input_key in ("pixel_values", "image_embeds")
                and "image" not in mm_input_by_modality
            ):
                mm_input_by_modality["image"] = self._parse_and_validate_image_input(
                    **kwargs
                )
            if (
                input_key in ("pixel_values_videos", "video_embeds")
                and "video" not in mm_input_by_modality
            ):
                mm_input_by_modality["video"] = self._parse_and_validate_video_input(
                    **kwargs
                )
        return mm_input_by_modality

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        """Encode all multimodal inputs and return the concatenated tuple of
        per-item embeddings, or None when there is nothing to encode.
        """
        mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not mm_input_by_modality:
            return None
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()
        for modality in mm_input_by_modality:
            multimodal_input = mm_input_by_modality[modality]
            if modality == "image":
                vision_embeddings = self._process_image_input(multimodal_input)
                multimodal_embeddings = (
                    multimodal_embeddings
                    if not vision_embeddings
                    else (multimodal_embeddings + vision_embeddings)
                )
            if modality == "video":
                video_embeddings = self._process_video_input(multimodal_input)
                multimodal_embeddings = (
                    multimodal_embeddings
                    if not video_embeddings
                    else (multimodal_embeddings + video_embeddings)
                )
        return multimodal_embeddings

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings=None,
    ) -> torch.Tensor:
        """Embed token ids and, when present, splice multimodal embeddings in
        at the image/video placeholder-token positions.
        """
        inputs_embeds = self.language_model.embed_input_ids(input_ids)
        if multimodal_embeddings is not None:
            inputs_embeds = self.embed_input_ids(
                input_ids,
                inputs_embeds,
                multimodal_embeddings,
                [self.config.image_token_id, self.config.video_token_id],
            )
        return inputs_embeds

    def _process_image_input(self, image_input) -> tuple[torch.Tensor, ...]:
        """Encode image input into per-image embedding chunks.

        Pixel values are normalized on-model (using the cached HF processor
        settings) before entering the vision tower; precomputed embeddings
        skip encoding entirely.
        """
        grid_thw = image_input["image_grid_thw"]
        if grid_thw.ndim != 2:
            raise ValueError(f"grid_thw.ndim must be 2, but it is {grid_thw.ndim}")
        if image_input["type"] == "image_embeds":
            image_embeds = image_input["image_embeds"].type(self.visual.dtype)
        else:
            pixel_values = image_input["pixel_values"].type(self.visual.dtype)
            # rescale and normalize
            pixel_values = pixel_values.reshape(
                -1, self.channel, self.patch_size, self.patch_size
            )
            pixel_values = rescale_and_normalize(
                pixel_values,
                self.do_rescale,
                self.rescale_factor,
                self.do_normalize,
                self.image_mean,
                self.image_std,
            )
            pixel_values = pixel_values.reshape(
                -1, self.channel * self.patch_size * self.patch_size
            )
            image_embeds = self.visual(pixel_values, grid_thw=grid_thw)
        # Split concatenated embeddings for each image item.
        merge_size = self.visual.spatial_merge_size
        # Each item yields t*h*w patches, merged spatially by merge_size^2.
        sizes = grid_thw.prod(-1) // merge_size // merge_size
        return image_embeds.split(sizes.tolist())

    def _process_video_input(self, video_input) -> torch.Tensor:
        """Encode video input into per-video embedding chunks.

        Unlike images, video pixel values are fed to the vision tower without
        the on-model rescale/normalize step.
        """
        grid_thw = video_input["video_grid_thw"]
        if grid_thw.ndim != 2:
            raise ValueError(f"grid_thw.ndim must be 2, but it is {grid_thw.ndim}")
        if video_input["type"] == "video_embeds":
            video_embeds = video_input["video_embeds"].type(self.visual.dtype)
        else:
            pixel_values_videos = video_input["pixel_values_videos"].type(
                self.visual.dtype
            )
            video_embeds = self.visual(pixel_values_videos, grid_thw=grid_thw)
        # Split concatenated embeddings for each video item.
        merge_size = self.visual.spatial_merge_size
        sizes = grid_thw.prod(-1) // merge_size // merge_size
        return video_embeds.split(sizes.tolist())

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the language model; delegates to `language_model.model`.

        On non-first pipeline stages, `intermediate_tensors` carries the
        hidden states, so `inputs_embeds` is dropped.
        """
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata=None,
    ) -> torch.Tensor | None:
        """Compute logits via the language model (`sampling_metadata` is
        accepted for interface compatibility but unused).
        """
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, remapping names via `hf_to_vllm_mapper`."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="visual.merger.",
            tower_model="visual.",
        )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for the given modality."""
        if modality.startswith("image"):
            return "[unused18][unused19][unused20]"
        if modality.startswith("video"):
            return "[unused18][unused32][unused20]"
        raise ValueError("Only image or video modality is supported")

    def iter_mm_grid_thw(
        self, mm_features: list[MultiModalFeatureSpec]
    ) -> Iterator[tuple[str, int, int, int, int]]:
        """Yield (modality, offset, t, h, w) per multimodal item in prompt
        order, with h/w already divided by the spatial merge size.
        """
        spatial_merge_size = self.config.vision_config.spatial_merge_size
        for mm_feature in sorted(mm_features, key=lambda f: f.mm_position.offset):
            offset = mm_feature.mm_position.offset
            modality = mm_feature.modality
            if modality == "image":
                t, h, w = mm_feature.data["image_grid_thw"].data.tolist()
                assert t == 1, f"Image must have 1 frame, got {t}"
                yield (
                    modality,
                    offset,
                    1,
                    h // spatial_merge_size,
                    w // spatial_merge_size,
                )
            elif modality == "video":
                t, h, w = mm_feature.data["video_grid_thw"].data.tolist()
                yield (
                    modality,
                    offset,
                    t,
                    h // spatial_merge_size,
                    w // spatial_merge_size,
                )
            else:
                raise ValueError(f"Unsupported modality: {modality}")

    def get_mrope_input_positions(
        self,
        input_tokens: list[int],
        mm_features: list[MultiModalFeatureSpec],
    ) -> tuple[torch.Tensor, int]:
        """Build 3D M-RoPE positions (t/h/w rows) for the whole prompt.

        Text spans advance all three rows together; image/video spans get
        spatial (and for video, temporal) grid positions. Returns the (3, N)
        position tensor and the delta used to continue positions past the
        prompt during decoding.
        """
        llm_pos_ids_list: list = []
        st = 0
        for (
            modality,
            offset,
            llm_grid_t,
            llm_grid_h,
            llm_grid_w,
        ) in self.iter_mm_grid_thw(mm_features):
            text_len = offset - st
            # Positions continue from the max position assigned so far.
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            llm_pos_ids_list.append(
                torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
            )
            if modality == "video":
                # Two extra separator tokens are placed between consecutive
                # frames; their positions are filled via eot_bot_pos below.
                eot_bot_pos = torch.full((3, 1), 0, dtype=torch.long)
                offset_pos = max(llm_grid_h, llm_grid_w)
                current_pos = text_len + st_idx
                grid_h = (
                    torch.arange(llm_grid_h)
                    .view(-1, 1)
                    .expand(-1, llm_grid_w)
                    .flatten()
                )
                grid_w = (
                    torch.arange(llm_grid_w)
                    .view(1, -1)
                    .expand(llm_grid_h, -1)
                    .flatten()
                )
                frame_pos = torch.stack(
                    [
                        torch.full_like(grid_h, 0, dtype=torch.long),  # t
                        grid_h,  # h
                        grid_w,  # w
                    ]
                )
                llm_pos_ids_list.append(frame_pos + current_pos)
                for _ in range(llm_grid_t - 1):
                    current_pos = current_pos + offset_pos
                    llm_pos_ids_list.append(eot_bot_pos + current_pos)
                    llm_pos_ids_list.append(eot_bot_pos + current_pos + 1)
                    llm_pos_ids_list.append(frame_pos + current_pos + 2)
                    current_pos += 2
                # Token span consumed: t frames of h*w tokens plus 2
                # separator tokens between each pair of frames.
                st = (
                    offset + llm_grid_t * llm_grid_h * llm_grid_w + (llm_grid_t - 1) * 2
                )
            else:
                t_index = (
                    (
                        torch.arange(llm_grid_t)
                        .view(-1, 1)
                        .expand(-1, llm_grid_h * llm_grid_w)
                    )
                    .long()
                    .flatten()
                )
                h_index = (
                    torch.arange(llm_grid_h)
                    .view(1, -1, 1)
                    .expand(llm_grid_t, -1, llm_grid_w)
                    .flatten()
                )
                w_index = (
                    torch.arange(llm_grid_w)
                    .view(1, 1, -1)
                    .expand(llm_grid_t, llm_grid_h, -1)
                    .flatten()
                )
                llm_pos_ids_list.append(
                    torch.stack([t_index, h_index, w_index]) + text_len + st_idx
                )
                st = offset + llm_grid_t * llm_grid_h * llm_grid_w
        # Trailing text after the last multimodal item.
        if st < len(input_tokens):
            st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
            text_len = len(input_tokens) - st
            llm_pos_ids_list.append(
                torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx
            )
        llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
        mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
        return llm_positions, mrope_position_delta
def rescale(image, scale):
    """Scale pixel values by *scale* (e.g. 1/255 to map uint8 into [0, 1])."""
    scaled = image * scale
    return scaled
def normalize(image, mean, std):
    """Channel-wise normalize *image* ((x - mean) / std) via torchvision v2.

    *image* is expected to be a float tensor; the caller
    (rescale_and_normalize) casts to float32 before invoking this.
    """
    return v2.functional.normalize(image, mean, std)
@lru_cache(maxsize=10)
def _fuse_mean_std_and_rescale_factor(
do_normalize: bool | None = None,
image_mean: float | list[float] | None = None,
image_std: float | list[float] | None = None,
do_rescale: bool | None = None,
rescale_factor: float | None = None,
device: Optional["torch.device"] = None,
) -> tuple:
if do_rescale and do_normalize:
# Fused rescale and normalize
image_mean = torch.tensor(image_mean, device=device) * (1.0 / rescale_factor)
image_std = torch.tensor(image_std, device=device) * (1.0 / rescale_factor)
do_rescale = False
return image_mean, image_std, do_rescale
def rescale_and_normalize(
    images: "torch.Tensor",
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    image_mean: float | list[float],
    image_std: float | list[float],
    dtype: torch.dtype = torch.bfloat16,
) -> "torch.Tensor":
    """
    Rescale and normalize images.

    When both flags are set, the rescale factor is fused into mean/std so a
    single normalize pass suffices. ``image_mean``/``image_std`` must be
    hashable (the fusing helper is lru_cached); callers in this file pass
    tuples. The result is cast to ``dtype`` regardless of which branch ran.
    """
    image_mean, image_std, do_rescale = _fuse_mean_std_and_rescale_factor(
        do_normalize=do_normalize,
        image_mean=image_mean,
        image_std=image_std,
        do_rescale=do_rescale,
        rescale_factor=rescale_factor,
        device=images.device,
    )
    # if/elif as we use fused rescale and normalize if both are set to True
    if do_normalize:
        # Normalize in float32 for precision, then cast down below.
        images = normalize(images.to(dtype=torch.float32), image_mean, image_std)
    elif do_rescale:
        images = rescale(images, rescale_factor)
    images = images.to(dtype)
    return images
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/openpangu_vl.py",
"license": "Apache License 2.0",
"lines": 1190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/musicflamingo.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""MusicFlamingo model adapter.
MusicFlamingo shares the AudioFlamingo3 architecture, so we reuse the same
implementation and multimodal processor, while accepting MusicFlamingo config
and processor classes when available.
"""
from collections.abc import Mapping
from transformers.models.audioflamingo3 import (
AudioFlamingo3Config,
AudioFlamingo3Processor,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.processing import BaseProcessingInfo
from .audioflamingo3 import (
AudioFlamingo3DummyInputsBuilder,
AudioFlamingo3ForConditionalGeneration,
AudioFlamingo3MultiModalProcessor,
)
try:
    # Optional dependency: use MusicFlamingo classes when transformers provides them.
    from transformers.models.musicflamingo import (
        MusicFlamingoConfig,
        MusicFlamingoProcessor,
    )
except Exception:  # pragma: no cover - optional dependency
    # Sentinels checked by MusicFlamingoProcessingInfo to decide whether to
    # fall back to the AudioFlamingo3 config/processor classes.
    MusicFlamingoConfig = None
    MusicFlamingoProcessor = None
class MusicFlamingoProcessingInfo(BaseProcessingInfo):
    """Processing info for MusicFlamingo.

    Prefers the MusicFlamingo config/processor classes when transformers
    provides them, otherwise falls back to the AudioFlamingo3 equivalents.
    """

    def get_hf_config(self):
        """Return the HF config, accepting either config class when available."""
        if MusicFlamingoConfig is not None:
            return self.ctx.get_hf_config((MusicFlamingoConfig, AudioFlamingo3Config))
        return self.ctx.get_hf_config(AudioFlamingo3Config)

    def get_hf_processor(self, **kwargs: object):
        """Return the HF processor; a tuple triggers the AutoProcessor path
        and accepts either processor class.
        """
        if MusicFlamingoProcessor is not None:
            return self.ctx.get_hf_processor(
                (MusicFlamingoProcessor, AudioFlamingo3Processor), **kwargs
            )
        return self.ctx.get_hf_processor(AudioFlamingo3Processor, **kwargs)

    def get_feature_extractor(self, **kwargs: object):
        """Return the feature extractor of the resolved HF processor."""
        return self.get_hf_processor(**kwargs).feature_extractor

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        """Audio is the only supported modality, with no per-prompt limit."""
        return {"audio": None}
class MusicFlamingoDummyInputsBuilder(AudioFlamingo3DummyInputsBuilder):
    """Dummy-input builder for MusicFlamingo; reuses AudioFlamingo3 behavior unchanged."""

    pass
@MULTIMODAL_REGISTRY.register_processor(
    AudioFlamingo3MultiModalProcessor,
    info=MusicFlamingoProcessingInfo,
    dummy_inputs=MusicFlamingoDummyInputsBuilder,
)
# Inherits the full AudioFlamingo3 implementation; only the processing info
# and dummy-inputs classes registered above differ.
class MusicFlamingoForConditionalGeneration(AudioFlamingo3ForConditionalGeneration):
    """MusicFlamingo model for conditional generation."""
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/musicflamingo.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/multimodal/media/connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import atexit
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, TypeVar
from urllib.request import url2pathname
import numpy as np
import numpy.typing as npt
import torch
from PIL import Image, UnidentifiedImageError
from urllib3.util import Url, parse_url
import vllm.envs as envs
from vllm.connections import HTTPConnection, global_http_connection
from vllm.utils.registry import ExtensionManager
from .audio import AudioEmbeddingMediaIO, AudioMediaIO
from .base import MediaIO
from .image import ImageEmbeddingMediaIO, ImageMediaIO
from .video import VideoMediaIO
# Generic media payload type returned by a MediaIO implementation.
_M = TypeVar("_M")
# Shared pool for offloading blocking media decode work from the event loop;
# shut down at interpreter exit.
global_thread_pool = ThreadPoolExecutor(
    max_workers=envs.VLLM_MEDIA_LOADING_THREAD_COUNT
)
atexit.register(global_thread_pool.shutdown)
# Registry of media-connector implementations, keyed by scheme/name.
MEDIA_CONNECTOR_REGISTRY = ExtensionManager()
@MEDIA_CONNECTOR_REGISTRY.register("http")
class MediaConnector:
    """Loads multimodal media (audio/image/video/embeddings) from HTTP, data,
    and file URLs, enforcing the configured local-path and domain policies.
    """

    def __init__(
        self,
        media_io_kwargs: dict[str, dict[str, Any]] | None = None,
        connection: HTTPConnection = global_http_connection,
        *,
        allowed_local_media_path: str = "",
        allowed_media_domains: list[str] | None = None,
    ) -> None:
        """
        Args:
            media_io_kwargs: Additional args passed to process media
                             inputs, keyed by modalities. For example,
                             to set num_frames for video, set
                             `--media-io-kwargs '{"video":{"num_frames":40}}'`
            connection: HTTP connection client to download media contents.
            allowed_local_media_path: A local directory to load media files from.
            allowed_media_domains: If set, only media URLs that belong to this
                domain can be used for multi-modal inputs.
        """
        super().__init__()
        self.media_io_kwargs: dict[str, dict[str, Any]] = (
            media_io_kwargs if media_io_kwargs else {}
        )
        self.connection = connection
        if allowed_local_media_path:
            allowed_local_media_path_ = Path(allowed_local_media_path)
            if not allowed_local_media_path_.exists():
                raise ValueError(
                    "Invalid `--allowed-local-media-path`: The path "
                    f"{allowed_local_media_path_} does not exist."
                )
            if not allowed_local_media_path_.is_dir():
                raise ValueError(
                    "Invalid `--allowed-local-media-path`: The path "
                    f"{allowed_local_media_path_} must be a directory."
                )
        else:
            # None disables file:// URL loading entirely (see _load_file_url).
            allowed_local_media_path_ = None
        self.allowed_local_media_path = allowed_local_media_path_
        if allowed_media_domains is None:
            allowed_media_domains = []
        self.allowed_media_domains = allowed_media_domains

    def _load_data_url(
        self,
        url_spec: Url,
        media_io: MediaIO[_M],
    ) -> _M:  # type: ignore[type-var]
        """Decode a base64 `data:` URL via the given media IO handler."""
        url_spec_path = url_spec.path or ""
        data_spec, data = url_spec_path.split(",", 1)
        media_type, data_type = data_spec.split(";", 1)
        # media_type starts with a leading "/" (e.g., "/video/jpeg")
        media_type = media_type.lstrip("/")
        if data_type != "base64":
            msg = "Only base64 data URLs are supported for now."
            raise NotImplementedError(msg)
        return media_io.load_base64(media_type, data)

    def _load_file_url(
        self,
        url_spec: Url,
        media_io: MediaIO[_M],
    ) -> _M:  # type: ignore[type-var]
        """Load a `file:` URL, restricted to `allowed_local_media_path`.

        Raises RuntimeError when no local path is configured and ValueError
        when the resolved path escapes the allowed directory.
        """
        allowed_local_media_path = self.allowed_local_media_path
        if allowed_local_media_path is None:
            raise RuntimeError(
                "Cannot load local files without `--allowed-local-media-path`."
            )
        url_spec_path = url_spec.path or ""
        url_spec_netloc = url_spec.netloc or ""
        filepath = Path(url2pathname(url_spec_netloc + url_spec_path))
        # resolve() follows symlinks, so traversal tricks cannot escape the
        # allowed directory.
        if allowed_local_media_path not in filepath.resolve().parents:
            raise ValueError(
                f"The file path {filepath} must be a subpath "
                f"of `--allowed-local-media-path {allowed_local_media_path}`."
            )
        return media_io.load_file(filepath)

    def _assert_url_in_allowed_media_domains(self, url_spec: Url) -> None:
        """Raise ValueError when a domain allowlist is set and the URL's host
        is not on it; no-op when the allowlist is empty.
        """
        if (
            self.allowed_media_domains
            and url_spec.hostname not in self.allowed_media_domains
        ):
            raise ValueError(
                f"The URL must be from one of the allowed domains: "
                f"{self.allowed_media_domains}. Input URL domain: "
                f"{url_spec.hostname}"
            )

    def load_from_url(
        self,
        url: str,
        media_io: MediaIO[_M],
        *,
        fetch_timeout: int | None = None,
    ) -> _M:  # type: ignore[type-var]
        """Synchronously load media from an http(s)/data/file URL."""
        url_spec = parse_url(url)
        if url_spec.scheme and url_spec.scheme.startswith("http"):
            self._assert_url_in_allowed_media_domains(url_spec)
            connection = self.connection
            data = connection.get_bytes(
                url_spec.url,
                timeout=fetch_timeout,
                allow_redirects=envs.VLLM_MEDIA_URL_ALLOW_REDIRECTS,
            )
            return media_io.load_bytes(data)
        if url_spec.scheme == "data":
            return self._load_data_url(url_spec, media_io)
        if url_spec.scheme == "file":
            return self._load_file_url(url_spec, media_io)
        msg = "The URL must be either a HTTP, data or file URL."
        raise ValueError(msg)

    async def load_from_url_async(
        self,
        url: str,
        media_io: MediaIO[_M],
        *,
        fetch_timeout: int | None = None,
    ) -> _M:
        """Async variant of `load_from_url`; decoding is offloaded to the
        shared thread pool so the event loop is not blocked.
        """
        url_spec = parse_url(url)
        loop = asyncio.get_running_loop()
        if url_spec.scheme and url_spec.scheme.startswith("http"):
            self._assert_url_in_allowed_media_domains(url_spec)
            connection = self.connection
            data = await connection.async_get_bytes(
                url_spec.url,
                timeout=fetch_timeout,
                allow_redirects=envs.VLLM_MEDIA_URL_ALLOW_REDIRECTS,
            )
            future = loop.run_in_executor(global_thread_pool, media_io.load_bytes, data)
            return await future
        if url_spec.scheme == "data":
            future = loop.run_in_executor(
                global_thread_pool, self._load_data_url, url_spec, media_io
            )
            return await future
        if url_spec.scheme == "file":
            future = loop.run_in_executor(
                global_thread_pool, self._load_file_url, url_spec, media_io
            )
            return await future
        msg = "The URL must be either a HTTP, data or file URL."
        raise ValueError(msg)

    def fetch_audio(
        self,
        audio_url: str,
    ) -> tuple[np.ndarray, int | float]:
        """
        Load audio from a URL.
        """
        audio_io = AudioMediaIO(**self.media_io_kwargs.get("audio", {}))
        return self.load_from_url(
            audio_url,
            audio_io,
            fetch_timeout=envs.VLLM_AUDIO_FETCH_TIMEOUT,
        )

    async def fetch_audio_async(
        self,
        audio_url: str,
    ) -> tuple[np.ndarray, int | float]:
        """
        Asynchronously fetch audio from a URL.
        """
        audio_io = AudioMediaIO(**self.media_io_kwargs.get("audio", {}))
        return await self.load_from_url_async(
            audio_url,
            audio_io,
            fetch_timeout=envs.VLLM_AUDIO_FETCH_TIMEOUT,
        )

    def fetch_image(
        self,
        image_url: str,
        *,
        image_mode: str = "RGB",
    ) -> Image.Image:
        """
        Load a PIL image from an HTTP or base64 data URL.

        By default, the image is converted into RGB format.
        """
        image_io = ImageMediaIO(
            image_mode=image_mode, **self.media_io_kwargs.get("image", {})
        )
        try:
            return self.load_from_url(
                image_url,
                image_io,
                fetch_timeout=envs.VLLM_IMAGE_FETCH_TIMEOUT,
            )
        except UnidentifiedImageError as e:
            # convert to ValueError to be properly caught upstream
            raise ValueError(str(e)) from e

    async def fetch_image_async(
        self,
        image_url: str,
        *,
        image_mode: str = "RGB",
    ) -> Image.Image:
        """
        Asynchronously load a PIL image from an HTTP or base64 data URL.

        By default, the image is converted into RGB format.
        """
        image_io = ImageMediaIO(
            image_mode=image_mode, **self.media_io_kwargs.get("image", {})
        )
        try:
            return await self.load_from_url_async(
                image_url,
                image_io,
                fetch_timeout=envs.VLLM_IMAGE_FETCH_TIMEOUT,
            )
        except UnidentifiedImageError as e:
            # convert to ValueError to be properly caught upstream
            raise ValueError(str(e)) from e

    def fetch_video(
        self,
        video_url: str,
        *,
        image_mode: str = "RGB",
    ) -> tuple[npt.NDArray, dict[str, Any]]:
        """
        Load video from an HTTP or base64 data URL.
        """
        image_io = ImageMediaIO(
            image_mode=image_mode, **self.media_io_kwargs.get("image", {})
        )
        video_io = VideoMediaIO(image_io, **self.media_io_kwargs.get("video", {}))
        return self.load_from_url(
            video_url,
            video_io,
            fetch_timeout=envs.VLLM_VIDEO_FETCH_TIMEOUT,
        )

    async def fetch_video_async(
        self,
        video_url: str,
        *,
        image_mode: str = "RGB",
    ) -> tuple[npt.NDArray, dict[str, Any]]:
        """
        Asynchronously load video from an HTTP or base64 data URL.

        By default, the image is converted into RGB format.
        """
        image_io = ImageMediaIO(
            image_mode=image_mode, **self.media_io_kwargs.get("image", {})
        )
        video_io = VideoMediaIO(image_io, **self.media_io_kwargs.get("video", {}))
        return await self.load_from_url_async(
            video_url,
            video_io,
            fetch_timeout=envs.VLLM_VIDEO_FETCH_TIMEOUT,
        )

    def fetch_image_embedding(
        self,
        data: str,
    ) -> torch.Tensor:
        """
        Load image embedding from a URL.
        """
        image_embedding_io = ImageEmbeddingMediaIO()
        return image_embedding_io.load_base64("", data)

    def fetch_audio_embedding(
        self,
        data: str,
    ) -> torch.Tensor:
        """
        Load audio embedding from a URL.
        """
        audio_embedding_io = AudioEmbeddingMediaIO()
        return audio_embedding_io.load_base64("", data)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/multimodal/media/connector.py",
"license": "Apache License 2.0",
"lines": 292,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/qwen3_asr.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2026 The Qwen team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Qwen3-ASR model."""
from collections.abc import Iterable, Mapping, Sequence
from typing import Any, Literal
import numpy as np
import torch
import torch.nn as nn
from transformers.feature_extraction_utils import BatchFeature
from transformers.models.whisper import WhisperFeatureExtractor
from vllm.config import ModelConfig, SpeechToTextConfig, VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.inputs.data import PromptType, TokensPrompt
from vllm.logger import init_logger
from vllm.model_executor.models.interfaces import (
MultiModalEmbeddings,
SupportsMRoPE,
SupportsMultiModal,
SupportsPP,
SupportsTranscription,
)
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.qwen3 import Qwen3ForCausalLM
from vllm.model_executor.models.qwen3_omni_moe_thinker import (
Qwen2_5OmniAudioFeatureInputs,
Qwen3OmniMoeAudioEncoder,
Qwen3OmniMoeThinkerMultiModalProcessor,
)
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
WeightsMapper,
_merge_multimodal_embeddings,
maybe_prefix,
)
from vllm.model_executor.models.whisper import ISO639_1_SUPPORTED_LANGS
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
AudioItem,
ModalityData,
MultiModalDataDict,
MultiModalFeatureSpec,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import (
AudioProcessorItems,
DictEmbeddingItems,
ModalityDataItems,
MultiModalDataItems,
MultiModalDataParser,
)
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
)
from vllm.sequence import IntermediateTensors
from vllm.tokenizers import cached_tokenizer_from_config
from vllm.transformers_utils.configs.qwen3_asr import (
Qwen3ASRConfig,
Qwen3ASRThinkerConfig,
)
from vllm.transformers_utils.processor import cached_processor_from_config
from vllm.transformers_utils.processors.qwen3_asr import (
Qwen3ASRProcessor,
)
logger = init_logger(__name__)
# Tag wrapping transcribed text in Qwen3-ASR prompts/outputs.
_ASR_TEXT_TAG = "<asr_text>"
def _get_feat_extract_output_lengths(input_lengths: torch.Tensor):
input_lengths_leave = input_lengths % 100
feat_lengths = (input_lengths_leave - 1) // 2 + 1
output_lengths = (
((feat_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
)
return output_lengths
class Qwen3ASRProcessingInfo(BaseProcessingInfo):
    """Processing metadata for Qwen3-ASR (HF config/processor resolution)."""

    def get_hf_config(self):
        """Return the thinker sub-config of the Qwen3-ASR HF config."""
        return self.ctx.get_hf_config(Qwen3ASRConfig).thinker_config

    def get_hf_processor(self, **kwargs: object) -> Qwen3ASRProcessor:
        """Return the Qwen3-ASR processor, backfilling `audio_token` for
        processor versions that do not define it.
        """
        processor = self.ctx.get_hf_processor(
            Qwen3ASRProcessor,
            use_fast=kwargs.pop("use_fast", True),
            **kwargs,
        )
        if not hasattr(processor, "audio_token"):
            processor.audio_token = "<|audio_pad|>"
        return processor

    def get_feature_extractor(self, **kwargs: object) -> WhisperFeatureExtractor:
        """Return the processor's feature extractor (must be Whisper-style)."""
        hf_processor = self.get_hf_processor(**kwargs)
        feature_extractor = hf_processor.feature_extractor
        assert isinstance(feature_extractor, WhisperFeatureExtractor)
        return feature_extractor

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        """Audio is the only supported modality, with no per-prompt limit."""
        return {"audio": None}

    def get_data_parser(self) -> MultiModalDataParser:
        """Build the audio data parser targeting the extractor's sample rate."""
        feature_extractor = self.get_feature_extractor()
        return Qwen3ASRMultiModalDataParser(
            target_sr=feature_extractor.sampling_rate,
            # _get_expected_hidden_size is presumably provided by the base
            # class — not visible in this file section.
            expected_hidden_size=self._get_expected_hidden_size(),
        )
class Qwen3ASRDummyInputsBuilder(BaseDummyInputsBuilder[Qwen3ASRProcessingInfo]):
    """Builds dummy prompts and audio data for profiling Qwen3-ASR."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """Return one audio placeholder token per requested audio item."""
        num_audios = mm_counts.get("audio", 0)
        hf_processor = self.info.get_hf_processor()
        audio_token = hf_processor.audio_token
        return audio_token * num_audios

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Generate dummy audio clips capped at 30s (or the extractor's
        chunk length, whichever is smaller) at the extractor's sample rate.
        """
        num_audios = mm_counts.get("audio", 0)
        feature_extractor = self.info.get_feature_extractor()
        target_audio_length = (
            min(
                feature_extractor.chunk_length,
                30,
            )
            * feature_extractor.sampling_rate
        )
        audio_overrides = mm_options.get("audio")
        return {
            "audio": self._get_dummy_audios(
                length=target_audio_length,
                num_audios=num_audios,
                overrides=audio_overrides,
            ),
        }
def _qwen3asr_field_config(hf_inputs: Mapping[str, torch.Tensor]):
    """Describe how Qwen3-ASR audio tensors map onto multimodal items.

    Audio features are flattened along dim 1 with per-item sizes taken from
    `audio_feature_lengths`; the other fields are simply batched per item.
    """
    feature_lengths = hf_inputs.get("audio_feature_lengths", torch.empty((0,)))
    return {
        "input_audio_features": MultiModalFieldConfig.flat_from_sizes(
            "audio", feature_lengths, dim=1
        ),
        "feature_attention_mask": MultiModalFieldConfig.batched("audio"),
        "audio_feature_lengths": MultiModalFieldConfig.batched("audio"),
    }
class Qwen3ASRMultiModalDataParser(MultiModalDataParser):
    """Data parser that additionally accepts precomputed audio-feature dicts."""

    def _parse_audio_data(
        self,
        data: dict[str, torch.Tensor] | ModalityData[AudioItem],
    ) -> ModalityDataItems[Any, Any] | None:
        """Treat a dict as precomputed features (validated against the
        required keys); otherwise defer to the base parser for raw audio.
        """
        if isinstance(data, dict):
            return DictEmbeddingItems(
                data,
                modality="audio",
                required_fields={"input_audio_features", "audio_feature_lengths"},
                fields_factory=_qwen3asr_field_config,
            )
        return super()._parse_audio_data(data)
class Qwen3ASRMultiModalProcessor(
    Qwen3OmniMoeThinkerMultiModalProcessor,
):
    """Multimodal processor for Qwen3-ASR, reusing the Qwen3-Omni thinker
    pipeline but with ASR-specific field configs and prompt replacements.
    """

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Delegate to the module-level ASR field-config factory."""
        return _qwen3asr_field_config(hf_inputs)

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Expand each audio placeholder token into one token per encoder
        output frame, deriving frame counts from either explicit feature
        lengths or the feature attention mask.
        """
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()
        audio_token = processor.audio_token
        audio_token_id = vocab[audio_token]
        out_mm_data = out_mm_kwargs.get_data()
        audio_feature_lengths = out_mm_data.get("audio_feature_lengths")
        feature_attention_mask = out_mm_data.get("feature_attention_mask")
        if audio_feature_lengths is None and feature_attention_mask is None:
            audio_output_lengths = []
        elif audio_feature_lengths is not None:
            # Explicit lengths take precedence over the attention mask.
            audio_output_lens = _get_feat_extract_output_lengths(audio_feature_lengths)
            audio_output_lengths = audio_output_lens.tolist()
        elif feature_attention_mask is not None:
            assert isinstance(feature_attention_mask, torch.Tensor)
            audio_output_lens = _get_feat_extract_output_lengths(
                feature_attention_mask.sum(-1)
            )
            audio_output_lengths = audio_output_lens.tolist()

        def get_replacement_qwen2_audio(item_idx: int):
            """Return the placeholder expansion for the item_idx-th audio."""
            num_features = audio_output_lengths[item_idx]
            if num_features == 0:
                audios = mm_items.get_items("audio", AudioProcessorItems)
                audio = audios.get(item_idx)
                raise ValueError(
                    f"The audio {audio} (len={len(audio)}) is too short "
                    "to be represented inside the model"
                )
            return [audio_token_id] * num_features

        return [
            PromptReplacement(
                modality="audio",
                target=audio_token,
                replacement=get_replacement_qwen2_audio,
            ),
        ]
@MULTIMODAL_REGISTRY.register_processor(
    Qwen3ASRMultiModalProcessor,
    info=Qwen3ASRProcessingInfo,
    dummy_inputs=Qwen3ASRDummyInputsBuilder,
)
class Qwen3ASRForConditionalGeneration(
    nn.Module,
    SupportsMultiModal,
    SupportsPP,
    SupportsMRoPE,
    SupportsTranscription,
):
    """Qwen3-ASR: an audio encoder ("audio tower") feeding a Qwen3 LM.

    Only the "thinker" component of the HF checkpoint is used; "talker."
    and "code2wav." weights are skipped at load time (see `load_weights`).
    """

    supported_languages = ISO639_1_SUPPORTED_LANGS

    # Remap HF "thinker.*" checkpoint prefixes onto this module hierarchy.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "thinker.lm_head.": "language_model.lm_head.",
            "thinker.model.": "language_model.model.",
            "thinker.": "",
        }
    )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder for multimodal item `i` (audio only)."""
        if modality.startswith("audio"):
            return "<|audio_start|><|audio_pad|><|audio_end|>"
        raise ValueError("Only audio modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.vllm_config = vllm_config  # needed for torch compile forward context
        thinker_config: Qwen3ASRThinkerConfig = (
            vllm_config.model_config.hf_config.thinker_config
        )
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = thinker_config
        self.multimodal_config = multimodal_config
        self.quant_config = quant_config

        # Audio encoder is shared with the Qwen3-Omni implementation.
        with self._mark_tower_model(vllm_config, "audio"):
            self.audio_tower = Qwen3OmniMoeAudioEncoder(
                thinker_config.audio_config,
                prefix=maybe_prefix(prefix, "audio_tower"),
            )
        # Text backbone is a plain Qwen3 causal LM built from the thinker's
        # text sub-config.
        with self._mark_language_model(vllm_config):
            self.language_model = Qwen3ForCausalLM(
                vllm_config=vllm_config.with_hf_config(
                    thinker_config.text_config, architectures=["Qwen3ForCausalLM"]
                ),
                prefix=maybe_prefix(prefix, "language_model"),
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _parse_and_validate_audio_input(
        self, **kwargs: object
    ) -> Qwen2_5OmniAudioFeatureInputs | None:
        """Extract audio feature kwargs; returns None when no audio is present."""
        input_audio_features = kwargs.pop("input_audio_features", None)
        audio_feature_lengths = kwargs.pop("audio_feature_lengths", None)
        feature_attention_mask = kwargs.pop("feature_attention_mask", None)
        if input_audio_features is None:
            return None
        return Qwen2_5OmniAudioFeatureInputs(
            type="audio_features",
            input_features=input_audio_features,
            audio_feature_lengths=audio_feature_lengths,
            feature_attention_mask=feature_attention_mask,
        )

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        mm_input_by_modality = {}
        # Preserve the order of modalities if there are multiple of them
        # from the order of kwargs.
        for input_key in kwargs:
            # FIX: the original condition was
            #   `input_key in ("input_audio_features")`
            # where the parentheses do NOT create a tuple, so this performed a
            # *substring* test against the string (keys such as "audio" or
            # "features" would also have matched). Use exact equality instead.
            if (
                input_key == "input_audio_features"
                and "audio" not in mm_input_by_modality
            ):
                mm_input_by_modality["audio"] = self._parse_and_validate_audio_input(
                    **kwargs
                )
        return mm_input_by_modality

    def _process_audio_input(
        self,
        audio_input: Qwen2_5OmniAudioFeatureInputs,
        audio_hashes: list[str] | None = None,
        cached_audio_features: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, ...]:
        """Run the audio tower and split the output per audio item.

        `audio_hashes` and `cached_audio_features` are accepted for interface
        compatibility but are unused here.
        """
        input_features = audio_input["input_features"]
        audio_feature_lengths = audio_input["audio_feature_lengths"]
        # Post-CNN lengths determine how many embedding rows each item owns.
        audio_output_lengths = _get_feat_extract_output_lengths(audio_feature_lengths)
        audio_features = self.audio_tower(
            input_features.to(self.audio_tower.dtype),
            feature_lens=audio_feature_lengths,
            aftercnn_lens=audio_output_lengths,
        )
        return audio_features.split(audio_output_lengths.tolist())

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        """Compute one embedding tensor per multimodal (audio) item."""
        mm_input_by_modality = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not mm_input_by_modality:
            return []

        # The result multimodal_embeddings is tuple of tensors, with each
        # tensor corresponding to a multimodal data item (audio).
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()

        # NOTE: It is important to iterate over the keys in this dictionary
        # to preserve the order of the modalities.
        for modality in mm_input_by_modality:
            multimodal_input = mm_input_by_modality[modality]
            if modality == "audio":
                audio_embeddings = self._process_audio_input(multimodal_input)
                multimodal_embeddings += tuple(audio_embeddings)
        return multimodal_embeddings

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        """Embed token ids, scattering audio embeddings over placeholder slots."""
        inputs_embeds = self._embed_text_input_ids(
            input_ids,
            self.language_model.embed_input_ids,
            is_multimodal=is_multimodal,
            handle_oov_mm_token=handle_oov_mm_token,
        )
        if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
            return inputs_embeds
        inputs_embeds = _merge_multimodal_embeddings(
            inputs_embeds=inputs_embeds,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
        )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        # Non-first pipeline-parallel ranks receive intermediate tensors and
        # must not consume input embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Delegate logits computation to the language model head."""
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, skipping the unused talker/code2wav parts."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=["talker.", "code2wav."],
        )
        loaded_weights = loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
        return loaded_weights

    def get_mrope_input_positions(
        self,
        input_tokens: list[int],
        mm_features: list[MultiModalFeatureSpec],
    ) -> tuple[torch.Tensor, int]:
        """Build (3, seq_len) M-RoPE positions and the position delta.

        Audio uses identical positions on all three rotary axes, so each
        segment is a linear range broadcast to 3 rows.
        """
        seq_len = len(input_tokens)
        if not mm_features:
            # No audio features, just return linear positions
            llm_positions = (
                torch.arange(seq_len, dtype=torch.long).view(1, -1).expand(3, -1)
            )
            return llm_positions.clone(), 0

        llm_pos_ids_list: list[torch.Tensor] = []
        st = 0
        for mm_feature in sorted(mm_features, key=lambda f: f.mm_position.offset):
            offset = mm_feature.mm_position.offset
            # Get audio feature length from mm_feature data
            # (NOTE: .item() assumes a single length per feature — one audio
            # item per placeholder.)
            audio_feature_length = mm_feature.data["audio_feature_lengths"].data
            if isinstance(audio_feature_length, torch.Tensor):
                audio_feature_length = audio_feature_length.item()
            audio_len = _get_feat_extract_output_lengths(
                torch.tensor(audio_feature_length)
            ).item()

            # Text segment before audio (includes audio_start token)
            text_len = offset - st
            st_idx = llm_pos_ids_list[-1].max() + 1 if llm_pos_ids_list else 0
            text_positions = (
                torch.arange(text_len, dtype=torch.long).view(1, -1).expand(3, -1)
                + st_idx
            )
            llm_pos_ids_list.append(text_positions)
            st_idx = st_idx + text_len

            # Audio token segment
            audio_positions = (
                torch.arange(audio_len, dtype=torch.long).view(1, -1).expand(3, -1)
                + st_idx
            )
            llm_pos_ids_list.append(audio_positions)
            st = offset + audio_len

        # Handle remaining text (includes audio_end and any trailing text)
        if st < seq_len:
            st_idx = llm_pos_ids_list[-1].max() + 1 if llm_pos_ids_list else 0
            text_len = seq_len - st
            final_text_positions = (
                torch.arange(text_len, dtype=torch.long).view(1, -1).expand(3, -1)
                + st_idx
            )
            llm_pos_ids_list.append(final_text_positions)

        llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
        if llm_positions.shape[1] != seq_len:
            raise RuntimeError("Position ids length mismatch with input ids length")
        mrope_position_delta = (llm_positions.max() + 1 - seq_len).item()
        return llm_positions, mrope_position_delta

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            tower_model=["audio_tower."],
        )

    @classmethod
    def get_speech_to_text_config(
        cls, model_config: ModelConfig, task_type: str
    ) -> SpeechToTextConfig:
        """Expose the feature extractor's clip length and sample rate."""
        processor = cached_processor_from_config(model_config)
        feature_extractor: WhisperFeatureExtractor = processor.feature_extractor
        return SpeechToTextConfig(
            max_audio_clip_s=feature_extractor.chunk_length,
            sample_rate=feature_extractor.sampling_rate,
        )

    @classmethod
    def get_generation_prompt(
        cls,
        audio: np.ndarray,
        model_config: ModelConfig,
        stt_config: SpeechToTextConfig,
        language: str | None,
        task_type: Literal["transcribe", "translate"],
        request_prompt: str,
        to_language: str | None,
    ) -> PromptType:
        """Get the generation prompt to be used for transcription requests.

        `language` and `request_prompt` are accepted for interface
        compatibility but are not used in the prompt.
        """
        tokenizer = cached_tokenizer_from_config(model_config)
        audio_placeholder = cls.get_placeholder_str("audio", 0)
        if task_type not in ("transcribe", "translate"):
            raise ValueError(
                f"Unsupported task_type '{task_type}'. "
                "Supported task types are 'transcribe' and 'translate'."
            )
        # Map an ISO639-1 code to its full language name; falls back to the
        # raw value when unknown.
        full_lang_name_to = cls.supported_languages.get(to_language, to_language)
        if to_language is None:
            prompt = (
                f"<|im_start|>user\n{audio_placeholder}<|im_end|>\n"
                f"<|im_start|>assistant\n"
            )
        else:
            # Force the target language by pre-filling the assistant turn.
            prompt = (
                f"<|im_start|>user\n{audio_placeholder}<|im_end|>\n"
                f"<|im_start|>assistant\nlanguage {full_lang_name_to}{_ASR_TEXT_TAG}"
            )
        prompt_token_ids = tokenizer.encode(prompt)
        return TokensPrompt(
            prompt_token_ids=prompt_token_ids,
            multi_modal_data={"audio": audio},
        )

    @classmethod
    def post_process_output(cls, text: str) -> str:
        """
        Post-process Qwen3-ASR raw output to extract clean transcription.

        The model outputs in format: "language {lang}<asr_text>{transcription}"
        This method strips the language prefix and asr_text tags.
        """
        if not text:
            return ""
        if _ASR_TEXT_TAG not in text:
            return text
        # Split on <asr_text> tag and take the transcription part
        _, text_part = text.rsplit(_ASR_TEXT_TAG, 1)
        return text_part
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/qwen3_asr.py",
"license": "Apache License 2.0",
"lines": 502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/qwen3_asr.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa
# mypy: ignore-errors
# coding=utf-8
# Copyright 2026 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Qwen3ASRAudioEncoderConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3ASRAudioEncoder`]. It is used to instantiate a
    Qwen3-ASR audio encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio
    architecture.

    e.g. [Qwen/Qwen3-ASR-1.7B](https://huggingface.co/Qwen/Qwen3-ASR-1.7B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_mel_bins (`int`, *optional*, defaults to 128):
            Number of mel features used per input features. Should correspond to the value used in the
            `Qwen3ASRProcessor` class.
        encoder_layers (`int`, *optional*, defaults to 32):
            Number of encoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 20):
            Number of attention heads for each attention layer in the Transformer encoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
        d_model (`int`, *optional*, defaults to 1280):
            Dimensionality of the layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by diving by sqrt(d_model).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        max_source_positions (`int`, *optional*, defaults to 1500):
            The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
        n_window (`int`, *optional*, defaults to 100):
            The chunk for conv and flash attn in AudioEncoder.
        output_dim (`int`, *optional*, defaults to 3584):
            The output dimension of AudioEncoder.
        n_window_infer (`int`, *optional*, defaults to 400):
            Window size used by the AudioEncoder at inference time
            (NOTE(review): inferred from the name; confirm against the encoder implementation).
        conv_chunksize (`int`, *optional*, defaults to 500):
            Chunk size used when running the convolutional front-end
            (NOTE(review): inferred from the name; confirm against the encoder implementation).
        downsample_hidden_size (`int`, *optional*, defaults to 480):
            Hidden size of the downsampling block
            (NOTE(review): inferred from the name; confirm against the encoder implementation).

    Example:

    ```python
    >>> from transformers import Qwen3ASRAudioEncoderConfig, Qwen3ASRAudioEncoder

    >>> # Initializing a Qwen3ASRAudioEncoderConfig
    >>> configuration = Qwen3ASRAudioEncoderConfig()

    >>> # Initializing a Qwen3ASRAudioEncoder (with random weights)
    >>> model = Qwen3ASRAudioEncoder(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen3_asr_audio_encoder"

    def __init__(
        self,
        num_mel_bins=128,
        encoder_layers=32,
        encoder_attention_heads=20,
        encoder_ffn_dim=5120,
        d_model=1280,
        dropout=0,
        attention_dropout=0,
        activation_function="gelu",
        activation_dropout=0,
        scale_embedding=False,
        initializer_range=0.02,
        max_source_positions=1500,
        n_window=100,
        output_dim=3584,
        n_window_infer=400,
        conv_chunksize=500,
        downsample_hidden_size=480,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        # Mirror of encoder_layers under the generic PretrainedConfig name.
        self.num_hidden_layers = encoder_layers
        self.initializer_range = initializer_range
        self.scale_embedding = (
            scale_embedding  # scale factor will be sqrt(d_model) if True
        )
        self.max_source_positions = max_source_positions
        self.n_window = n_window
        self.output_dim = output_dim
        self.n_window_infer = n_window_infer
        self.conv_chunksize = conv_chunksize
        self.downsample_hidden_size = downsample_hidden_size
class Qwen3ASRTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Qwen3ASRTextModel`]. It is used to instantiate a
    Qwen3-ASR model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of
    Qwen3-ASR-1.7B [Qwen/Qwen3-ASR-1.7B](https://huggingface.co/Qwen/Qwen3-ASR-1.7B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 151936):
            Vocabulary size of the Qwen3ASR model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Qwen3ASRModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 22016):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 32):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `32`.
        head_dim (`int`, *optional*, defaults to 128):
            The dimension of the head. If not specified, will default to `hidden_size // num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 128000):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 5000000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
            accordingly.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`list[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
        attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import Qwen3ASRTextModel, Qwen3ASRTextConfig

    >>> # Initializing a Qwen3ASR style configuration
    >>> configuration = Qwen3ASRTextConfig()

    >>> # Initializing a model from the Qwen3-VL-7B style configuration
    >>> model = Qwen3ASRTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "qwen3_asr_text"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        intermediate_size=22016,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        head_dim=128,
        hidden_act="silu",
        max_position_embeddings=128000,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=5000000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.head_dim = head_dim
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, move it to 'rope_type'.
        # NOTE: validation must run after the BC shim above and before
        # super().__init__ consumes the remaining kwargs.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)
        # tie_word_embeddings and any remaining kwargs are handled by
        # PretrainedConfig.
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class Qwen3ASRThinkerConfig(PretrainedConfig):
    r"""Configuration for the Qwen3-ASR "thinker" (audio encoder + text LM).

    Defaults yield a configuration similar to the thinker component of
    [Qwen/Qwen3-ASR-1.7B](https://huggingface.co/Qwen/Qwen3-ASR-1.7B).
    Configuration objects inherit from [`PretrainedConfig`]; see its
    documentation for the inherited behavior.

    Args:
        audio_config (`dict` or `Qwen3ASRAudioEncoderConfig`, *optional*):
            Audio backbone configuration; a dict is converted, `None` uses defaults.
        text_config (`dict` or `Qwen3ASRTextConfig`, *optional*):
            Text backbone configuration; a dict is converted, `None` uses defaults.
        audio_token_id (`int`, *optional*, defaults to 151646):
            The audio token id to encode the audio prompt.
        audio_start_token_id (`int`, *optional*, defaults to 151647):
            The audio start token id to encode the audio prompt.
        user_token_id (`int`, *optional*, defaults to 872):
            The user token id to encode the user token.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    """

    model_type = "qwen3_asr_thinker"
    attribute_map = {}
    sub_configs = {
        "audio_config": Qwen3ASRAudioEncoderConfig,
        "text_config": Qwen3ASRTextConfig,
    }

    def __init__(
        self,
        audio_config=None,
        text_config=None,
        audio_token_id=151646,
        audio_start_token_id=151647,
        user_token_id=872,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.user_token_id = user_token_id
        self.audio_start_token_id = audio_start_token_id
        self.initializer_range = initializer_range

        # Normalize the audio sub-config: dict -> config object, None -> defaults.
        if audio_config is None:
            audio_config = Qwen3ASRAudioEncoderConfig()
        elif isinstance(audio_config, dict):
            audio_config = Qwen3ASRAudioEncoderConfig(**audio_config)
        self.audio_config = audio_config

        # Same normalization for the text sub-config.
        if text_config is None:
            text_config = Qwen3ASRTextConfig()
        elif isinstance(text_config, dict):
            text_config = Qwen3ASRTextConfig(**text_config)
        self.text_config = text_config

        self.audio_token_id = audio_token_id
class Qwen3ASRConfig(PretrainedConfig):
    """Top-level configuration for [`Qwen3ASRForConditionalGeneration`].

    Wraps the thinker sub-model configuration; defaults yield a configuration
    similar to [Qwen/Qwen3-ASR-1.7B](https://huggingface.co/Qwen/Qwen3-ASR-1.7B).
    Configuration objects inherit from [`PretrainedConfig`]; see its
    documentation for the inherited behavior.

    Args:
        thinker_config (`dict`, *optional*): Configuration of the underlying thinker sub-model.
        support_languages (`List[str]`, *optional*): The languages supported by the model.
    """

    model_type = "qwen3_asr"
    sub_configs = {
        "thinker_config": Qwen3ASRThinkerConfig,
    }

    def __init__(
        self,
        thinker_config=None,
        support_languages=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if thinker_config is None:
            logger.info(
                "thinker_config is None. Initializing thinker model with default values"
            )
            thinker_config = {}
        self.thinker_config = Qwen3ASRThinkerConfig(**thinker_config)
        self.support_languages = support_languages

    def get_text_config(self, decoder=False) -> "PretrainedConfig":
        """Return the config that is meant to be used with text IO.

        Args:
            decoder (`Optional[bool]`, *optional*, defaults to `False`):
                If set to `True`, then only search for decoder config names.
        """
        # Deeply nested composite config: the text config lives under
        # thinker_config. NOTE: currently this method is used only by vLLM.
        return self.thinker_config.get_text_config()
__all__ = ["Qwen3ASRConfig", "Qwen3ASRThinkerConfig", "Qwen3ASRAudioEncoderConfig"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/qwen3_asr.py",
"license": "Apache License 2.0",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/pooling/score/test_online_score_vision.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
import pytest
import requests
from tests.utils import VLLM_PATH, RemoteOpenAIServer
from vllm.entrypoints.pooling.score.protocol import RerankResponse, ScoreResponse
from vllm.multimodal.utils import encode_image_url, fetch_image
from vllm.platforms import current_platform
MODEL_NAME = "Qwen/Qwen3-VL-Reranker-2B"

# The checkpoint ships as a generative model; these overrides make vLLM load
# it as a sequence classifier scoring with the "no"/"yes" tokens.
HF_OVERRIDES = {
    "architectures": ["Qwen3VLForSequenceClassification"],
    "classifier_from_token": ["no", "yes"],
    "is_original_qwen3_reranker": True,
}

# Attention backends to parametrize the `server` fixture with on ROCm.
ROCM_ATTN_BACKENDS = [
    "ROCM_ATTN",
    "ROCM_AITER_FA",
    "TRITON_ATTN",
    "FLEX_ATTENTION",
]
# Empty on non-ROCm platforms, so the parametrized tests only run on ROCm.
ATTN_BACKENDS = ROCM_ATTN_BACKENDS if current_platform.is_rocm() else []

# Per-backend tolerance with explicit entries; "default" is the fallback
BACKEND_TOL: dict[str, float] = {
    "default": 0.05,  # 5% tolerance for other backends (e.g. FLASH_ATTN)
    # Relaxed tolerances for ROCm attn
    # See: https://github.com/vllm-project/vllm/issues/35569
    "ROCM_ATTN": 0.09,  # gfx950:~8.45%, gfx942:~3.70%
    "ROCM_AITER_FA": 0.045,  # gfx950:~2.00%, gfx942:~0.80%
    "TRITON_ATTN": 0.045,  # gfx950:~3.00%, gfx942:~2.20%
    "FLEX_ATTENTION": 0.045,  # gfx950:~3.25%, gfx942:~1.10%
}

# ROCm: disable skinny GEMM to avoid non-deterministic results from
# atomic reductions in wvSplitKrc kernel.
# See: https://github.com/vllm-project/vllm/pull/33493#issuecomment-3906083975
ROCM_ENV_OVERRIDES = (
    {"VLLM_ROCM_USE_SKINNY_GEMM": "0"} if current_platform.is_rocm() else {}
)

# ROCm: disable prefix caching and eliminate batch variance to reduce
# test flakiness.
ROCM_EXTRA_ARGS = (
    ["--no-enable-prefix-caching", "--max-num-seqs", "1"]
    if current_platform.is_rocm()
    else []
)
def get_tol(backend: str) -> float:
return BACKEND_TOL.get(backend, BACKEND_TOL["default"])
def assert_score(actual: float, expected: float, backend: str, label: str):
    """Assert *actual* matches *expected* within the backend's relative tolerance.

    Prints a diagnostic line before asserting so failures are easy to triage.
    """
    diff = abs(actual - expected)
    if expected != 0:
        rel_diff = diff / abs(expected)
    else:
        rel_diff = diff
    tol = get_tol(backend)

    print(
        f"[{backend}] {label}: actual={actual:.6f} expected={expected:.6f} "
        f"diff={diff:.6f} rel_diff={rel_diff:.4f} tol={tol}"
    )
    assert actual == pytest.approx(expected, rel=tol), (
        f"[{backend}] {label}: score mismatch — "
        f"actual={actual:.6f}, expected={expected:.6f}, "
        f"rel_diff={rel_diff:.4f}, tol={tol}"
    )
# Query/document pair plus an image of the queried scene. The plain-text
# document is unrelated to the query, so text-vs-text scores low while
# text-vs-image scores high (see the expected values below).
query = "A cat standing in the snow."
document = "This product was excellent and exceeded my expectations."
image_url = "https://vllm-public-assets.s3.us-west-2.amazonaws.com/multimodal_asset/cat_snow.jpg"

# Candidate documents in the three supported content forms: plain text,
# remote image URL, and base64 data URL.
# NOTE(review): `fetch_image` downloads the image at import time, so
# collecting this module requires network access.
documents = [
    {
        "type": "text",
        "text": document,
    },
    {
        "type": "image_url",
        "image_url": {"url": image_url},
    },
    {
        "type": "image_url",
        "image_url": {"url": encode_image_url(fetch_image(image_url))},
    },
]

# Reference scores for each pairing; compared against live results using the
# per-backend relative tolerances in BACKEND_TOL.
TEXT_VS_TEXT = 0.10040374100208282
TEXT_VS_IMAGE = 0.7423753142356873
TEXT_VS_TEXT_PLUS_IMAGE = 0.5298863053321838
@pytest.fixture(scope="module", params=ATTN_BACKENDS)
def server(request):
    """Start a RemoteOpenAIServer per parametrized attention backend.

    Yields a `(server, backend_name)` tuple.
    """
    backend = request.param
    print(f"\n=== Starting server with attention backend: {backend} ===")

    cli_args = [
        "--enforce-eager",
        "--max-model-len",
        "8192",
        "--chat-template",
        str(VLLM_PATH / "examples/pooling/score/template/qwen3_vl_reranker.jinja"),
        "--attention-config",
        json.dumps({"backend": backend}),
        *ROCM_EXTRA_ARGS,
    ]

    env = {**ROCM_ENV_OVERRIDES}
    # Disable AITER for every backend except the AITER one.
    if backend != "ROCM_AITER_FA":
        env["VLLM_ROCM_USE_AITER"] = "0"

    with RemoteOpenAIServer(
        MODEL_NAME, cli_args, override_hf_configs=HF_OVERRIDES, env_dict=env
    ) as srv:
        print(f"=== Server ready with backend: {backend} ===")
        yield srv, backend
def test_score_api_queries_str_documents_str(server: tuple[RemoteOpenAIServer, str]):
    """Score a plain-text query against a plain-text document."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": document,
    }
    resp = requests.post(remote_server.url_for("score"), json=payload)
    resp.raise_for_status()

    score = ScoreResponse.model_validate(resp.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 1
    assert score.usage.prompt_tokens == 81
    assert_score(score.data[0].score, TEXT_VS_TEXT, backend, "text_vs_text")
def test_score_api_queries_str_documents_text_content(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score a plain-text query against a structured text-content document."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": {"content": [documents[0]]},
    }
    resp = requests.post(remote_server.url_for("score"), json=payload)
    resp.raise_for_status()

    score = ScoreResponse.model_validate(resp.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 1
    assert score.usage.prompt_tokens == 81
    assert_score(score.data[0].score, TEXT_VS_TEXT, backend, "text_vs_text")
def test_score_api_queries_str_documents_image_url_content(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score a plain-text query against an image-URL content part."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": {"content": [documents[1]]},
    }
    response = requests.post(remote_server.url_for("score"), json=payload)
    response.raise_for_status()
    score = ScoreResponse.model_validate(response.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 1
    assert score.usage.prompt_tokens == 98
    assert_score(score.data[0].score, TEXT_VS_IMAGE, backend, "text_vs_image")
def test_score_api_queries_str_documents_image_base64_content(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score a plain-text query against a base64-encoded image content part."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": {"content": [documents[2]]},
    }
    response = requests.post(remote_server.url_for("score"), json=payload)
    response.raise_for_status()
    score = ScoreResponse.model_validate(response.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 1
    assert score.usage.prompt_tokens == 98
    assert_score(score.data[0].score, TEXT_VS_IMAGE, backend, "text_vs_image_base64")
def test_score_api_queries_str_documents_image_url_plus_text_content(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score a plain-text query against mixed text + image content parts."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": {"content": [documents[0], documents[1]]},
    }
    response = requests.post(remote_server.url_for("score"), json=payload)
    response.raise_for_status()
    score = ScoreResponse.model_validate(response.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 1
    assert score.usage.prompt_tokens == 108
    assert_score(
        score.data[0].score, TEXT_VS_TEXT_PLUS_IMAGE, backend, "text_vs_text_plus_image"
    )
def test_score_api_queries_str_documents_list(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score one query against a heterogeneous list of documents."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": query,
        "documents": [
            document,
            {"content": [documents[0]]},
            {"content": [documents[1]]},
            {"content": [documents[0], documents[1]]},
        ],
    }
    response = requests.post(remote_server.url_for("score"), json=payload)
    response.raise_for_status()
    score = ScoreResponse.model_validate(response.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 4
    assert score.usage.prompt_tokens == 368
    expected = [
        (TEXT_VS_TEXT, "list[0]_text_vs_text"),
        (TEXT_VS_TEXT, "list[1]_text_vs_text"),
        (TEXT_VS_IMAGE, "list[2]_text_vs_image"),
        (TEXT_VS_TEXT_PLUS_IMAGE, "list[3]_text_vs_text_plus_image"),
    ]
    for item, (reference, label) in zip(score.data, expected):
        assert_score(item.score, reference, backend, label)
def test_rerank_api_queries_str_documents_list(
    server: tuple[RemoteOpenAIServer, str],
):
    """Rerank a heterogeneous list of documents against one query."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "query": query,
        "documents": [
            document,
            {"content": [documents[0]]},
            {"content": [documents[1]]},
            {"content": [documents[0], documents[1]]},
        ],
    }
    response = requests.post(remote_server.url_for("rerank"), json=payload)
    response.raise_for_status()
    rerank = RerankResponse.model_validate(response.json())
    assert rerank.id is not None
    assert rerank.model is not None
    assert rerank.usage is not None
    assert len(rerank.results) == 4
    # Sort by original index so assertions map back to request order.
    rerank.results.sort(key=lambda item: item.index)
    expected = [
        (TEXT_VS_TEXT, "rerank[0]_text_vs_text"),
        (TEXT_VS_TEXT, "rerank[1]_text_vs_text"),
        (TEXT_VS_IMAGE, "rerank[2]_text_vs_image"),
        (TEXT_VS_TEXT_PLUS_IMAGE, "rerank[3]_text_vs_text_plus_image"),
    ]
    for result, (reference, label) in zip(rerank.results, expected):
        assert_score(result.relevance_score, reference, backend, label)
def test_score_api_queries_list_documents_list(
    server: tuple[RemoteOpenAIServer, str],
):
    """Score paired query/document lists of equal length."""
    remote_server, backend = server
    payload = {
        "model": MODEL_NAME,
        "queries": [query] * 4,
        "documents": [
            document,
            {"content": [documents[0]]},
            {"content": [documents[1]]},
            {"content": [documents[0], documents[1]]},
        ],
    }
    response = requests.post(remote_server.url_for("score"), json=payload)
    response.raise_for_status()
    score = ScoreResponse.model_validate(response.json())
    assert score.id is not None
    assert score.data is not None
    assert len(score.data) == 4
    assert score.usage.prompt_tokens == 368
    expected = [
        (TEXT_VS_TEXT, "paired[0]_text_vs_text"),
        (TEXT_VS_TEXT, "paired[1]_text_vs_text"),
        (TEXT_VS_IMAGE, "paired[2]_text_vs_image"),
        (TEXT_VS_TEXT_PLUS_IMAGE, "paired[3]_text_vs_text_plus_image"),
    ]
    for item, (reference, label) in zip(score.data, expected):
        assert_score(item.score, reference, backend, label)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/pooling/score/test_online_score_vision.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tools/pre_commit/generate_attention_backend_docs.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Generates documentation table for attention backends showing feature support.
This script parses all registered attention backends using AST (no imports needed)
and generates a markdown table showing what features each backend supports,
based on the checks in AttentionBackend.validate_configuration().
This approach avoids requiring CUDA/ROCm/GPU libraries to be installed.
When used as a pre-commit hook, this script receives filenames as arguments
and only runs the check if any of the relevant files were modified.
"""
import argparse
import ast
import fnmatch
import sys
from collections.abc import Callable
from pathlib import Path
from typing import Any
# ---------------------------------------------------------------------------
# Constants and file paths
# ---------------------------------------------------------------------------
# This script lives at tools/pre_commit/<name>.py, so three parent hops reach
# the repository root.
REPO_ROOT = Path(__file__).parent.parent.parent
# Glob patterns (relative to REPO_ROOT) whose modification triggers the check.
RELEVANT_PATTERNS = [
    "vllm/v1/attention/backends/*.py",
    "vllm/v1/attention/backends/**/*.py",
    "vllm/v1/attention/backends/fa_utils.py",
    "vllm/model_executor/layers/attention/mla_attention.py",
    "vllm/platforms/cuda.py",
    "tools/pre_commit/generate_attention_backend_docs.py",
    "docs/design/attention_backends.md",
]
# Source files that are parsed (never imported) to extract backend features.
BACKENDS_DIR = REPO_ROOT / "vllm" / "v1" / "attention" / "backends"
REGISTRY_FILE = BACKENDS_DIR / "registry.py"
CUDA_PLATFORM_FILE = REPO_ROOT / "vllm" / "platforms" / "cuda.py"
FA_UTILS_FILE = BACKENDS_DIR / "fa_utils.py"
FLASHINFER_UTILS_FILE = REPO_ROOT / "vllm" / "utils" / "flashinfer.py"
MLA_ATTENTION_FILE = (
    REPO_ROOT / "vllm" / "model_executor" / "layers" / "attention" / "mla_attention.py"
)
# Backends to skip during doc generation
SKIP_BACKENDS = {"CUSTOM", "TORCH_SDPA"}
def is_relevant_file(filepath: str) -> bool:
    """Check if a file matches any of the relevant patterns."""
    path = Path(filepath)
    if path.is_absolute():
        # Patterns are repo-relative; absolute paths outside the repo never match.
        try:
            path = path.relative_to(REPO_ROOT)
        except ValueError:
            return False
    candidate = str(path)
    for pattern in RELEVANT_PATTERNS:
        if fnmatch.fnmatch(candidate, pattern):
            return True
    return False
# ---------------------------------------------------------------------------
# AST utility helpers
# ---------------------------------------------------------------------------
def find_class_in_ast(tree: ast.AST, class_name: str) -> ast.ClassDef | None:
"""Find a class definition in an AST."""
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef) and node.name == class_name:
return node
return None
def find_method(node: ast.ClassDef, method_name: str) -> ast.FunctionDef | None:
"""Find a method in a class definition."""
for item in node.body:
if isinstance(item, ast.FunctionDef) and item.name == method_name:
return item
return None
def method_returns_true(method: ast.FunctionDef | None) -> bool:
"""Check if a method simply returns True."""
if method is None:
return False
for node in ast.walk(method):
if (
isinstance(node, ast.Return)
and isinstance(node.value, ast.Constant)
and node.value.value is True
):
return True
return False
def check_method_overrides(node: ast.ClassDef, method_name: str) -> bool:
    """Check if a method is overridden and returns True."""
    method = find_method(node, method_name)
    return method_returns_true(method)
def _find_bool_class_var(class_node: ast.ClassDef, var_name: str) -> bool | None:
"""Find a bool class variable in a class definition. Returns None if not found."""
for item in class_node.body:
# Check for annotated assignment: attr: bool = True/False
if (
isinstance(item, ast.AnnAssign)
and isinstance(item.target, ast.Name)
and item.target.id == var_name
and isinstance(item.value, ast.Constant)
and isinstance(item.value.value, bool)
):
return item.value.value
# Check for plain assignment: attr = True/False
if isinstance(item, ast.Assign):
for target in item.targets:
if (
isinstance(target, ast.Name)
and target.id == var_name
and isinstance(item.value, ast.Constant)
and isinstance(item.value.value, bool)
):
return item.value.value
return None
def _parse_list_class_var(node: ast.ClassDef, var_name: str) -> list[str] | None:
"""Parse a list-type class variable, returning None if not found."""
for item in node.body:
if not isinstance(item, ast.AnnAssign):
continue
if not isinstance(item.target, ast.Name):
continue
if item.target.id != var_name:
continue
if not (item.value and isinstance(item.value, ast.List)):
continue
result = []
for elt in item.value.elts:
if isinstance(elt, ast.Attribute):
result.append(elt.attr)
elif isinstance(elt, ast.Constant):
result.append(str(elt.value))
return result
return None
def _parse_return_list(
method: ast.FunctionDef | None, handle_multiple_of: bool = False
) -> list[str]:
"""Extract list items from a method's return statement."""
if method is None:
return []
for stmt in ast.walk(method):
if not isinstance(stmt, ast.Return):
continue
if not isinstance(stmt.value, ast.List):
continue
sizes = []
for elt in stmt.value.elts:
if isinstance(elt, ast.Constant):
sizes.append(str(elt.value))
elif (
handle_multiple_of
and isinstance(elt, ast.Call)
and isinstance(elt.func, ast.Name)
and elt.func.id == "MultipleOf"
and elt.args
and isinstance(elt.args[0], ast.Constant)
):
sizes.append(f"%{elt.args[0].value}")
if sizes:
return sizes
return []
def _get_parent_class_name(class_node: ast.ClassDef) -> str | None:
"""Get the first parent class name (simple name only).
Handles both simple inheritance (class Foo(Bar)) and generic
inheritance (class Foo(Bar[T])).
"""
if not class_node.bases:
return None
base = class_node.bases[0]
if isinstance(base, ast.Name):
return base.id
if isinstance(base, ast.Subscript) and isinstance(base.value, ast.Name):
return base.value.id
return None
def _resolve_import_to_file(
    tree: ast.AST, class_name: str, source_file: Path | None = None
) -> Path | None:
    """Try to resolve a class name to its source file via imports in the AST.

    Handles both absolute imports (from vllm.foo import Bar) and relative
    imports (from .foo import Bar) when source_file is provided. Returns the
    first existing .py file found, or None when no import matches.
    """
    for node in ast.walk(tree):
        if not isinstance(node, ast.ImportFrom):
            continue
        for alias in node.names:
            # Honor `from x import Y as Z` renames when matching the name.
            actual_name = alias.asname or alias.name
            if actual_name != class_name:
                continue
            if not node.module:
                # `from . import foo` has no module part; cannot resolve here.
                continue
            if node.level and node.level > 0 and source_file:
                # Relative import: resolve from the source file's directory.
                # level 1 = same package; each extra level walks one dir up.
                base_dir = source_file.parent
                for _ in range(node.level - 1):
                    base_dir = base_dir.parent
                module_path = node.module.replace(".", "/")
                py_file = base_dir / f"{module_path}.py"
            else:
                # Absolute import
                module_path = node.module.replace(".", "/")
                py_file = REPO_ROOT / f"{module_path}.py"
            if py_file.exists():
                return py_file
    return None
def _find_cc_in_function(tree: ast.AST, func_name: str) -> str | None:
"""Find a compute capability from is_device_capability_family() calls in a function.
Looks for the pattern: current_platform.is_device_capability_family(N)
and converts N (e.g. 100) to a CC string (e.g. "10.x").
"""
for node in ast.walk(tree):
if not isinstance(node, ast.FunctionDef) or node.name != func_name:
continue
for n in ast.walk(node):
if (
isinstance(n, ast.Call)
and isinstance(n.func, ast.Attribute)
and n.func.attr == "is_device_capability_family"
and n.args
and isinstance(n.args[0], ast.Constant)
and isinstance(n.args[0].value, int)
):
return f"{n.args[0].value // 10}.x"
return None
# ---------------------------------------------------------------------------
# Registry and file resolution
# ---------------------------------------------------------------------------
def parse_registry() -> dict[str, str]:
    """Parse the registry.py file to get backend names and their class paths."""
    tree = ast.parse(REGISTRY_FILE.read_text())
    enum_class = find_class_in_ast(tree, "AttentionBackendEnum")
    if enum_class is None:
        return {}
    return _extract_enum_values(enum_class)
def _extract_enum_values(node: ast.ClassDef) -> dict[str, str]:
"""Extract enum name -> value mapping from a class definition."""
result: dict[str, str] = {}
for item in node.body:
if not isinstance(item, ast.Assign):
continue
for target in item.targets:
if not isinstance(target, ast.Name):
continue
if isinstance(item.value, ast.Constant) and item.value.value:
result[target.id] = item.value.value
return result
def get_file_from_class_path(class_path: str) -> Path | None:
    """Convert a class path to a file path."""
    if not class_path:
        return None
    # Drop the trailing class name, then map dots to directory separators.
    module_part = class_path.rsplit(".", 1)[0]
    candidate = REPO_ROOT / f"{module_part.replace('.', '/')}.py"
    if candidate.exists():
        return candidate
    return None
# ---------------------------------------------------------------------------
# Backend feature extraction from AST
# ---------------------------------------------------------------------------
def parse_supported_dtypes(node: ast.ClassDef) -> str:
    """Parse supported_dtypes class variable."""
    abbreviations = {"float16": "fp16", "bfloat16": "bf16", "float32": "fp32"}
    declared = _parse_list_class_var(node, "supported_dtypes")
    if declared is None:
        # Default shown when the backend does not declare the variable.
        return "fp16, bf16"
    return ", ".join(abbreviations.get(name, name) for name in declared)
def parse_kv_cache_dtypes(node: ast.ClassDef) -> str:
    """Parse supported_kv_cache_dtypes class var or supports_kv_cache_dtype method."""
    # Prefer the explicit class variable when present and non-empty.
    declared = _parse_list_class_var(node, "supported_kv_cache_dtypes")
    if declared:
        return ", ".join(declared)
    # Fall back to the supports_kv_cache_dtype method, looking for a
    # `kv_cache_dtype in ["auto", "bfloat16"]` membership test.
    method = find_method(node, "supports_kv_cache_dtype")
    if method is not None:
        for expr in ast.walk(method):
            is_membership_test = (
                isinstance(expr, ast.Compare)
                and len(expr.ops) == 1
                and isinstance(expr.ops[0], ast.In)
                and len(expr.comparators) == 1
                and isinstance(expr.comparators[0], ast.List)
            )
            if not is_membership_test:
                continue
            found = [
                elt.value
                for elt in expr.comparators[0].elts
                if isinstance(elt, ast.Constant) and isinstance(elt.value, str)
            ]
            if found:
                return ", ".join(found)
    return "auto"
def parse_block_sizes(node: ast.ClassDef) -> str:
    """Parse get_supported_kernel_block_sizes method."""
    sizes = _parse_return_list(
        find_method(node, "get_supported_kernel_block_sizes"),
        handle_multiple_of=True,
    )
    if not sizes:
        return "Any"
    return ", ".join(sizes)
def parse_head_sizes(node: ast.ClassDef) -> str:
    """Parse get_supported_head_sizes method."""
    sizes = _parse_return_list(find_method(node, "get_supported_head_sizes"))
    if not sizes:
        return "Any"
    return ", ".join(sizes)
def parse_compute_capability(node: ast.ClassDef) -> str:
    """Parse supports_compute_capability method.

    Recognizes two shapes of check and renders a human-readable range:
    - `capability >= / <= DeviceCapability(major[, minor])` bounds
    - `capability.major == N` or `capability.major in [N, M]` exact majors
    Exact-major matches take precedence over min/max bounds when both appear.
    """
    method = find_method(node, "supports_compute_capability")
    if method is None:
        # No override means no restriction.
        return "Any"
    min_cap: tuple[int, int] | None = None
    max_cap: tuple[int, int] | None = None
    major_list: list[int] = []
    for n in ast.walk(method):
        if not isinstance(n, ast.Compare):
            continue
        # Handle `capability >= DeviceCapability(...)` or `capability <= ...`
        for op, comp in zip(n.ops, n.comparators):
            if not (
                isinstance(comp, ast.Call)
                and isinstance(comp.func, ast.Name)
                and comp.func.id == "DeviceCapability"
                and comp.args
                and isinstance(comp.args[0], ast.Constant)
            ):
                continue
            major = comp.args[0].value
            # Minor defaults to 0 when DeviceCapability(major) has one arg.
            minor = 0
            if len(comp.args) > 1 and isinstance(comp.args[1], ast.Constant):
                minor = comp.args[1].value
            if isinstance(op, ast.GtE):
                min_cap = (major, minor)
            elif isinstance(op, ast.LtE):
                max_cap = (major, minor)
        # Handle `capability.major == N` or `capability.major in [N, M]`
        if (
            isinstance(n.left, ast.Attribute)
            and n.left.attr == "major"
            and len(n.ops) == 1
            and len(n.comparators) == 1
        ):
            comp = n.comparators[0]
            if isinstance(n.ops[0], ast.Eq) and isinstance(comp, ast.Constant):
                major_list.append(comp.value)
            elif isinstance(n.ops[0], ast.In) and isinstance(comp, ast.List):
                major_list.extend(
                    e.value
                    for e in comp.elts
                    if isinstance(e, ast.Constant) and isinstance(e.value, int)
                )
    # Render: exact majors first, then min/max range, else unrestricted.
    if major_list:
        major_list.sort()
        if len(major_list) == 1:
            return f"{major_list[0]}.x"
        return f"{major_list[0]}.x-{major_list[-1]}.x"
    if min_cap:
        if max_cap:
            return f"{min_cap[0]}.x-{max_cap[0]}.x"
        return f"≥{min_cap[0]}.{min_cap[1]}"
    return "Any"
def parse_attention_types(node: ast.ClassDef) -> str:
    """Parse supports_attn_type method."""
    method = find_method(node, "supports_attn_type")
    if method is None:
        # No override means decoder-only support.
        return "Decoder"
    label_for = {
        "DECODER": "Decoder",
        "ENCODER": "Encoder",
        "ENCODER_ONLY": "Encoder Only",
        "ENCODER_DECODER": "Enc-Dec",
    }
    found: set[str] = set()
    for expr in ast.walk(method):
        # Only `attn_type in (AttentionType.DECODER, ...)` membership tests count.
        if not (
            isinstance(expr, ast.Compare)
            and len(expr.ops) == 1
            and isinstance(expr.ops[0], ast.In)
            and len(expr.comparators) == 1
            and isinstance(expr.comparators[0], ast.Tuple | ast.Set)
        ):
            continue
        for member in expr.comparators[0].elts:
            if isinstance(member, ast.Attribute) and member.attr in label_for:
                found.add(label_for[member.attr])
    if not found:
        return "Decoder"
    if len(found) >= 3:
        return "All"
    return ", ".join(sorted(found))
def parse_impl_bool_attr(
    tree: ast.AST,
    class_name: str,
    attr_name: str,
    default: bool = False,
    source_file: Path | None = None,
    _visited: set[str] | None = None,
) -> bool:
    """Parse a boolean class attribute from an impl class, following inheritance.

    Walks up the inheritance chain within the same file and across files
    (by resolving imports) to find the attribute value. Returns *default*
    when the class or attribute cannot be found.

    :param _visited: internal cycle guard; class names already examined.
    """
    if _visited is None:
        _visited = set()
    # Cycle guard: a class name seen twice means circular inheritance.
    if class_name in _visited:
        return default
    _visited.add(class_name)
    class_node = find_class_in_ast(tree, class_name)
    if class_node is None:
        return default
    # Check directly on this class
    value = _find_bool_class_var(class_node, attr_name)
    if value is not None:
        return value
    # Check parent class
    parent_name = _get_parent_class_name(class_node)
    if parent_name:
        # Try parent in same file first
        parent_node = find_class_in_ast(tree, parent_name)
        if parent_node is not None:
            return parse_impl_bool_attr(
                tree, parent_name, attr_name, default, source_file, _visited
            )
        # Try resolving cross-file import
        parent_file = _resolve_import_to_file(tree, parent_name, source_file)
        if parent_file:
            try:
                parent_tree = ast.parse(parent_file.read_text())
                return parse_impl_bool_attr(
                    parent_tree,
                    parent_name,
                    attr_name,
                    default,
                    parent_file,
                    _visited,
                )
            except Exception:
                # Unparseable parent file: fall through to the default.
                pass
    return default
def analyze_backend(backend_name: str, class_path: str) -> dict[str, Any] | None:
    """Analyze a backend class and extract feature information.

    :param backend_name: registry enum name (e.g. "FLASH_ATTN")
    :param class_path: dotted path to the backend class
    :return: feature dict consumed by the table generator, or None when the
        source file is missing, unparseable, or lacks the class.
    """
    file_path = get_file_from_class_path(class_path)
    if file_path is None:
        return None
    try:
        tree = ast.parse(file_path.read_text())
    except Exception as e:
        print(f"  Warning: Could not parse {file_path}: {e}", file=sys.stderr)
        return None
    class_name = class_path.rsplit(".", 1)[1]
    class_node = find_class_in_ast(tree, class_name)
    if class_node is None:
        return None
    # Check if this is an MLA backend by parent class or naming
    parent = _get_parent_class_name(class_node)
    mla_parents = {"MLACommonBackend", "FlashMLABackend", "FlashMLASparseBackend"}
    is_mla_backend = (
        parent in mla_parents
        or ".mla." in class_path.lower()
        or "_mla" in backend_name.lower()
    )
    # Determine compute capability - use N/A for non-CUDA backends
    is_non_cuda = backend_name.startswith(("CPU_", "ROCM_"))
    compute_cap = "N/A" if is_non_cuda else parse_compute_capability(class_node)
    # Parse impl class features (DCP support): find the class returned by
    # get_impl_cls(), then look up its can_return_lse_for_decode attribute.
    impl_method = find_method(class_node, "get_impl_cls")
    impl_class_name = None
    if impl_method:
        for stmt in ast.walk(impl_method):
            if isinstance(stmt, ast.Return) and isinstance(stmt.value, ast.Name):
                impl_class_name = stmt.value.id
                break
    supports_dcp = False
    if impl_class_name:
        supports_dcp = parse_impl_bool_attr(
            tree, impl_class_name, "can_return_lse_for_decode", False, file_path
        )
    return {
        "name": backend_name,
        "dtypes": parse_supported_dtypes(class_node),
        "kv_cache_dtypes": parse_kv_cache_dtypes(class_node),
        "block_sizes": parse_block_sizes(class_node),
        "head_sizes": parse_head_sizes(class_node),
        "attn_types": parse_attention_types(class_node),
        "compute_capability": compute_cap,
        "is_mla": is_mla_backend or check_method_overrides(class_node, "is_mla"),
        "supports_sink": check_method_overrides(class_node, "supports_sink"),
        "is_sparse": check_method_overrides(class_node, "is_sparse"),
        "supports_mm_prefix": check_method_overrides(class_node, "supports_mm_prefix"),
        "supports_dcp": supports_dcp,
    }
# ---------------------------------------------------------------------------
# Special backend variant parsers (FA2/FA3/FA4, FlashInfer TRTLLM, MLA prefill)
# ---------------------------------------------------------------------------
def _parse_fa4_supported_caps() -> str | None:
    """Parse flash_attn_interface.py for FA4 supported compute capabilities.

    Looks for `cc not in [9, 10, 11]` pattern in _is_fa4_supported().
    """
    interface_path = (
        REPO_ROOT / "vllm" / "vllm_flash_attn" / "flash_attn_interface.py"
    )
    if not interface_path.exists():
        return None
    try:
        tree = ast.parse(interface_path.read_text())
    except Exception:
        return None
    for node in ast.walk(tree):
        if not (isinstance(node, ast.FunctionDef) and node.name == "_is_fa4_supported"):
            continue
        for cmp_node in ast.walk(node):
            if not (
                isinstance(cmp_node, ast.Compare)
                and len(cmp_node.ops) == 1
                and isinstance(cmp_node.ops[0], ast.NotIn)
                and isinstance(cmp_node.comparators[0], ast.List)
            ):
                continue
            caps = sorted(
                elt.value
                for elt in cmp_node.comparators[0].elts
                if isinstance(elt, ast.Constant) and isinstance(elt.value, int)
            )
            if caps:
                return f"{caps[0]}.x-{caps[-1]}.x"
    return None
def parse_flash_attn_features() -> dict[str, dict[str, Any]]:
    """Parse fa_utils.py to detect FA2 vs FA3 vs FA4 feature differences.

    Returns a dict with 'fa2', 'fa3', and 'fa4' keys containing their respective
    feature overrides for compute capability, KV cache dtypes, and sink support.
    Returns {} when fa_utils.py is missing or unparseable.
    """
    if not FA_UTILS_FILE.exists():
        return {}
    try:
        tree = ast.parse(FA_UTILS_FILE.read_text())
    except Exception:
        return {}
    # Analyze the functions to determine FA3-specific features
    fa3_supports_fp8 = False
    fa3_supports_sinks = False
    fa3_compute_cap: str | None = None
    fa4_compute_cap: str | None = None
    for node in ast.walk(tree):
        if not isinstance(node, ast.FunctionDef):
            continue
        # Check flash_attn_supports_fp8 - looks for `get_flash_attn_version() == 3`
        # (any comparison against get_flash_attn_version() is treated as a match)
        if node.name == "flash_attn_supports_fp8":
            for n in ast.walk(node):
                if (
                    isinstance(n, ast.Compare)
                    and isinstance(n.left, ast.Call)
                    and isinstance(n.left.func, ast.Name)
                    and n.left.func.id == "get_flash_attn_version"
                ):
                    fa3_supports_fp8 = True
                    break
        # Check flash_attn_supports_sinks - looks for `get_flash_attn_version() == 3`
        if node.name == "flash_attn_supports_sinks":
            for n in ast.walk(node):
                if (
                    isinstance(n, ast.Compare)
                    and isinstance(n.left, ast.Call)
                    and isinstance(n.left.func, ast.Name)
                    and n.left.func.id == "get_flash_attn_version"
                ):
                    fa3_supports_sinks = True
                    break
        # Check get_flash_attn_version for FA3/FA4 compute capability
        if node.name == "get_flash_attn_version":
            for n in ast.walk(node):
                # Handle IfExp (ternary) with `device_capability.major == 9`
                if isinstance(n, ast.IfExp):
                    test = n.test
                    if isinstance(test, ast.BoolOp):
                        for val in test.values:
                            if (
                                isinstance(val, ast.Compare)
                                and isinstance(val.left, ast.Attribute)
                                and val.left.attr == "major"
                                and val.comparators
                                and isinstance(val.comparators[0], ast.Constant)
                            ):
                                fa3_compute_cap = f"{val.comparators[0].value}.x"
                                break
                # Handle If statements for FA3/FA4 detection
                # e.g. `if device_capability.major == 9` -> FA3
                # `elif device_capability.major >= 10` -> FA4
                if isinstance(n, ast.If):
                    test = n.test
                    comparisons = (
                        [v for v in test.values if isinstance(v, ast.Compare)]
                        if isinstance(test, ast.BoolOp)
                        else [test]
                        if isinstance(test, ast.Compare)
                        else []
                    )
                    for comp in comparisons:
                        if not (
                            isinstance(comp.left, ast.Attribute)
                            and comp.left.attr == "major"
                            and comp.comparators
                            and isinstance(comp.comparators[0], ast.Constant)
                            and isinstance(comp.comparators[0].value, int)
                        ):
                            continue
                        op = comp.ops[0]
                        val = comp.comparators[0].value
                        # First match wins for each of FA3 (==) and FA4 (>=).
                        if isinstance(op, ast.Eq) and fa3_compute_cap is None:
                            fa3_compute_cap = f"{val}.x"
                        elif isinstance(op, ast.GtE) and fa4_compute_cap is None:
                            fa4_compute_cap = f"≥{val}.0"
    # Fallback: try to parse FA4 compute caps from flash_attn_interface.py
    if fa4_compute_cap is None:
        fa4_compute_cap = _parse_fa4_supported_caps()
    return {
        "fa2": {
            "supports_fp8": False,
            "supports_sink": False,
        },
        "fa3": {
            "compute_capability": fa3_compute_cap,
            "supports_fp8": fa3_supports_fp8,
            "supports_sink": fa3_supports_sinks,
        },
        "fa4": {
            "compute_capability": fa4_compute_cap,
            "supports_fp8": False,
            "supports_sink": False,
        },
    }
def parse_flashinfer_trtllm_features() -> dict[str, dict[str, Any]]:
    """Parse flashinfer.py to detect TRTLLM-specific features.

    FLASHINFER uses TRTLLM attention on SM100 (Blackwell), which has different
    capabilities (e.g., sink support) than native FlashInfer on earlier GPUs.
    """
    if not FLASHINFER_UTILS_FILE.exists():
        return {}
    try:
        tree = ast.parse(FLASHINFER_UTILS_FILE.read_text())
    except Exception:
        return {}
    trtllm_cc = _find_cc_in_function(tree, "supports_trtllm_attention")
    if not trtllm_cc:
        return {}
    # Native FlashInfer (pre-SM100) vs the TRTLLM pathway on Blackwell.
    native_features: dict[str, Any] = {"supports_sink": False}
    trtllm_features: dict[str, Any] = {
        "compute_capability": trtllm_cc,
        "supports_sink": True,
    }
    return {"native": native_features, "trtllm": trtllm_features}
def parse_mla_prefill_backends() -> list[dict[str, Any]]:
    """Parse MLA prefill backend options from mla_attention.py.

    MLA uses different backends for prefill vs decode. The decode backends are
    registered in the registry, but prefill backends are selected at runtime
    based on conditions in MLACommonImpl.__init__.

    Returns a list of prefill backend info dicts with their requirements,
    ordered by selection priority; empty when the source file is missing or
    unparseable.
    """
    if not MLA_ATTENTION_FILE.exists():
        return []
    try:
        tree = ast.parse(MLA_ATTENTION_FILE.read_text())
    except Exception:
        return []
    # Find compute capability requirements by parsing use_* functions
    trtllm_cc = _find_cc_in_function(tree, "use_trtllm_ragged_deepseek_prefill")
    flashinfer_cc = _find_cc_in_function(tree, "use_flashinfer_prefill")
    cudnn_cc = _find_cc_in_function(tree, "use_cudnn_prefill")
    # Build prefill backend list based on what we found
    # Order matches the priority in MLACommonImpl.__init__
    prefill_backends: list[dict[str, Any]] = []
    # TRT-LLM Ragged (highest priority if available)
    if trtllm_cc:
        prefill_backends.append(
            {
                "name": "TRT-LLM Ragged‡",
                "description": "TensorRT-LLM ragged attention",
                "compute_capability": trtllm_cc,
                "enable": "Default on SM100",
                "disable": "`-ac.use_trtllm_ragged_deepseek_prefill=0`",
                "notes": "DeepSeek R1 dims only",
            }
        )
    # FlashInfer prefill
    if flashinfer_cc:
        prefill_backends.append(
            {
                "name": "FlashInfer",
                "description": "FlashInfer CUTLASS backend",
                "compute_capability": flashinfer_cc,
                "enable": "`-ac.disable_flashinfer_prefill=0`",
                "disable": "`-ac.disable_flashinfer_prefill=1`",
                "notes": "DeepSeek R1 dims only",
            }
        )
    # cuDNN prefill
    if cudnn_cc:
        prefill_backends.append(
            {
                "name": "cuDNN",
                "description": "cuDNN-based attention",
                "compute_capability": cudnn_cc,
                "enable": "`-ac.use_cudnn_prefill=1`",
                "disable": "`-ac.use_cudnn_prefill=0`",
                "notes": "",
            }
        )
    # FlashAttention is always available as fallback
    prefill_backends.append(
        {
            "name": "FlashAttention",
            "description": "FlashAttention varlen (FA2/FA3)",
            "compute_capability": "Any",
            "enable": "Default fallback",
            "disable": "Use other backends",
            "notes": "FA3 on SM90, FA2 otherwise",
        }
    )
    return prefill_backends
# ---------------------------------------------------------------------------
# Backend variant expansion (FA2/FA3/FA4, FlashInfer native/TRTLLM)
# ---------------------------------------------------------------------------
def _expand_flash_attn_variants(
all_backends: list[dict[str, Any]],
fa_features: dict[str, dict[str, Any]],
) -> list[dict[str, Any]]:
"""Expand FLASH_ATTN into FA2, FA3, and FA4 variants."""
expanded = []
for backend in all_backends:
if backend["name"] != "FLASH_ATTN":
backend.setdefault("_sort_key", backend["name"])
backend.setdefault("_sort_order", 0)
backend.setdefault("version", "")
expanded.append(backend)
continue
# Create FA2 entry (keeps base backend's compute_capability)
fa2 = backend.copy()
fa2["version"] = "FA2*"
fa2["_sort_key"] = "FLASH_ATTN"
fa2["_sort_order"] = 0
fa2["supports_sink"] = fa_features["fa2"]["supports_sink"]
# Create FA3 entry (uses parsed compute_capability from fa_utils)
fa3 = backend.copy()
fa3["version"] = "FA3*"
fa3["_sort_key"] = "FLASH_ATTN"
fa3["_sort_order"] = 1
if fa_features["fa3"]["compute_capability"]:
fa3["compute_capability"] = fa_features["fa3"]["compute_capability"]
fa3["supports_sink"] = fa_features["fa3"]["supports_sink"]
if fa_features["fa3"]["supports_fp8"]:
base_dtypes = backend["kv_cache_dtypes"].split(", ")
fp8_dtypes = ["fp8", "fp8_e4m3", "fp8_e5m2"]
new_dtypes = [d for d in fp8_dtypes if d not in base_dtypes]
fa3["kv_cache_dtypes"] = ", ".join(base_dtypes + new_dtypes)
expanded.append(fa2)
expanded.append(fa3)
# Create FA4 entry if FA4 features are available
if "fa4" in fa_features:
fa4 = backend.copy()
fa4["version"] = "FA4*"
fa4["_sort_key"] = "FLASH_ATTN"
fa4["_sort_order"] = 2
if fa_features["fa4"].get("compute_capability"):
fa4["compute_capability"] = fa_features["fa4"]["compute_capability"]
fa4["supports_sink"] = fa_features["fa4"]["supports_sink"]
expanded.append(fa4)
return expanded
def _expand_flashinfer_variants(
all_backends: list[dict[str, Any]],
fi_features: dict[str, dict[str, Any]],
) -> list[dict[str, Any]]:
"""Expand FLASHINFER into native and TRTLLM variants."""
expanded = []
for backend in all_backends:
if backend["name"] != "FLASHINFER":
expanded.append(backend)
continue
# Parse original compute capability to get min CC
orig_cap = backend["compute_capability"]
parts = orig_cap.replace(".x", "").split("-")
min_cc = parts[0] if parts else "7"
trtllm_cc = fi_features["trtllm"]["compute_capability"]
# Create native entry (pre-Blackwell GPUs)
native = backend.copy()
native["version"] = "Native†"
native["_sort_key"] = "FLASHINFER"
native["_sort_order"] = 0
native["supports_sink"] = fi_features["native"]["supports_sink"]
native["compute_capability"] = f"{min_cc}.x-9.x"
# Create TRTLLM entry
trtllm = backend.copy()
trtllm["version"] = "TRTLLM†"
trtllm["_sort_key"] = "FLASHINFER"
trtllm["_sort_order"] = 1
trtllm["compute_capability"] = trtllm_cc
trtllm["supports_sink"] = fi_features["trtllm"]["supports_sink"]
expanded.append(native)
expanded.append(trtllm)
return expanded
# ---------------------------------------------------------------------------
# CUDA priority list parsing
# ---------------------------------------------------------------------------
def parse_cuda_priority_lists() -> dict[str, list[str]]:
    """Parse backend priority lists from cuda.py via AST inspection.

    ``_get_backend_priorities`` is structured as::

        if use_mla:
            if device_capability.major == 10:
                return [MLA list for SM100]
            else:
                return [MLA list for default]
        else:
            if device_capability.major == 10:
                return [Standard list for SM100]
            else:
                return [Standard list for default]
    """
    if not CUDA_PLATFORM_FILE.exists():
        return {}
    try:
        tree = ast.parse(CUDA_PLATFORM_FILE.read_text())
    except Exception:
        return {}
    result: dict[str, list[str]] = {}
    target_funcs = [
        node
        for node in ast.walk(tree)
        if isinstance(node, ast.FunctionDef)
        and node.name == "_get_backend_priorities"
    ]
    for func in target_funcs:
        for stmt in func.body:
            if not isinstance(stmt, ast.If):
                continue
            # "if use_mla:" splits the MLA branch from the standard branch.
            if isinstance(stmt.test, ast.Name) and stmt.test.id == "use_mla":
                _extract_priorities(stmt.body, result, "mla")
                if stmt.orelse:
                    _extract_priorities(stmt.orelse, result, "standard")
            else:
                _extract_priorities([stmt], result, "standard")
    return result
def _get_backends_from_return(stmts: list) -> list[str]:
"""Extract backend names from return statements in a list of statements.
Handles starred unpacking (e.g. ``*sparse_backends``) by resolving the
variable from assignments found in the same statement list. When the
variable is conditionally assigned (inside an ``if/else``), the ``else``
branch value is used as the representative default.
"""
# Collect variable assignments so we can resolve starred expressions.
# For conditional assignments, last-written (else branch) wins.
var_assigns: dict[str, list[str]] = {}
for stmt in stmts:
if isinstance(stmt, ast.Assign) and isinstance(stmt.value, ast.List):
for target in stmt.targets:
if isinstance(target, ast.Name):
var_assigns[target.id] = [
e.attr for e in stmt.value.elts if isinstance(e, ast.Attribute)
]
elif isinstance(stmt, ast.If):
for branch in (stmt.body, stmt.orelse):
for branch_stmt in branch:
if isinstance(branch_stmt, ast.Assign) and isinstance(
branch_stmt.value, ast.List
):
for target in branch_stmt.targets:
if isinstance(target, ast.Name):
var_assigns[target.id] = [
e.attr
for e in branch_stmt.value.elts
if isinstance(e, ast.Attribute)
]
for stmt in stmts:
if isinstance(stmt, ast.Return) and isinstance(stmt.value, ast.List):
backends: list[str] = []
for e in stmt.value.elts:
if isinstance(e, ast.Attribute):
backends.append(e.attr)
elif (
isinstance(e, ast.Starred)
and isinstance(e.value, ast.Name)
and e.value.id in var_assigns
):
backends.extend(var_assigns[e.value.id])
return backends
return []
def _is_sm100_check(test: ast.expr) -> bool:
"""Check if test is `something.major == 10`."""
return (
isinstance(test, ast.Compare)
and isinstance(test.left, ast.Attribute)
and test.left.attr == "major"
and len(test.ops) == 1
and isinstance(test.ops[0], ast.Eq)
and len(test.comparators) == 1
and isinstance(test.comparators[0], ast.Constant)
and test.comparators[0].value == 10
)
def _extract_priorities(body: list, priorities: dict[str, list[str]], prefix: str):
    """Extract priority lists from if/else statement body."""
    for node in body:
        if isinstance(node, ast.If):
            # The SM100 special-case may guard either the if or else branch.
            sm100_first = _is_sm100_check(node.test)
            key_if = f"{prefix}_{'sm100' if sm100_first else 'default'}"
            key_else = f"{prefix}_{'default' if sm100_first else 'sm100'}"
            if_backends = _get_backends_from_return(node.body)
            if if_backends:
                priorities[key_if] = if_backends
            else_backends = _get_backends_from_return(node.orelse)
            if else_backends:
                priorities[key_else] = else_backends
        elif isinstance(node, ast.Return) and isinstance(node.value, ast.List):
            # Unconditional return: treat as the default list.
            priorities[f"{prefix}_default"] = [
                e.attr for e in node.value.elts if isinstance(e, ast.Attribute)
            ]
# ---------------------------------------------------------------------------
# Data-driven table rendering
#
# Each column is a (header, formatter) pair. The formatter takes a backend
# info dict and returns the cell string. Tables are assembled by selecting
# which columns to include, then calling _render_table().
# ---------------------------------------------------------------------------
# Column type alias for readability
TableColumn = tuple[str, Callable[[dict[str, Any]], str]]
# Shared column definitions -- order here matches the output table order
# Each entry is (header text, formatter taking a backend-info dict).
# Backend name rendered as inline code.
_COL_BACKEND: TableColumn = ("Backend", lambda b: f"`{b['name']}`")
# Optional version label (e.g. FA2/FA3 variants); empty when absent.
_COL_VERSION: TableColumn = ("Version", lambda b: b.get("version", ""))
_COL_DTYPES: TableColumn = ("Dtypes", lambda b: b["dtypes"])
# KV cache dtypes get per-item backticks via add_literal_quotes.
_COL_KV_DTYPES: TableColumn = (
    "KV Dtypes",
    lambda b: add_literal_quotes(b["kv_cache_dtypes"]),
)
_COL_BLOCK_SIZES: TableColumn = ("Block Sizes", lambda b: b["block_sizes"])
_COL_HEAD_SIZES: TableColumn = ("Head Sizes", lambda b: b["head_sizes"])
# Boolean feature flags rendered as check/cross emoji.
_COL_SINK: TableColumn = ("Sink", lambda b: bool_to_emoji(b["supports_sink"]))
_COL_SPARSE: TableColumn = ("Sparse", lambda b: bool_to_emoji(b["is_sparse"]))
_COL_MM_PREFIX: TableColumn = (
    "MM Prefix",
    lambda b: bool_to_emoji(b["supports_mm_prefix"]),
)
_COL_DCP: TableColumn = ("DCP", lambda b: bool_to_emoji(b["supports_dcp"]))
_COL_ATTN_TYPES: TableColumn = ("Attention Types", lambda b: b["attn_types"])
_COL_COMPUTE_CAP: TableColumn = ("Compute Cap.", lambda b: b["compute_capability"])
def add_literal_quotes(value: str) -> str:
    """Add literal backticks around all comma-separated items in a string."""
    quoted = (f"`{piece.strip()}`" for piece in value.split(","))
    return ", ".join(quoted)
def bool_to_emoji(value: bool) -> str:
    """Convert a boolean to a checkmark or X emoji."""
    if value:
        return "✅"
    return "❌"
def _build_columns(is_mla: bool, has_versions: bool) -> list[TableColumn]:
    """Build the column list for a backend feature table.

    Column selection depends on whether this is an MLA table (adds the
    Sparse column) and whether any backend has version variants (adds the
    Version column).
    """
    columns: list[TableColumn] = [_COL_BACKEND]
    if has_versions:
        columns += [_COL_VERSION]
    columns += [_COL_DTYPES, _COL_KV_DTYPES, _COL_BLOCK_SIZES, _COL_HEAD_SIZES]
    columns += [_COL_SINK]
    if is_mla:
        columns += [_COL_SPARSE]
    columns += [_COL_MM_PREFIX, _COL_DCP, _COL_ATTN_TYPES, _COL_COMPUTE_CAP]
    return columns
def _sort_key(x: dict[str, Any]) -> tuple[str, int]:
"""Sort key that keeps parent/child rows together in order."""
return (x.get("_sort_key", x["name"]), x.get("_sort_order", 0))
def _render_table(
    columns: list[TableColumn],
    backends: list[dict[str, Any]],
) -> list[str]:
    """Render a markdown table from column specs and backend data."""
    headers = [name for name, _ in columns]
    # Header row plus a separator sized to each header.
    out = [
        "| " + " | ".join(headers) + " |",
        "|" + "|".join("-" * (len(h) + 2) for h in headers) + "|",
    ]
    for entry in sorted(backends, key=_sort_key):
        cells = [fmt(entry) for _, fmt in columns]
        out.append("| " + " | ".join(cells) + " |")
    return out
def generate_markdown_table(
    backends: list[dict[str, Any]], title: str, is_mla_table: bool = False
) -> str:
    """Generate a titled markdown table from backend info."""
    if not backends:
        return f"## {title}\n\nNo backends found.\n"
    # Show the Version column only if at least one backend has a variant.
    show_versions = any(entry.get("version") for entry in backends)
    table = _render_table(_build_columns(is_mla_table, show_versions), backends)
    return "\n".join([f"## {title}", "", *table, ""])
# ---------------------------------------------------------------------------
# Markdown section generators (usage, priority, legend, MLA)
# ---------------------------------------------------------------------------
def generate_usage_section() -> str:
    """Generate the usage documentation section."""
    # NOTE(review): the CLI flags and Python API shown in this literal are
    # hand-maintained; verify against vllm's current argument parser when
    # updating.
    return """## Setting the Attention Backend
### Command Line
There are two ways to specify the backend from the command line:
**Option 1: Using `--attention-backend` (simple)**
```bash
vllm serve <model> --attention-backend FLASH_ATTN
```
**Option 2: Using `--attention-config.backend` / `-ac.backend` (structured config)**
```bash
# Dot notation
vllm serve <model> --attention-config.backend FLASH_ATTN
vllm serve <model> -ac.backend FLASH_ATTN
# JSON format
vllm serve <model> --attention-config '{"backend": "FLASH_ATTN"}'
vllm serve <model> -ac '{"backend": "FLASH_ATTN"}'
```
> **Note:** `--attention-backend` and `--attention-config.backend` are mutually
> exclusive. Use one or the other, not both.
### Python API
Use `AttentionConfig` with the `LLM` class:
```python
from vllm import LLM
from vllm.config import AttentionConfig
from vllm.v1.attention.backends.registry import AttentionBackendEnum
# Method 1: Using AttentionConfig with enum
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    attention_config=AttentionConfig(backend=AttentionBackendEnum.FLASH_ATTN),
)
# Method 2: Using attention_backend parameter with string
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    attention_backend="FLASH_ATTN",
)
```
## Backend Selection Behavior
### Manual Selection
When you explicitly set a backend via `--attention-backend` or `AttentionConfig`:
1. The backend is **validated** against your configuration (model dtype, head
   size, compute capability, etc.)
2. If the backend **doesn't support** your configuration, an error is raised
   with the specific reason
3. If valid, the backend is used
Example error when selecting an incompatible backend:
```text
ValueError: Selected backend FLASHMLA is not valid for this configuration.
Reason: ['compute capability not supported']
```
### Automatic Selection
When no backend is specified (the default):
1. vLLM iterates through backends in **priority order** (see tables below)
2. Each backend is validated against your configuration
3. The **first compatible backend** is selected
4. If no backend is compatible, an error is raised listing all backends and
   their incompatibility reasons
"""
def _priority_table(title: str, backends: list[str]) -> list[str]:
"""Generate a priority table for a list of backends."""
return [
f"**{title}:**",
"",
"| Priority | Backend |",
"|----------|---------|",
*[f"| {i} | `{b}` |" for i, b in enumerate(backends, 1)],
"",
]
def generate_priority_section(priorities: dict[str, list[str]]) -> str:
    """Generate the priority ranking section."""
    out: list[str] = [
        "## Backend Priority (CUDA)",
        "",
        "When no backend is explicitly selected, vLLM chooses the first",
        "compatible backend from these priority-ordered lists.",
        "",
        "Priority is **1 = highest** (tried first).",
        "",
        "### Standard Attention (MHA, MQA, GQA)",
        "",
    ]
    blackwell_title = "Blackwell (SM 10.x)"
    default_title = "Ampere/Hopper (SM 8.x-9.x)"
    # Standard-attention tables (SM100 first, then the default list).
    for key, title in (
        ("standard_sm100", blackwell_title),
        ("standard_default", default_title),
    ):
        if key in priorities:
            out.extend(_priority_table(title, priorities[key]))
    out.extend(["### MLA Attention (DeepSeek-style)", ""])
    # MLA tables, same ordering.
    for key, title in (
        ("mla_sm100", blackwell_title),
        ("mla_default", default_title),
    ):
        if key in priorities:
            out.extend(_priority_table(title, priorities[key]))
    out.append(
        "> **Note:** ROCm and CPU platforms have their own selection logic. "
        "See the platform-specific documentation for details."
    )
    out.append("")
    return "\n".join(out)
def generate_legend() -> str:
    """Generate a legend explaining the table columns."""
    # Descriptions below mirror the _COL_* column definitions; keep them in
    # sync when columns change.
    return """## Legend
| Column | Description |
|--------|-------------|
| **Dtypes** | Supported model data types (fp16, bf16, fp32) |
| **KV Dtypes** | Supported KV cache data types (`auto`, `fp8`, `fp8_e4m3`, etc.) |
| **Block Sizes** | Supported KV cache block sizes (%N means multiples of N) |
| **Head Sizes** | Supported attention head sizes |
| **Sink** | Attention sink support (for StreamingLLM) |
| **Sparse** | Sparse attention support (MLA only) |
| **MM Prefix** | Multimodal prefix full attention support |
| **DCP** | Decode Context Parallelism support (`--decode-context-parallel-size`) |
| **Attention Types** | Supported attention patterns (Decoder, Encoder, Enc-Dec) |
| **Compute Cap.** | Required CUDA compute capability (N/A for non-CUDA backends) |
**Symbols:** ✅ = Supported, ❌ = Not supported
"""
def generate_mla_section(
    prefill_backends: list[dict[str, Any]], decode_backends: list[dict[str, Any]]
) -> str:
    """Generate the complete MLA section with prefill and decode tables."""
    out: list[str] = [
        "## MLA (Multi-head Latent Attention) Backends",
        "",
        "MLA uses separate backends for prefill and decode phases.",
        "",
        "### Prefill Backends",
        "",
        "The prefill backend is selected at runtime based on hardware and",
        "configuration.",
        "",
        "| Backend | Description | Compute Cap. | Enable | Disable | Notes |",
        "|---------|-------------|--------------|--------|---------|-------|",
    ]
    for entry in prefill_backends:
        out.append(
            f"| {entry['name']} | {entry['description']} "
            f"| {entry['compute_capability']} | {entry['enable']} "
            f"| {entry['disable']} | {entry.get('notes', '')} |"
        )
    out += [
        "",
        "> **‡** TRT-LLM Ragged is the default on Blackwell (SM100).",
        "> On other GPUs, FlashAttention is used as the default.",
        "",
        "### Decode Backends",
        "",
    ]
    # Decode backends reuse the data-driven feature-table renderer.
    decode_columns = _build_columns(is_mla=True, has_versions=False)
    out.extend(_render_table(decode_columns, decode_backends))
    out.append("")
    return "\n".join(out)
# ---------------------------------------------------------------------------
# Top-level orchestration
# ---------------------------------------------------------------------------
def generate_docs() -> str:
    """Generate the complete documentation.

    Assembles the markdown document in order: auto-generated header, usage
    section, priority section, legend, the standard-attention feature table
    (plus variant footnotes), and the MLA section. Returns the full document
    as a single string.
    """
    attention_backends_map = parse_registry()
    # Parse priority lists from cuda.py
    priorities = parse_cuda_priority_lists()
    # Parse FlashAttention FA2/FA3 feature differences
    fa_features = parse_flash_attn_features()
    # Parse FlashInfer TRTLLM feature differences (native vs TRTLLM on Blackwell)
    fi_features = parse_flashinfer_trtllm_features()
    # Parse MLA prefill backends
    mla_prefill_backends = parse_mla_prefill_backends()
    # Collect backend info
    all_backends = []
    for backend_name, class_path in attention_backends_map.items():
        if backend_name in SKIP_BACKENDS:
            continue
        info = analyze_backend(backend_name, class_path)
        if info:
            all_backends.append(info)
    # Expand backends into version variants
    if fa_features:
        all_backends = _expand_flash_attn_variants(all_backends, fa_features)
    if fi_features:
        all_backends = _expand_flashinfer_variants(all_backends, fi_features)
    # Split into MLA and non-MLA
    mla_backends = [b for b in all_backends if b["is_mla"]]
    non_mla_backends = [b for b in all_backends if not b["is_mla"]]
    # Generate documentation
    script_path = "tools/pre_commit/generate_attention_backend_docs.py"
    doc_lines = [
        "# Attention Backend Feature Support",
        "",
        f"This document is auto-generated by `{script_path}`.",
        "It shows the feature support for each registered attention backend",
        "based on the checks in `AttentionBackend.validate_configuration()`.",
        "",
        "**Do not edit this file manually.** Run the following command to",
        "regenerate it:",
        "",
        "```bash",
        f"python {script_path}",
        "```",
        "",
    ]
    # Add usage documentation
    doc_lines.append(generate_usage_section())
    # Add priority section
    doc_lines.append(generate_priority_section(priorities))
    # Add legend and feature tables
    doc_lines.append(generate_legend())
    standard_title = "Standard Attention (MHA, MQA, GQA) Backends"
    doc_lines.append(
        generate_markdown_table(non_mla_backends, standard_title, is_mla_table=False)
    )
    # Add footnotes for version/variant distinctions (in table order).
    # The † and * markers must match those used in the variant expanders.
    footnotes = []
    if fi_features:
        footnotes.append(
            "> **†** FlashInfer uses TRTLLM attention on Blackwell (SM100), which "
            "supports sinks. Disable via `--attention-config.use_trtllm_attention=0`."
        )
    if fa_features:
        footnotes.append(
            "> **\\*** Specify the FlashAttention version via "
            "`--attention-config.flash_attn_version=2`, `3`, or `4`. "
            "Default is FA4 on SM100+ (Blackwell), FA3 on SM90 (Hopper), "
            "FA2 otherwise."
        )
    if footnotes:
        doc_lines.append("\n>\n".join(footnotes) + "\n")
    # Add MLA section with prefill and decode backends
    doc_lines.append(generate_mla_section(mla_prefill_backends, mla_backends))
    return "\n".join(doc_lines)
def main():
    """CLI entry point for generating/checking the attention backend docs.

    With ``--check`` (pre-commit mode), the document is regenerated on disk
    when stale and the process exits 1 so the hook fails; it exits 0 when the
    file is already up to date. Without ``--check`` it always writes the file.
    """
    parser = argparse.ArgumentParser(
        description="Generate attention backend documentation table"
    )
    parser.add_argument(
        "--output",
        "-o",
        type=str,
        default=str(REPO_ROOT / "docs" / "design" / "attention_backends.md"),
        help="Output file path (default: docs/design/attention_backends.md)",
    )
    parser.add_argument(
        "--check",
        action="store_true",
        help="Check if the documentation is up to date (for pre-commit)",
    )
    parser.add_argument(
        "files",
        nargs="*",
        help="Files to check (passed by pre-commit). If none are relevant, skip.",
    )
    args = parser.parse_args()
    # Pre-commit passes the changed files; skip entirely when none of them
    # affect the generated documentation.
    if args.files and not any(is_relevant_file(f) for f in args.files):
        sys.exit(0)
    output_path = Path(args.output)
    new_content = generate_docs()
    if args.check:
        needs_update = (
            not output_path.exists() or output_path.read_text() != new_content
        )
        if needs_update:
            # Regenerate in place so the user can simply re-stage the file.
            output_path.parent.mkdir(parents=True, exist_ok=True)
            output_path.write_text(new_content)
            print(f"🔄 Regenerated: {output_path}")
            sys.exit(1)
        print(f"✅ Up to date: {output_path}")
        sys.exit(0)
    output_path.parent.mkdir(parents=True, exist_ok=True)
    output_path.write_text(new_content)
    print(f"Generated: {output_path}")


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tools/pre_commit/generate_attention_backend_docs.py",
"license": "Apache License 2.0",
"lines": 1255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/funaudiochat.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Inference-only FunAudioChat model compatible with HuggingFace weights.
FunAudioChat is a Qwen3 text model augmented with:
- a continuous audio encoder (Whisper-mel frontend + transformer)
- a discrete audio encoder (speech tokenizer + projector)
In the HF implementation, audio features are scattered into `<|AUDIO|>` token
positions via `inputs_embeds`, while `position_ids` (RoPE) remains standard 1D.
"""
from __future__ import annotations
import os
from collections.abc import Iterable, Mapping, Sequence
from functools import cached_property
from typing import Any
import numpy as np
import torch
import torch.nn as nn
from transformers import PreTrainedTokenizerFast, WhisperFeatureExtractor
from transformers.activations import get_activation
from transformers.feature_extraction_utils import BatchFeature
from transformers.modeling_outputs import BaseModelOutput
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention
from vllm.model_executor.layers.linear import QKVParallelLinear, RowParallelLinear
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
)
from vllm.multimodal.parse import (
AudioProcessorItems,
MultiModalDataItems,
MultiModalDataParser,
)
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.sequence import IntermediateTensors
from vllm.utils.import_utils import _has_module
from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
from .utils import AutoWeightsLoader, init_vllm_registered_model, maybe_prefix
class _SinusoidsPositionEmbedding(nn.Module):
def __init__(self, length: int, channels: int, max_timescale: float = 10000.0):
super().__init__()
if channels % 2 != 0:
raise ValueError("SinusoidsPositionEmbedding needs even channels input")
log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
inv_timescales = torch.exp(
-log_timescale_increment * torch.arange(channels // 2).float()
)
scaled_time = (
torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
)
self.register_buffer(
"positional_embedding",
torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1),
persistent=False,
)
class FunAudioChatAudioAttention(nn.Module):
    """Multi-headed attention used inside the continuous audio tower."""

    def __init__(self, config: Any):
        """Build the fused-QKV attention layer.

        :param config: HF audio-encoder config providing ``d_model``,
            ``encoder_attention_heads`` and optionally ``attention_dropout``.
        :raises ValueError: if ``d_model`` is not divisible by the head count.
        """
        super().__init__()
        self.embed_dim = int(config.d_model)
        self.total_num_heads = int(config.encoder_attention_heads)
        self.dropout = float(getattr(config, "attention_dropout", 0.0))
        self.head_dim = self.embed_dim // self.total_num_heads
        self.num_key_value_groups = 1  # needed for eager attention
        self.config = config
        if self.head_dim * self.total_num_heads != self.embed_dim:
            raise ValueError(
                "embed_dim must be divisible by num_heads "
                f"(got embed_dim={self.embed_dim}, "
                f"num_heads={self.total_num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = 0.0
        self.is_decoder = False
        self.is_causal = False
        # Fused Q/K/V projection (sharded by the parallel linear layer).
        self.qkv_proj = QKVParallelLinear(
            self.embed_dim,
            self.head_dim,
            self.total_num_heads,
            bias=True,
        )
        # Per-rank head counts and projection sizes after sharding.
        self.num_heads = self.qkv_proj.num_heads
        self.num_kv_heads = self.qkv_proj.num_kv_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.attn = MMEncoderAttention(
            num_heads=self.num_heads,
            head_size=self.head_dim,
            scale=self.scaling,
            num_kv_heads=self.num_kv_heads,
            prefix="funaudiochat_audio_tower.attn",
        )
        self.out_proj = RowParallelLinear(
            self.embed_dim,
            self.embed_dim,
            bias=True,
        )

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, fusing separate q/k/v into ``qkv_proj``.

        :param weights: iterable of ``(name, tensor)`` pairs from the
            checkpoint.
        :return: set of parameter names that were loaded.
        """
        # Map the HF per-projection names onto shards of the fused qkv_proj.
        stacked_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        with torch.no_grad():
            if self.qkv_proj.bias is not None:
                # HF FunAudioChat uses bias=False for k_proj. Ensure the missing
                # shard starts as zeros, while allowing q/v shards to load.
                self.qkv_proj.bias.zero_()
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, shard_name, shard_id in stacked_params_mapping:
                if shard_name not in name:
                    continue
                name = name.replace(shard_name, param_name)
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor:
        """Run varlen self-attention over packed (unpadded) hidden states.

        :param hidden_states: 2D packed tokens, ``(total_seq_len, hidden)``.
        :param cu_seqlens: cumulative sequence lengths delimiting each chunk;
            forwarded to the MM encoder attention backend.
        :param attention_mask: unused; masking is driven by ``cu_seqlens``.
        :return: attended hidden states, same leading length as the input.
        """
        del kwargs
        del attention_mask
        seq_length, _ = hidden_states.size()
        qkv, _ = self.qkv_proj(hidden_states)
        query_states, key_states, value_states = qkv.split(
            [self.q_size, self.kv_size, self.kv_size], dim=-1
        )
        max_seqlen: torch.Tensor | None = None
        if cu_seqlens is not None:
            # Longest chunk length, required by varlen attention kernels.
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
        attn_output = self.attn(
            query_states.reshape(1, seq_length, self.q_size),
            key_states.reshape(1, seq_length, self.kv_size),
            value_states.reshape(1, seq_length, self.kv_size),
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        ).reshape(seq_length, -1)
        output, _ = self.out_proj(attn_output)
        return output
class FunAudioChatAudioEncoderLayer(nn.Module):
    """Pre-norm transformer encoder layer (self-attention + feed-forward)."""

    def __init__(self, config: Any):
        """Build the attention and MLP sublayers from the HF config."""
        super().__init__()
        self.embed_dim = int(config.d_model)
        self.self_attn = FunAudioChatAudioAttention(config)
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = float(config.dropout)
        self.activation_fn = get_activation(str(config.activation_function))
        self.activation_dropout = float(config.activation_dropout)
        self.fc1 = nn.Linear(self.embed_dim, int(config.encoder_ffn_dim))
        self.fc2 = nn.Linear(int(config.encoder_ffn_dim), self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs: object,
    ) -> tuple[torch.Tensor]:
        """Apply self-attention then the MLP, each with a residual connection.

        Returns a 1-tuple to match the HF encoder-layer calling convention.
        """
        # Attention sublayer (pre-norm + residual).
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            cu_seqlens=cu_seqlens,
            attention_mask=attention_mask,
            **kwargs,
        )
        hidden_states = residual + hidden_states
        # Feed-forward sublayer (pre-norm + residual).
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(
            hidden_states, p=self.activation_dropout, training=self.training
        )
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(
            hidden_states, p=self.dropout, training=self.training
        )
        hidden_states = residual + hidden_states
        return (hidden_states,)
class FunAudioChatAudioEncoder(nn.Module):
    """Continuous audio tower.

    Whisper-style mel frontend (two convs, the second stride-2), a stack of
    transformer encoder layers running chunked varlen attention, a 2x
    temporal average pool, and a projection to ``config.output_dim``.
    """

    def __init__(self, config: Any):
        super().__init__()
        self.config = config
        embed_dim = int(config.d_model)
        self.num_mel_bins = int(config.num_mel_bins)
        self.max_source_positions = int(config.max_source_positions)
        # NOTE(review): embed_scale is initialized for HF parity but is not
        # applied anywhere in this class's forward -- confirm intentional.
        self.embed_scale = (embed_dim**0.5) if bool(config.scale_embedding) else 1.0
        # Attention is chunked into windows of n_window * 2 mel frames.
        self.n_window = int(config.n_window)
        self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
        self.layers = nn.ModuleList(
            [
                FunAudioChatAudioEncoderLayer(config)
                for _ in range(int(config.encoder_layers))
            ]
        )
        self.ln_post = nn.LayerNorm(embed_dim)
        # NOTE(review): forward uses nn.functional.avg_pool1d directly; this
        # module is kept, presumably for checkpoint compatibility.
        self.avg_pooler = nn.AvgPool1d(2, stride=2)
        self.proj = nn.Linear(embed_dim, int(config.output_dim))
        self.positional_embedding = _SinusoidsPositionEmbedding(
            self.max_source_positions, embed_dim
        )
        # Present in HF weights even if unused during S2T.
        self.audio_bos_eos_token = nn.Embedding(2, int(config.output_dim))

    @property
    def dtype(self) -> torch.dtype:
        # The conv frontend's weight dtype is representative of the module.
        return self.conv1.weight.dtype

    def _prepare_attention_mask(
        self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor
    ) -> torch.Tensor | None:
        """Build a block-diagonal additive attention mask from ``cu_seqlens``.

        Returns ``None`` for flash_attention_2, which handles varlen inputs
        natively. Otherwise positions outside a chunk get the dtype minimum
        (masked out after softmax) and positions inside a chunk get 0.
        """
        if getattr(self.config, "_attn_implementation", "eager") == "flash_attention_2":
            return None
        seq_length = inputs_tensor.shape[0]
        attention_mask = torch.full(
            (1, 1, seq_length, seq_length),
            torch.finfo(inputs_tensor.dtype).min,
            device=inputs_tensor.device,
            dtype=inputs_tensor.dtype,
        )
        for i in range(1, len(cu_seqlens)):
            start = int(cu_seqlens[i - 1].item())
            end = int(cu_seqlens[i].item())
            attention_mask[..., start:end, start:end] = 0
        return attention_mask

    def forward(
        self,
        input_features: torch.Tensor,
        feature_lens: torch.Tensor,
        aftercnn_lens: torch.Tensor,
        speech_maxlen: int,
        **kwargs: object,
    ) -> BaseModelOutput:
        """Encode concatenated mel features into per-audio embeddings.

        :param input_features: mel features with time on dim 1, concatenated
            across the batch (per-item lengths given by ``feature_lens``).
        :param feature_lens: per-item mel frame counts; items with length 0
            are treated as invalid and yield all-zero outputs.
        :param aftercnn_lens: per-item sequence lengths after the conv
            frontend (stride-2 downsampling).
        :param speech_maxlen: time dimension of the padded output tensor.
        :return: ``BaseModelOutput`` with ``last_hidden_state`` of shape
            ``(batch, speech_maxlen, output_dim)``.
        """
        # For max-length audio (300s => ~7500 speech frames at 25Hz), the
        # Torch SDPA path can be prohibitively memory hungry (~O(n^2) inside the
        # longest chunks). Require FlashAttention for such inputs to avoid OOM
        # and performance cliffs.
        if int(speech_maxlen) >= 7500:
            if not _has_module("flash_attn"):
                raise RuntimeError(
                    "FunAudioChat long audio (~300s) requires FlashAttention-2 "
                    "for the continuous audio tower, but `flash_attn` is not "
                    "installed in the runtime environment."
                )
            if not getattr(
                self.layers[0].self_attn.attn, "is_flash_attn_backend", False
            ):
                raise RuntimeError(
                    "FunAudioChat long audio (~300s) requires FlashAttention for the "
                    "continuous audio tower, but the selected MM encoder attention "
                    "backend is not FlashAttention."
                )
        # Handle empty / invalid items (feature_lens == 0) without crashing.
        original_batch_size = int(feature_lens.size(0))
        device = input_features.device
        valid_mask = feature_lens > 0
        valid_indices = torch.where(valid_mask)[0]
        if valid_indices.numel() == 0:
            # No valid audio in the batch: return all zeros.
            output_dim = int(self.proj.out_features)
            return BaseModelOutput(
                last_hidden_state=torch.zeros(
                    (original_batch_size, speech_maxlen, output_dim),
                    device=device,
                    dtype=self.proj.weight.dtype,
                )
            )
        # Keep only valid items, re-packed along the time dimension.
        input_features_list = input_features.split(feature_lens.tolist(), dim=1)
        valid_input_features_list = [input_features_list[int(i)] for i in valid_indices]
        valid_input_features = torch.cat(valid_input_features_list, dim=1)
        valid_feature_lens = feature_lens[valid_mask]
        valid_aftercnn_lens = aftercnn_lens[valid_mask]
        # Split every valid item into full (n_window * 2)-frame chunks plus
        # a final remainder chunk.
        chunk_num = torch.ceil(valid_feature_lens / (self.n_window * 2)).long()
        chunk_lengths_list: list[int] = []
        full_chunk_len = self.n_window * 2
        for i, length in enumerate(valid_feature_lens):
            num_chunks_for_sample = int(chunk_num[i].item())
            if num_chunks_for_sample == 0:
                continue
            chunk_lengths_list.extend([full_chunk_len] * (num_chunks_for_sample - 1))
            last_chunk_len = int(length.item()) % full_chunk_len
            if last_chunk_len == 0:
                last_chunk_len = full_chunk_len
            chunk_lengths_list.append(last_chunk_len)
        chunk_lengths = torch.tensor(
            chunk_lengths_list, dtype=torch.long, device=device
        )
        chunk_list = valid_input_features.split(chunk_lengths.tolist(), dim=1)
        padded_feature, padded_mask, padded_mask_after_cnn = (
            self.padded_and_mask_function(
                chunk_list, chunk_lengths, padding_value=0, padding_side="right"
            )
        )
        # Conv frontend; conv2 (stride 2) halves the time dimension.
        padded_embed = nn.functional.gelu(self.conv1(padded_feature)) * padded_mask
        padded_embed = nn.functional.gelu(self.conv2(padded_embed)).transpose(1, 2)
        padded_embed = padded_embed + self.positional_embedding.positional_embedding[
            : padded_embed.shape[1], :
        ].unsqueeze(0).to(padded_embed.dtype)
        # Drop padding and pack all chunks into one flat sequence; cu_seqlens
        # marks the chunk boundaries for varlen attention.
        hidden_states = padded_embed[padded_mask_after_cnn]
        cu_seqlens = torch.cat(
            (
                torch.zeros(1, device=padded_mask_after_cnn.device, dtype=torch.int32),
                padded_mask_after_cnn.sum(1).cumsum(0),
            )
        ).to(torch.int32)
        for encoder_layer in self.layers:
            (hidden_states,) = encoder_layer(
                hidden_states,
                cu_seqlens=cu_seqlens,
                **kwargs,
            )
        # Halve the temporal resolution per audio item; length-1 sequences
        # pass through unchanged.
        hidden_states_list = hidden_states.split(valid_aftercnn_lens.tolist(), dim=0)
        pooled_list: list[torch.Tensor] = []
        pooled_lengths: list[int] = []
        for each_audio_states in hidden_states_list:
            seq_len = int(each_audio_states.shape[0])
            if seq_len >= 2:
                pooled = nn.functional.avg_pool1d(
                    each_audio_states.transpose(0, 1), kernel_size=2, stride=2
                ).transpose(0, 1)
            else:
                pooled = each_audio_states
            pooled_list.append(pooled)
            pooled_lengths.append(int(pooled.shape[0]))
        pooled_concat = torch.cat(pooled_list, dim=0)
        processed_concat = self.proj(self.ln_post(pooled_concat))
        processed_audio_list = list(processed_concat.split(pooled_lengths, dim=0))
        output_dim = (
            int(processed_audio_list[0].shape[-1])
            if processed_audio_list
            else int(self.proj.out_features)
        )
        # Scatter each item's embeddings into the zero-padded output batch.
        output_hidden_states = torch.zeros(
            (original_batch_size, speech_maxlen, output_dim),
            dtype=processed_audio_list[0].dtype
            if processed_audio_list
            else self.proj.weight.dtype,
            device=device,
        )
        for valid_idx, processed in zip(valid_indices, processed_audio_list):
            seq_len = min(int(processed.shape[0]), int(speech_maxlen))
            output_hidden_states[int(valid_idx), :seq_len] = processed[:seq_len]
        return BaseModelOutput(last_hidden_state=output_hidden_states)

    def padded_and_mask_function(
        self,
        tensor_list: Sequence[torch.Tensor],
        tensor_len: torch.Tensor,
        padding_value: float = 0.0,
        padding_side: str = "right",
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Right-pad a list of chunks to a common length and build masks.

        :param tensor_list: chunks shaped ``(dim, length_i)``.
        :param tensor_len: per-chunk lengths.
        :param padding_value: fill value for padded positions.
        :param padding_side: only ``"right"`` is supported.
        :return: ``(padded (batch, dim, max_len), float mask
            (batch, 1, max_len), boolean post-conv mask
            (batch, max_len_after_cnn))``.
        """
        max_len = int(tensor_len.max().item())
        dim = int(tensor_list[0].shape[0])
        padded_tensor = torch.full(
            size=(len(tensor_list), dim, max_len),
            fill_value=padding_value,
            dtype=self.dtype,
            device=tensor_list[0].device,
        )
        batch_mask = torch.zeros(
            (len(tensor_len), max_len), dtype=torch.long, device=padded_tensor.device
        )
        for i, length in enumerate(tensor_len):
            length_val = int(length.item())
            batch_mask[i, :length_val] = 1
            padded_tensor[i, :, :length_val] = tensor_list[i]
        # Lengths after the stride-2 conv: ceil(len / 2).
        feature_lens_after_cnn = (tensor_len - 1) // 2 + 1
        max_len_after_cnn = int(feature_lens_after_cnn.max().item())
        batch_mask_after_cnn = torch.zeros(
            (len(tensor_len), max_len_after_cnn),
            dtype=torch.long,
            device=padded_tensor.device,
        )
        for i, length in enumerate(feature_lens_after_cnn):
            batch_mask_after_cnn[i, : int(length.item())] = 1
        # NOTE(review): this check runs after the padding work is done.
        if padding_side != "right":
            raise NotImplementedError("Only right padding is supported.")
        return (
            padded_tensor,
            batch_mask.unsqueeze(1).to(padded_tensor.dtype),
            batch_mask_after_cnn.bool(),
        )

    # From the HF FunAudioChat implementation.
    def _get_feat_extract_output_lengths(
        self, input_lengths: torch.LongTensor
    ) -> tuple[torch.LongTensor, torch.LongTensor]:
        """Map raw mel lengths to post-conv and post-pool lengths."""
        # Stride-2 conv: ceil(len / 2).
        input_lengths = (input_lengths - 1) // 2 + 1
        # Kernel-2 stride-2 pooling: floor((len - 2) / 2) + 1.
        output_lengths = (input_lengths - 2) // 2 + 1
        return input_lengths, output_lengths
class FunAudioChatDiscreteEncoder(nn.Module):
"""Discrete audio encoder (speech tokenizer -> grouped embeddings)."""
def __init__(self, config: Any):
super().__init__()
self.padding_idx = int(config.pad_token_id)
self.group_size = int(config.group_size)
self.hidden_size = int(config.output_dim)
self.continuous_features_mode = getattr(
config, "continuous_features_mode", "add"
)
self.embed_tokens = nn.Embedding(
int(config.codebook_size), self.hidden_size, self.padding_idx
)
self.output_matching = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.continual_output_matching = nn.Linear(
self.hidden_size, self.hidden_size, bias=False
)
def forward(
self,
audio_ids: torch.Tensor,
continuous_audio_features: torch.Tensor | None = None,
continuous_audio_output_lengths: torch.Tensor | None = None,
feature_exist_mask: torch.Tensor | None = None,
) -> torch.Tensor:
del continuous_audio_output_lengths
inputs_embeds = self.embed_tokens(audio_ids)
hidden_states = inputs_embeds.reshape(
inputs_embeds.shape[0], -1, self.group_size * self.hidden_size
)
hidden_states = hidden_states.reshape(
hidden_states.shape[0], -1, self.group_size, self.hidden_size
).mean(dim=2)
hidden_states = self.output_matching(hidden_states)
if continuous_audio_features is not None:
continuous_audio_features = continuous_audio_features.reshape(
continuous_audio_features.shape[0],
-1,
self.group_size,
self.hidden_size,
).mean(dim=2)
continuous_audio_hidden_states = self.continual_output_matching(
continuous_audio_features
)
if feature_exist_mask is None:
feature_exist_mask = torch.ones(
(hidden_states.shape[0],),
dtype=torch.bool,
device=hidden_states.device,
)
if self.continuous_features_mode == "add":
hidden_states[feature_exist_mask] += continuous_audio_hidden_states
else:
hidden_states[feature_exist_mask] = continuous_audio_hidden_states
return hidden_states
def _get_feat_extract_output_lengths(
self, input_lengths: torch.LongTensor
) -> tuple[torch.LongTensor, torch.LongTensor]:
output_lengths = (input_lengths + self.group_size - 1) // self.group_size
return input_lengths, output_lengths
class FunAudioChatProcessingInfo(BaseProcessingInfo):
    """Static processing metadata for FunAudioChat audio inputs."""

    # Frame rate (Hz) of the discrete speech tokenizer.
    token_fps: int = 25

    @cached_property
    def feature_extractor(self) -> WhisperFeatureExtractor:
        """Whisper mel feature extractor, loaded once per processor."""
        return WhisperFeatureExtractor.from_pretrained(self.model_id)

    @cached_property
    def speech_tokenizer(self) -> PreTrainedTokenizerFast:
        """Discrete speech tokenizer stored in the checkpoint subfolder."""
        return PreTrainedTokenizerFast.from_pretrained(
            self.model_id, subfolder="speech_tokenizer"
        )

    def get_feature_extractor(self, **kwargs: object) -> WhisperFeatureExtractor:
        # Accept (and ignore) processor kwargs: `_call_hf_processor` forwards
        # `**mm_kwargs` here, which previously raised TypeError whenever the
        # mapping was non-empty. The extractor is cached, so overrides are
        # intentionally not applied.
        return self.feature_extractor

    def get_speech_tokenizer(self) -> PreTrainedTokenizerFast:
        return self.speech_tokenizer

    def get_data_parser(self):
        return MultiModalDataParser(
            target_sr=int(self.feature_extractor.sampling_rate),
            target_channels=self.get_target_channels(),
            expected_hidden_size=self._get_expected_hidden_size(),
        )

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # No hard cap on the number of audio items per prompt.
        return {"audio": None}

    def get_target_channels(self) -> int:
        # Audio is always downmixed to mono.
        return 1

    def get_mm_max_tokens_per_item(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> Mapping[str, int] | None:
        # The discrete audio encoder downsamples 25Hz frames with group_size=5,
        # so for a 300s clip the max number of `<|AUDIO|>` placeholders is 1500.
        cfg = self.get_hf_config()
        audio_cfg = getattr(cfg, "audio_config", None)
        max_audio_tokens = int(getattr(audio_cfg, "max_source_positions", 1500))
        return {"audio": max_audio_tokens}

    def get_audio_group_size(self) -> int:
        """Group size used by the discrete encoder (default 5)."""
        cfg = self.get_hf_config()
        audio_cfg = getattr(cfg, "audio_config", None)
        return int(getattr(audio_cfg, "group_size", 5))
class FunAudioChatDummyInputsBuilder(
    BaseDummyInputsBuilder[FunAudioChatProcessingInfo]
):
    """Builds worst-case dummy prompts and audio clips for profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        count = int(mm_counts.get("audio", 0))
        return "<|audio_bos|><|AUDIO|><|audio_eos|>" * count

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        extractor = self.info.get_feature_extractor()
        sampling_rate = int(extractor.sampling_rate)
        # Profiling wants the worst case: pick the audio length that yields
        # the maximum possible number of encoder tokens.
        hf_cfg = self.info.get_hf_config()
        audio_cfg = getattr(hf_cfg, "audio_config", None)
        max_audio_tokens = int(getattr(audio_cfg, "max_source_positions", 1500))
        group_size = self.info.get_audio_group_size()
        token_fps = int(getattr(self.info, "token_fps", 25))
        target_num_frames = max(1, max_audio_tokens) * max(1, group_size)
        # Ceil-divide frames * rate by fps so the clip spans every frame.
        audio_len = max(
            1,
            (target_num_frames * sampling_rate + token_fps - 1) // token_fps,
        )
        num_audios = int(mm_counts.get("audio", 0))
        return {
            "audio": self._get_dummy_audios(
                length=audio_len,
                num_audios=num_audios,
                overrides=mm_options.get("audio"),
            )
        }
class FunAudioChatMultiModalProcessor(
    BaseMultiModalProcessor[FunAudioChatProcessingInfo]
):
    """Converts prompt text plus raw audio into FunAudioChat model inputs."""

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Tokenize the prompt and extract, per audio clip, both discrete
        speech-token ids and continuous Whisper mel features."""
        tokenizer = self.info.get_tokenizer()
        input_ids = torch.tensor([tokenizer.encode(prompt, **tok_kwargs)])
        audios = mm_data.get("audios", [])
        if not audios:
            # Text-only prompt: nothing to extract.
            return BatchFeature({"input_ids": input_ids})
        feature_extractor = self.info.get_feature_extractor(**mm_kwargs)
        sr = int(feature_extractor.sampling_rate)
        # Clips shorter than one STFT window are zero-padded up to n_fft.
        min_samples = int(getattr(feature_extractor, "n_fft", 400) or 400)
        wavs: list[np.ndarray] = []
        speech_strs: list[str] = []
        speech_tokenizer = self.info.get_speech_tokenizer()
        pad_token = speech_tokenizer.pad_token or "<|audio_pad|>"
        for audio in audios:
            if isinstance(audio, torch.Tensor):
                audio = audio.detach().cpu().numpy()
            audio_np = np.asarray(audio, dtype=np.float32)
            if min_samples > 0 and audio_np.shape[0] < min_samples:
                audio_np = np.pad(
                    audio_np, (0, min_samples - audio_np.shape[0]), mode="constant"
                )
            wavs.append(audio_np)
            # One pad-token placeholder per 1/token_fps seconds of audio.
            num_frames = int(
                (float(audio_np.shape[0]) / float(sr)) * float(self.info.token_fps)
            )
            speech_strs.append(pad_token * max(1, int(num_frames)))
        audio_group_size = self.info.get_audio_group_size()
        # Pad token sequences to a multiple of group_size so the discrete
        # encoder can regroup them without a remainder.
        speech_inputs = speech_tokenizer(
            speech_strs,
            return_attention_mask=True,
            return_token_type_ids=False,
            padding=True,
            pad_to_multiple_of=audio_group_size,
            return_tensors="pt",
        )
        wav_inputs = feature_extractor(
            wavs,
            sampling_rate=sr,
            return_attention_mask=True,
            padding="max_length",
            return_tensors="pt",
        )
        mm_inputs: dict[str, torch.Tensor] = {
            "speech_ids": speech_inputs["input_ids"],
            "speech_attention_mask": speech_inputs["attention_mask"],
            "input_features": wav_inputs["input_features"],
            "feature_attention_mask": wav_inputs["attention_mask"],
            "feature_exist_mask": torch.ones((len(wavs),), dtype=torch.bool),
        }
        return BatchFeature({"input_ids": input_ids, **mm_inputs})

    def _hf_processor_applies_updates(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> bool:
        # Placeholder expansion is handled by vLLM's prompt updates below,
        # never by the HF processor above.
        return False

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        # Every field emitted by _call_hf_processor is batched per audio item.
        return {
            "speech_ids": MultiModalFieldConfig.batched("audio"),
            "speech_attention_mask": MultiModalFieldConfig.batched("audio"),
            "input_features": MultiModalFieldConfig.batched("audio"),
            "feature_attention_mask": MultiModalFieldConfig.batched("audio"),
            "feature_exist_mask": MultiModalFieldConfig.batched("audio"),
        }

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Expand each `<|AUDIO|>` placeholder to one token per encoder frame."""
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()
        audio_token = "<|AUDIO|>"
        audio_token_id = vocab[audio_token]
        out_mm_data = out_mm_kwargs.get_data()
        speech_attention_mask = out_mm_data.get("speech_attention_mask")
        if speech_attention_mask is None:
            audio_output_lengths: list[int] = []
        else:
            assert isinstance(speech_attention_mask, torch.Tensor)
            speech_lengths = speech_attention_mask.sum(-1)
            group_size = self.info.get_audio_group_size()
            # ceil(valid_tokens / group_size) frames per audio item.
            audio_output_lengths = (
                (speech_lengths + group_size - 1) // group_size
            ).tolist()

        def get_replacement_funaudiochat(item_idx: int):
            # Fall back to a single placeholder when no lengths were computed.
            num_features = (
                int(audio_output_lengths[item_idx]) if audio_output_lengths else 1
            )
            if num_features <= 0:
                audios = mm_items.get_items("audio", AudioProcessorItems)
                audio_len = audios.get_audio_length(item_idx)
                raise ValueError(
                    f"The audio (len={audio_len}) is too short to be "
                    "represented inside the model"
                )
            audio_tokens = [audio_token_id] * num_features
            return PromptUpdateDetails.select_token_id(
                audio_tokens,
                embed_token_id=audio_token_id,
            )

        return [
            PromptReplacement(
                modality="audio",
                target=audio_token,
                replacement=get_replacement_funaudiochat,
            )
        ]
@MULTIMODAL_REGISTRY.register_processor(
    FunAudioChatMultiModalProcessor,
    info=FunAudioChatProcessingInfo,
    dummy_inputs=FunAudioChatDummyInputsBuilder,
)
class FunAudioChatForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
    """FunAudioChat audio-language model.

    Pairs a continuous Whisper-style audio encoder and a discrete
    speech-token encoder with a Qwen3 language model. Audio embeddings are
    produced in ``embed_multimodal`` and merged into the token sequence by
    vLLM's multimodal machinery.
    """

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Placeholder string inserted into the prompt for one audio item."""
        if modality.startswith("audio"):
            return "<|audio_bos|><|AUDIO|><|audio_eos|>"
        raise ValueError("Only audio modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.quant_config = quant_config
        # Both audio towers are built from the same audio sub-config.
        with self._mark_tower_model(vllm_config, "audio"):
            self.continuous_audio_tower = FunAudioChatAudioEncoder(config.audio_config)
            self.audio_tower = FunAudioChatDiscreteEncoder(config.audio_config)
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
                architectures=["Qwen3ForCausalLM"],
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _get_continuous_audio_features(
        self,
        input_features: torch.Tensor,
        feature_attention_mask: torch.Tensor,
        speech_maxlen: int,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run the continuous tower on unpadded mel features.

        Returns the tower's last hidden state and the per-item output
        lengths after the encoder's downsampling.
        """
        # Align mask and features to avoid indexing errors when padding differs.
        if (
            input_features.dim() == 3
            and feature_attention_mask.shape[1] != input_features.shape[-1]
        ):
            min_len = min(
                int(feature_attention_mask.shape[1]), int(input_features.shape[-1])
            )
            feature_attention_mask = feature_attention_mask[:, :min_len]
            input_features = input_features[:, :, :min_len]
        feature_lens = torch.sum(feature_attention_mask, dim=1)
        # Flatten to (mel_bins, total_valid_frames) by dropping padded frames.
        flat_features = input_features.permute(0, 2, 1)[
            feature_attention_mask.bool()
        ].permute(1, 0)
        audio_feat_lengths, audio_output_lengths = (
            self.continuous_audio_tower._get_feat_extract_output_lengths(feature_lens)
        )
        audio_outputs = self.continuous_audio_tower(
            flat_features,
            feature_lens=feature_lens,
            aftercnn_lens=audio_feat_lengths,
            speech_maxlen=speech_maxlen,
        )
        return audio_outputs.last_hidden_state, audio_output_lengths

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Encode batched audio inputs into one embedding tensor per item.

        Expected kwargs (``speech_ids`` is required, the rest optional):
        ``speech_ids``, ``speech_attention_mask``, ``input_features``,
        ``feature_attention_mask``, ``feature_exist_mask``.
        """
        speech_ids = kwargs.get("speech_ids")
        speech_attention_mask = kwargs.get("speech_attention_mask")
        input_features = kwargs.get("input_features")
        feature_attention_mask = kwargs.get("feature_attention_mask")
        feature_exist_mask = kwargs.get("feature_exist_mask")
        if speech_ids is None:
            return []
        pad_id = int(getattr(self.audio_tower, "padding_idx", 0))
        # Normalize speech_ids into a right-padded (batch, seq) tensor.
        if not isinstance(speech_ids, torch.Tensor):
            if (
                isinstance(speech_ids, (list, tuple))
                and len(speech_ids) > 0
                and all(isinstance(t, torch.Tensor) for t in speech_ids)
            ):
                speech_ids_tensors = []
                for t in speech_ids:
                    if t.dim() == 2 and t.shape[0] == 1:
                        t = t.squeeze(0)
                    if t.dim() != 1:
                        raise TypeError(
                            "FunAudioChat speech_ids must be a 1D tensor per item "
                            f"(got shape={tuple(t.shape)})"
                        )
                    speech_ids_tensors.append(t)
                speech_ids = nn.utils.rnn.pad_sequence(
                    speech_ids_tensors,
                    batch_first=True,
                    padding_value=pad_id,
                )
            else:
                raise TypeError(
                    "FunAudioChat speech_ids must be a Tensor or a sequence of Tensors "
                    f"(got {type(speech_ids)})"
                )
        if speech_attention_mask is None:
            # Derive validity from non-pad positions when no mask was given.
            speech_attention_mask = speech_ids.ne(pad_id).to(dtype=torch.int64)
        # Normalize the mask the same way as speech_ids.
        if not isinstance(speech_attention_mask, torch.Tensor):
            if (
                isinstance(speech_attention_mask, (list, tuple))
                and len(speech_attention_mask) > 0
                and all(isinstance(t, torch.Tensor) for t in speech_attention_mask)
            ):
                mask_tensors = []
                for t in speech_attention_mask:
                    if t.dim() == 2 and t.shape[0] == 1:
                        t = t.squeeze(0)
                    if t.dim() != 1:
                        raise TypeError(
                            "FunAudioChat speech_attention_mask must be a 1D tensor "
                            f"per item (got shape={tuple(t.shape)})"
                        )
                    mask_tensors.append(t)
                speech_attention_mask = nn.utils.rnn.pad_sequence(
                    mask_tensors,
                    batch_first=True,
                    padding_value=0,
                )
            else:
                raise TypeError(
                    "FunAudioChat speech_attention_mask must be a Tensor or a "
                    f"sequence of Tensors (got {type(speech_attention_mask)})"
                )
        # Optional debug instrumentation, enabled via VLLM_FUN_AUDIOCHAT_DEBUG=1.
        debug = os.getenv("VLLM_FUN_AUDIOCHAT_DEBUG", "") == "1"
        if debug:
            print(
                f"[FunAudioChat] embed_multimodal speech_ids={tuple(speech_ids.shape)} "
                f"speech_attention_mask={tuple(speech_attention_mask.shape)}",
                flush=True,
            )
            attn_impl = getattr(
                self.continuous_audio_tower.config, "_attn_implementation", None
            )
            print(
                f"[FunAudioChat] audio_attn_impl={attn_impl}",
                flush=True,
            )
            if hasattr(self.continuous_audio_tower, "conv1"):
                conv1_w = self.continuous_audio_tower.conv1.weight
                print(
                    f"[FunAudioChat] conv1_w_norm={float(conv1_w.norm().item()):.6g}",
                    flush=True,
                )
            # Weight norms of the first attention layer (sanity check that
            # checkpoint weights were loaded); best-effort only.
            try:
                attn0 = self.continuous_audio_tower.layers[0].self_attn
                q_norm = float(attn0.q_proj.weight.norm().item())
                k_norm = float(attn0.k_proj.weight.norm().item())
                v_norm = float(attn0.v_proj.weight.norm().item())
                o_norm = float(attn0.out_proj.weight.norm().item())
                print(
                    f"[FunAudioChat] attn0_q_norm={q_norm:.6g} "
                    f"k_norm={k_norm:.6g} "
                    f"v_norm={v_norm:.6g} "
                    f"o_norm={o_norm:.6g}",
                    flush=True,
                )
            except Exception:
                pass
            if isinstance(input_features, torch.Tensor):
                print(
                    f"[FunAudioChat] input_features={tuple(input_features.shape)}",
                    flush=True,
                )
            if isinstance(feature_attention_mask, torch.Tensor):
                print(
                    "[FunAudioChat] feature_attention_mask="
                    f"{tuple(feature_attention_mask.shape)}",
                    flush=True,
                )
        group_size = int(self.audio_tower.group_size)
        speech_maxlen = int(speech_ids.shape[-1])
        # Ensure token length is divisible by group_size.
        target_len = ((speech_maxlen + group_size - 1) // group_size) * group_size
        if target_len > speech_maxlen:
            pad_id = int(self.audio_tower.padding_idx)
            pad_len = target_len - speech_maxlen
            speech_ids = nn.functional.pad(speech_ids, (0, pad_len), value=pad_id)
            speech_attention_mask = nn.functional.pad(
                speech_attention_mask, (0, pad_len), value=0
            )
            speech_maxlen = int(speech_ids.shape[-1])
        continuous_audio_features = None
        continuous_audio_output_lengths = None
        if input_features is not None and feature_attention_mask is not None:
            assert isinstance(input_features, torch.Tensor)
            assert isinstance(feature_attention_mask, torch.Tensor)
            continuous_audio_features, continuous_audio_output_lengths = (
                self._get_continuous_audio_features(
                    input_features=input_features,
                    feature_attention_mask=feature_attention_mask,
                    speech_maxlen=speech_maxlen,
                )
            )
        if feature_exist_mask is None:
            feature_exist_mask = torch.ones(
                (speech_ids.shape[0],), dtype=torch.bool, device=speech_ids.device
            )
        assert isinstance(feature_exist_mask, torch.Tensor)
        # Discrete tower merges the continuous features internally.
        audio_features = self.audio_tower(
            speech_ids,
            continuous_audio_features=continuous_audio_features,
            continuous_audio_output_lengths=continuous_audio_output_lengths,
            feature_exist_mask=feature_exist_mask,
        )
        _, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths(
            speech_attention_mask.sum(-1)
        )
        # Trim each item's embeddings back to its true (unpadded) length.
        lengths = audio_output_lengths.tolist()
        embeds = tuple(
            audio_features[i, : int(length)] for i, length in enumerate(lengths)
        )
        if debug:
            embed_lens = [int(t.shape[0]) for t in embeds]
            print(f"[FunAudioChat] embed_multimodal out_lens={embed_lens}", flush=True)
            if embeds:
                t0 = embeds[0]
                print(
                    f"[FunAudioChat] embed0 dtype={t0.dtype} device={t0.device} "
                    f"nan={bool(torch.isnan(t0).any())} "
                    f"norm={float(t0.norm().item()):.6g}",
                    flush=True,
                )
            # Optional one-shot dump of embeddings to disk for offline
            # comparison against the HF reference implementation.
            dump_path = os.getenv("VLLM_FUN_AUDIOCHAT_DUMP_PATH", "")
            if (
                dump_path
                and speech_ids.shape[0] == 1
                and len(embeds) == 1
                and embed_lens[0] > 10
            ):
                if not os.path.exists(dump_path):
                    np.save(dump_path, embeds[0].detach().float().cpu().numpy())
                    print(f"[FunAudioChat] dumped embeds to {dump_path}", flush=True)
                cont_path = dump_path.replace(".npy", "_cont.npy")
                if continuous_audio_features is not None and not os.path.exists(
                    cont_path
                ):
                    np.save(
                        cont_path,
                        continuous_audio_features.detach().float().cpu().numpy(),
                    )
                    print(
                        f"[FunAudioChat] dumped continuous to {cont_path}", flush=True
                    )
        return embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        """Delegate decoding to the underlying language model."""
        del kwargs
        if intermediate_tensors is not None:
            # Non-first pipeline-parallel rank: hidden states come from the
            # previous stage, so local embeddings are discarded.
            inputs_embeds = None
        return self.language_model.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor | None:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights; the audio_invert_tower is unused here."""
        loader = AutoWeightsLoader(self, skip_prefixes=["audio_invert_tower."])
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/funaudiochat.py",
"license": "Apache License 2.0",
"lines": 937,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/funaudiochat.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from __future__ import annotations
from transformers import PretrainedConfig
# NOTE: Temporary shim for FunAudioChat checkpoints.
# These checkpoints use `model_type="funaudiochat"`, which is not currently
# recognized by released Transformers, and the public checkpoint does not
# provide an `auto_map` to enable `trust_remote_code=True`.
# Remove this file once Transformers adds native support (or the checkpoint
# provides an `auto_map`) and vLLM can rely on `AutoConfig.from_pretrained()`.
class FunAudioChatAudioEncoderConfig(PretrainedConfig):
    """Configuration for the FunAudioChat audio encoders.

    Mirrors a Whisper-style encoder configuration and adds the
    FunAudioChat-specific fields used by the discrete speech-token path
    (``codebook_size``, ``group_size``, ``output_dim``,
    ``continuous_features_mode``).
    """

    model_type = "funaudiochat_audio_encoder"

    def __init__(
        self,
        _attn_implementation: str | None = None,
        num_mel_bins: int = 128,
        encoder_layers: int = 32,
        encoder_attention_heads: int = 20,
        encoder_ffn_dim: int = 5120,
        d_model: int = 1280,
        dropout: float = 0.0,
        attention_dropout: float = 0.0,
        activation_function: str = "gelu",
        activation_dropout: float = 0.0,
        scale_embedding: bool = False,
        initializer_range: float = 0.02,
        max_source_positions: int = 1500,
        n_window: int = 100,
        output_dim: int = 3584,
        bos_token_id: int | None = None,
        codebook_size: int | None = None,
        continuous_features_mode: str = "replace",
        crq_transformer_config: dict | None = None,
        eos_token_id: int | None = None,
        group_size: int = 5,
        enable_audio_invert_tower: bool = True,
        pad_token_id: int | None = None,
        **kwargs,
    ) -> None:
        # The attention implementation may arrive either as an explicit
        # argument or inside kwargs (e.g. a round-tripped serialized config);
        # pop it before calling the base constructor so it is only applied
        # once, after super().__init__() has run.
        attn_impl = kwargs.pop("_attn_implementation", None) or _attn_implementation
        super().__init__(**kwargs)
        # Match HF default for attention implementation selection.
        self._attn_implementation = attn_impl or "sdpa"
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        # Alias expected by code that reads the generic HF attribute name.
        self.num_hidden_layers = encoder_layers
        self.initializer_range = initializer_range
        self.scale_embedding = scale_embedding
        self.max_source_positions = max_source_positions
        self.n_window = n_window
        self.output_dim = output_dim
        self.bos_token_id = bos_token_id
        self.codebook_size = codebook_size
        self.continuous_features_mode = continuous_features_mode
        self.crq_transformer_config = crq_transformer_config
        self.eos_token_id = eos_token_id
        self.group_size = group_size
        self.enable_audio_invert_tower = enable_audio_invert_tower
        self.pad_token_id = pad_token_id
class FunAudioChatConfig(PretrainedConfig):
    """Top-level FunAudioChat config: an audio encoder plus a text model."""

    model_type = "funaudiochat"
    attribute_map = {
        "audio_token_id": "audio_token_index",
    }

    def __init__(
        self,
        audio_config: PretrainedConfig | dict | None = None,
        text_config: PretrainedConfig | dict | None = None,
        audio_token_index: int = 151646,
        ignore_index: int = -100,
        hidden_size: int | None = None,
        **kwargs,
    ) -> None:
        self.audio_token_index = audio_token_index
        self.ignore_index = ignore_index
        # Coerce a dict (or None) audio_config into the typed config class.
        if isinstance(audio_config, dict):
            audio_config.setdefault(
                "model_type", FunAudioChatAudioEncoderConfig.model_type
            )
            audio_config = FunAudioChatAudioEncoderConfig(**audio_config)
        elif audio_config is None:
            audio_config = FunAudioChatAudioEncoderConfig()
        self.audio_config = audio_config
        if isinstance(text_config, dict):
            # Default to qwen2 for backwards compatibility; FunAudioChat uses
            # qwen3 in practice for recent checkpoints.
            text_config.setdefault("model_type", "qwen2")
            import transformers

            text_cls = transformers.CONFIG_MAPPING[text_config["model_type"]]
            text_config = text_cls(**text_config)
        elif text_config is None:
            import transformers

            text_config = transformers.CONFIG_MAPPING["qwen2"]()
        self.text_config = text_config
        # Surface the text model's hidden size at the top level unless the
        # caller overrides it explicitly.
        self.hidden_size = (
            int(self.text_config.hidden_size)
            if hidden_size is None
            else int(hidden_size)
        )
        super().__init__(**kwargs)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/funaudiochat.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/attention_benchmarks/batch_spec.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Simplified batch specification grammar for attention benchmarks.
Grammar (underscore-separated segments):
Format: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?
- count: Number of identical requests (optional, default=1)
- q_len: Query length (number of new tokens)
- seq_len: Total sequence length (optional, defaults to q_len for prefill)
- 'k' suffix: Multiplies value by 1024
Common patterns:
- Prefill: q_len == seq_len (e.g., "q2k" → 2048 new tokens, 2048 seq)
- Decode: q_len == 1 (e.g., "q1s1k" → 1 token, 1024 seq length)
- Extend: q_len < seq_len (e.g., "q4s1k" → 4 tokens, 1024 seq length)
Examples:
q2k -> [(2048, 2048)] # Prefill: 2048 tokens
q1s1k -> [(1, 1024)] # Decode: 1 token, 1K sequence
8q1s1k -> [(1, 1024)] * 8 # 8 decode requests
q4s1k -> [(4, 1024)] # 4-token extend (spec decode)
2q1k_32q1s1k -> [(1024, 1024)] * 2 + [(1, 1024)] * 32 # Mixed batch
16q4s1k -> [(4, 1024)] * 16 # 16 spec decode requests
"""
from collections import Counter
from dataclasses import dataclass
import regex as re
@dataclass
class BatchRequest:
    """One (q_len, kv_len) request within a benchmark batch."""

    # Number of new query tokens processed this step.
    q_len: int
    # Total KV-cache length (context + query).
    kv_len: int

    @property
    def is_decode(self) -> bool:
        """A decode step processes exactly one new token."""
        return self.q_len == 1

    @property
    def is_prefill(self) -> bool:
        """Pure prefill: every KV position is a new query token."""
        return self.kv_len == self.q_len

    @property
    def is_extend(self) -> bool:
        """Context extension: several new tokens atop an existing cache."""
        return self.kv_len > self.q_len > 1

    @property
    def context_len(self) -> int:
        """Number of cached tokens preceding the query."""
        return self.kv_len - self.q_len

    def as_tuple(self) -> tuple[int, int]:
        """(q_len, kv_len) pair for legacy call sites."""
        return (self.q_len, self.kv_len)
def _parse_size(size_str: str, k_suffix: str) -> int:
"""Parse size string with optional 'k' suffix."""
size = int(size_str)
return size * 1024 if k_suffix == "k" else size
def parse_batch_spec(spec: str) -> list[BatchRequest]:
    """
    Parse batch specification string into list of BatchRequest objects.

    Grammar: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?

    Args:
        spec: Batch specification string (see module docstring for grammar)

    Returns:
        List of BatchRequest objects

    Raises:
        ValueError: If spec format is invalid
    """
    # Unified pattern: (<count>?) q<q_len>(k?) (s<seq_len>(k?))?
    segment_pattern = r"^(?:(\d+))?q(\d+)(k?)(?:s(\d+)(k?))?$"
    requests: list[BatchRequest] = []
    for segment in spec.split("_"):
        match = re.match(segment_pattern, segment)
        if match is None:
            raise ValueError(f"Invalid batch spec segment: '{segment}'")
        count = int(match.group(1)) if match.group(1) else 1
        q_len = _parse_size(match.group(2), match.group(3))
        # kv_len defaults to q_len (pure prefill) when no 's' part is given.
        kv_len = (
            _parse_size(match.group(4), match.group(5)) if match.group(4) else q_len
        )
        requests.extend([BatchRequest(q_len=q_len, kv_len=kv_len)] * count)
    return requests
def format_batch_spec(requests: list[BatchRequest]) -> str:
    """
    Format list of BatchRequest into human-readable string.

    Groups requests by type and provides counts and sizes.

    Args:
        requests: List of BatchRequest objects

    Returns:
        Formatted string describing the batch
    """

    def _fmt(value: int) -> str:
        # Render exact multiples of 1024 as "<n>k".
        return f"{value // 1024}k" if value % 1024 == 0 else str(value)

    buckets: dict[str, list[tuple[int, int]]] = {
        "prefill": [],
        "extend": [],
        "decode": [],
    }
    for req in requests:
        pair = (req.q_len, req.kv_len)
        if req.is_prefill:
            buckets["prefill"].append(pair)
        elif req.is_extend:
            buckets["extend"].append(pair)
        elif req.is_decode:
            buckets["decode"].append(pair)

    parts = []
    for kind in ("prefill", "extend", "decode"):
        bucket = buckets[kind]
        if not bucket:
            continue
        inner = []
        for (q, kv), cnt in Counter(bucket).items():
            if kind == "prefill":
                inner.append(f"{cnt}x{_fmt(q)}")
            elif kind == "decode":
                inner.append(f"{cnt}x{_fmt(kv)}")
            else:  # extend: show both query and kv sizes
                inner.append(f"{cnt}xq{_fmt(q)}kv{_fmt(kv)}")
        parts.append(f"{len(bucket)} {kind} ({', '.join(inner)})")
    return ", ".join(parts)
def reorder_for_flashinfer(requests: list[BatchRequest]) -> list[BatchRequest]:
    """
    Reorder requests for FlashInfer: decode first, then prefill.

    FlashInfer expects decode requests before prefill requests for
    optimal performance.

    Args:
        requests: Original list of BatchRequest

    Returns:
        Reordered list with decode requests first
    """
    # Stable sort keeps the relative order within each group intact:
    # decode requests (key False) sort ahead of everything else.
    return sorted(requests, key=lambda req: not req.is_decode)
def split_by_type(
    requests: list[BatchRequest],
) -> dict[str, list[BatchRequest]]:
    """
    Split requests by type for analysis.

    Args:
        requests: List of BatchRequest

    Returns:
        Dict with keys: 'decode', 'prefill', 'extend'
    """
    grouped: dict[str, list[BatchRequest]] = {
        "decode": [],
        "prefill": [],
        "extend": [],
    }
    for req in requests:
        # Decode wins ties (e.g. q_len == kv_len == 1 counts as decode).
        if req.is_decode:
            key = "decode"
        elif req.is_prefill:
            key = "prefill"
        elif req.is_extend:
            key = "extend"
        else:
            continue
        grouped[key].append(req)
    return grouped
def get_batch_stats(requests: list[BatchRequest]) -> dict:
    """
    Compute statistics about a batch.

    Args:
        requests: List of BatchRequest

    Returns:
        Dict with batch statistics
    """
    by_type = split_by_type(requests)
    count = len(requests)
    q_lens = [req.q_len for req in requests]
    kv_lens = [req.kv_len for req in requests]
    return {
        "total_requests": count,
        "num_decode": len(by_type["decode"]),
        "num_prefill": len(by_type["prefill"]),
        "num_extend": len(by_type["extend"]),
        "total_tokens": sum(q_lens),
        "total_kv_cache": sum(kv_lens),
        "max_q_len": max(q_lens, default=0),
        "max_kv_len": max(kv_lens, default=0),
        # Averages fall back to 0 for an empty batch.
        "avg_q_len": sum(q_lens) / count if count else 0,
        "avg_kv_len": sum(kv_lens) / count if count else 0,
    }
def get_batch_type(batch_spec: str, spec_decode_threshold: int = 8) -> str:
    """
    Classify a batch spec into a type string.

    Args:
        batch_spec: Batch specification string (e.g. "q2k", "8q1s1k", "2q2k_8q1s1k")
        spec_decode_threshold: Max q_len to be considered spec-decode vs extend

    Returns:
        Type string: "prefill", "decode", "spec-decode", "extend", or "mixed (types...)"
    """

    def _classify(req: BatchRequest) -> str | None:
        if req.is_decode:
            return "decode"
        if req.is_prefill:
            return "prefill"
        if req.is_extend:
            # Small multi-token steps are speculative decoding; larger ones
            # are chunked-prefill style context extension.
            return "spec-decode" if req.q_len <= spec_decode_threshold else "extend"
        return None

    labels = {
        label
        for label in (_classify(req) for req in parse_batch_spec(batch_spec))
        if label is not None
    }
    if not labels:
        return "unknown"
    if len(labels) == 1:
        return next(iter(labels))
    # Sort for consistent output across runs.
    return f"mixed ({'+'.join(sorted(labels))})"
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/attention_benchmarks/batch_spec.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/attention_benchmarks/benchmark.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Universal vLLM Attention Benchmark
Benchmark any attention backend with the extended grammar.
Supports standard attention (Flash/Triton/FlashInfer) and MLA backends.
Examples:
# Standard attention
python benchmark.py --backends flash flashinfer --batch-specs "q2k" "8q1s1k"
# MLA backends
python benchmark.py --backends cutlass_mla flashinfer_mla --batch-specs "64q1s1k"
# Parameter sweep (CLI)
python benchmark.py --backend cutlass_mla \
--batch-specs "64q1s1k" \
--sweep-param num_kv_splits \
--sweep-values 1 4 8 16
# Parameter sweep (YAML config - recommended)
python benchmark.py --config configs/cutlass_numsplits.yaml
"""
import argparse
import sys
from dataclasses import replace
from pathlib import Path
import yaml
from rich.console import Console
from tqdm import tqdm
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
from batch_spec import parse_batch_spec
from common import (
BenchmarkConfig,
BenchmarkResult,
ModelParameterSweep,
ParameterSweep,
ResultsFormatter,
batch_spec_sort_key,
is_mla_backend,
)
def run_standard_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
    """Run standard attention benchmark (Flash/Triton/FlashInfer)."""
    # Imported lazily so MLA-only environments need not load this runner.
    from runner import run_attention_benchmark as _run_attention

    return _run_attention(config)
def run_mla_benchmark(config: BenchmarkConfig, **kwargs) -> BenchmarkResult:
    """Run MLA benchmark with appropriate backend."""
    # Lazy import keeps MLA dependencies optional for standard backends.
    from mla_runner import run_mla_benchmark as _run_mla

    return _run_mla(config.backend, config, **kwargs)
def run_benchmark(config: BenchmarkConfig, **kwargs) -> BenchmarkResult:
    """
    Run a single benchmark with proper backend selection.

    Args:
        config: BenchmarkConfig with backend, batch_spec, and model params
        **kwargs: Additional arguments passed to MLA benchmarks

    Returns:
        BenchmarkResult (may have error field set on failure)
    """
    try:
        if not is_mla_backend(config.backend):
            return run_standard_attention_benchmark(config)
        return run_mla_benchmark(config, **kwargs)
    except Exception as exc:
        # Surface the failure through the result object so a sweep can
        # continue past individual backend errors.
        return BenchmarkResult(
            config=config,
            mean_time=float("inf"),
            std_time=0,
            min_time=float("inf"),
            max_time=float("inf"),
            error=str(exc),
        )
def run_model_parameter_sweep(
    backends: list[str],
    batch_specs: list[str],
    base_config_args: dict,
    sweep: ModelParameterSweep,
    console: Console,
) -> list[BenchmarkResult]:
    """
    Run model parameter sweep for given backends and batch specs.

    Each result is stored with a labeled backend name (via sweep.get_label)
    so every swept value shows up as its own column; the mapping from label
    back to (backend, value) is recorded up front instead of being
    reverse-engineered from the label strings afterwards.

    Args:
        backends: List of backend names
        batch_specs: List of batch specifications
        base_config_args: Base configuration arguments (num_layers, head_dim, etc.)
        sweep: ModelParameterSweep configuration
        console: Rich console for output

    Returns:
        List of BenchmarkResult objects
    """
    all_results = []
    # Maps labeled backend name -> (original backend, swept value). Recorded
    # at label-creation time so the display code below does not have to try
    # every (backend, value) pair per result (previously O(results*B*V), and
    # fragile if two labels ever collided).
    label_info: dict = {}
    console.print(
        f"[yellow]Model sweep mode: testing {sweep.param_name} = {sweep.values}[/]"
    )
    total = len(backends) * len(batch_specs) * len(sweep.values)
    with tqdm(total=total, desc="Benchmarking") as pbar:
        for backend in backends:
            for spec in batch_specs:
                for value in sweep.values:
                    # Create config with modified model parameter
                    config_args = base_config_args.copy()
                    config_args[sweep.param_name] = value
                    # Create config with original backend for running
                    clean_config = BenchmarkConfig(
                        backend=backend, batch_spec=spec, **config_args
                    )
                    # Run benchmark
                    result = run_benchmark(clean_config)
                    # Replace backend with labeled version for display
                    backend_label = sweep.get_label(backend, value)
                    label_info[backend_label] = (backend, value)
                    labeled_config = replace(result.config, backend=backend_label)
                    result = replace(result, config=labeled_config)
                    all_results.append(result)
                    if not result.success:
                        console.print(
                            f"[red]Error {backend} {spec} {sweep.param_name}="
                            f"{value}: {result.error}[/]"
                        )
                    pbar.update(1)
    # Display sweep results - create separate table for each parameter value
    console.print("\n[bold green]Model Parameter Sweep Results:[/]")
    formatter = ResultsFormatter(console)
    # Group results by parameter value and extract backend mapping
    by_param_value = {}
    backend_mapping = {}  # Maps labeled backend -> original backend
    for r in all_results:
        backend, value = label_info[r.config.backend]
        backend_mapping[r.config.backend] = backend
        by_param_value.setdefault(str(value), []).append(r)
    # Create a table for each parameter value
    sorted_param_values = sorted(
        by_param_value.keys(), key=lambda x: int(x) if x.isdigit() else x
    )
    for param_value in sorted_param_values:
        console.print(f"\n[bold cyan]{sweep.param_name} = {param_value}[/]")
        param_results = by_param_value[param_value]
        # Re-label results with their original backend names for display
        modified_results = [
            replace(r, config=replace(r.config, backend=backend_mapping[r.config.backend]))
            for r in param_results
        ]
        # Print table with original backend names
        formatter.print_table(modified_results, backends, compare_to_fastest=True)
    # Show optimal backend for each (param_value, batch_spec) combination
    console.print(
        f"\n[bold cyan]Optimal backend for each ({sweep.param_name}, batch_spec):[/]"
    )
    # Group successful results by (param_value, batch_spec)
    by_param_and_spec = {}
    for r in all_results:
        if r.success:
            _backend, value = label_info[r.config.backend]
            key = (str(value), r.config.batch_spec)
            by_param_and_spec.setdefault(key, []).append(r)
    # Sort by param value then spec (batch_size, q_len, kv_len)
    sorted_keys = sorted(
        by_param_and_spec.keys(),
        key=lambda x: (
            int(x[0]) if x[0].isdigit() else x[0],
            batch_spec_sort_key(x[1]),
        ),
    )
    current_param_value = None
    for param_value, spec in sorted_keys:
        # Print header when param value changes
        if param_value != current_param_value:
            console.print(f"\n  [bold]{sweep.param_name}={param_value}:[/]")
            current_param_value = param_value
        results = by_param_and_spec[(param_value, spec)]
        best = min(results, key=lambda r: r.mean_time)
        # Extract original backend name using the mapping
        backend_name = backend_mapping[best.config.backend]
        # Show all backends' times for comparison
        times_str = " | ".join(
            [
                f"{backend_mapping[r.config.backend]}: {r.mean_time:.6f}s"
                for r in sorted(results, key=lambda r: r.mean_time)
            ]
        )
        console.print(
            f"  {spec:12s} -> [bold green]{backend_name:15s}[/] ({times_str})"
        )
    return all_results
def run_parameter_sweep(
    backends: list[str],
    batch_specs: list[str],
    base_config_args: dict,
    sweep: ParameterSweep,
    console: Console,
) -> list[BenchmarkResult]:
    """
    Run parameter sweep for given backends and batch specs.

    Args:
        backends: List of backend names
        batch_specs: List of batch specifications
        base_config_args: Base configuration arguments (num_layers, head_dim, etc.)
        sweep: ParameterSweep configuration
        console: Rich console for output

    Returns:
        List of BenchmarkResult objects
    """
    all_results = []
    # Values to test; optionally include the backend's automatic choice.
    sweep_values = list(sweep.values)
    if sweep.include_auto:
        sweep_values.append("auto")
    console.print(f"[yellow]Sweep mode: testing {sweep.param_name} = {sweep_values}[/]")
    total = len(backends) * len(batch_specs) * len(sweep_values)
    with tqdm(total=total, desc="Benchmarking") as pbar:
        for backend in backends:
            for spec in batch_specs:
                for value in sweep_values:
                    config = BenchmarkConfig(
                        backend=backend, batch_spec=spec, **base_config_args
                    )
                    # "auto" means: do not override the parameter at all.
                    kwargs = {} if value == "auto" else {sweep.param_name: value}
                    result = run_benchmark(config, **kwargs)
                    # Re-label so each swept value displays as its own backend.
                    labeled = replace(
                        result.config, backend=sweep.get_label(backend, value)
                    )
                    result = replace(result, config=labeled)
                    all_results.append(result)
                    if not result.success:
                        console.print(
                            f"[red]Error {backend} {spec} {sweep.param_name}="
                            f"{value}: {result.error}[/]"
                        )
                    pbar.update(1)
    # Display sweep results
    console.print("\n[bold green]Sweep Results:[/]")
    backend_labels = [sweep.get_label(b, v) for b in backends for v in sweep_values]
    formatter = ResultsFormatter(console)
    formatter.print_table(all_results, backend_labels)
    # Show the fastest swept value per batch spec (successes only).
    console.print(f"\n[bold cyan]Optimal {sweep.param_name} per batch spec:[/]")
    by_spec = {}
    for r in all_results:
        if r.success:
            by_spec.setdefault(r.config.batch_spec, []).append(r)
    for spec in sorted(by_spec, key=batch_spec_sort_key):
        best = min(by_spec[spec], key=lambda r: r.mean_time)
        console.print(
            f"  {spec}: [bold green]{best.config.backend}[/] ({best.mean_time:.6f}s)"
        )
    return all_results
def load_config_from_yaml(config_path: str) -> dict:
    """Read a benchmark configuration from a YAML file and return it as a dict."""
    with open(config_path) as fh:
        parsed = yaml.safe_load(fh)
    return parsed
def generate_batch_specs_from_ranges(ranges: list[dict]) -> list[str]:
    """
    Generate batch specs from range specifications.

    Args:
        ranges: List of range specifications, each containing:
            - template: Batch spec template (e.g., "q{q_len}kv1k")
            - q_len: Dict with start, stop, step, end_inclusive (optional)
            - Other parameters can also be ranges

    Returns:
        List of generated batch spec strings

    Raises:
        ValueError: If a range specification has no 'template' key.

    Example:
        ranges = [
            {
                "template": "q{q_len}kv1k",
                "q_len": {
                    "start": 1,
                    "stop": 16,
                    "step": 1,
                    "end_inclusive": true  # Optional, defaults to true
                }
            }
        ]
        Returns: ["q1kv1k", "q2kv1k", ..., "q16kv1k"]
    """
    # Hoisted out of the loop; previously re-imported on every iteration.
    import itertools

    all_specs = []
    for range_spec in ranges:
        template = range_spec.get("template")
        if not template:
            raise ValueError("Range specification must include 'template'")
        # Map each template parameter to the list of values it can take.
        range_params = {}
        for key, value in range_spec.items():
            if key == "template":
                continue
            if isinstance(value, dict) and "start" in value:
                # Range specification: expand into an explicit value list.
                start = value["start"]
                stop = value["stop"]
                step = value.get("step", 1)
                # Ranges include `stop` unless end_inclusive is set to False.
                end_inclusive = value.get("end_inclusive", True)
                if end_inclusive:
                    range_params[key] = list(range(start, stop + 1, step))
                else:
                    range_params[key] = list(range(start, stop, step))
            else:
                # Fixed value: behaves like a single-element range.
                range_params[key] = [value]
        if range_params:
            # Cartesian product over all parameters, in insertion order.
            param_names = list(range_params.keys())
            param_values = [range_params[name] for name in param_names]
            for values in itertools.product(*param_values):
                params = dict(zip(param_names, values))
                all_specs.append(template.format(**params))
        else:
            # No parameters: the template is already a complete spec.
            all_specs.append(template)
    return all_specs
def main() -> None:
    """CLI entry point: parse args/YAML, run the selected mode, report results.

    Four mutually exclusive modes, chosen from the merged CLI/YAML options:
      * ``decode_vs_prefill`` (YAML ``mode`` key) — compare decode vs prefill
        pipelines per query length and report the crossover threshold
      * model parameter sweep — vary a model dimension across backends
      * parameter sweep — vary a runner parameter across backends
      * normal — benchmark each backend on each batch spec

    Results are printed via rich and optionally saved to CSV/JSON.
    """
    parser = argparse.ArgumentParser(
        description="Universal vLLM attention benchmark",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=__doc__,
    )
    # Config file
    parser.add_argument(
        "--config",
        help="Path to YAML config file (overrides other args)",
    )
    # Backend selection
    parser.add_argument(
        "--backends",
        nargs="+",
        help="Backends to benchmark (flash, triton, flashinfer, cutlass_mla, "
        "flashinfer_mla, flashattn_mla, flashmla)",
    )
    parser.add_argument(
        "--backend",
        help="Single backend (alternative to --backends)",
    )
    # Batch specifications
    parser.add_argument(
        "--batch-specs",
        nargs="+",
        default=["q2k", "8q1s1k"],
        help="Batch specifications using extended grammar",
    )
    # Model config
    parser.add_argument("--num-layers", type=int, default=10, help="Number of layers")
    parser.add_argument("--head-dim", type=int, default=128, help="Head dimension")
    parser.add_argument("--num-q-heads", type=int, default=32, help="Query heads")
    parser.add_argument("--num-kv-heads", type=int, default=8, help="KV heads")
    parser.add_argument("--block-size", type=int, default=16, help="Block size")
    # Benchmark settings
    parser.add_argument("--device", default="cuda:0", help="Device")
    parser.add_argument("--repeats", type=int, default=1, help="Repetitions")
    parser.add_argument("--warmup-iters", type=int, default=3, help="Warmup iterations")
    parser.add_argument("--profile-memory", action="store_true", help="Profile memory")
    # Parameter sweep (use YAML config for advanced sweeps)
    parser.add_argument(
        "--sweep-param",
        help="Parameter name to sweep (e.g., num_kv_splits, reorder_batch_threshold)",
    )
    parser.add_argument(
        "--sweep-values",
        type=int,
        nargs="+",
        help="Values to sweep for the parameter",
    )
    # Output
    parser.add_argument("--output-csv", help="Save to CSV")
    parser.add_argument("--output-json", help="Save to JSON")
    args = parser.parse_args()
    console = Console()
    console.print("[bold cyan]vLLM Attention Benchmark[/]")
    # Load config from YAML if provided
    if args.config:
        console.print(f"[yellow]Loading config from: {args.config}[/]")
        yaml_config = load_config_from_yaml(args.config)
        # Show description if available
        if "description" in yaml_config:
            console.print(f"[dim]{yaml_config['description']}[/]")
        # Override args with YAML values, but CLI args take precedence
        # Check if CLI provided backends (they would be non-None and not default)
        cli_backends_provided = args.backends is not None or args.backend is not None
        # Backend(s) - only use YAML if CLI didn't specify
        if not cli_backends_provided:
            if "backend" in yaml_config:
                args.backend = yaml_config["backend"]
                args.backends = None
            elif "backends" in yaml_config:
                args.backends = yaml_config["backends"]
                args.backend = None
        # Check for special modes
        if "mode" in yaml_config:
            args.mode = yaml_config["mode"]
        else:
            args.mode = None
        # Batch specs and sizes
        # Support both explicit batch_specs and generated batch_spec_ranges
        if "batch_spec_ranges" in yaml_config:
            # Generate batch specs from ranges
            generated_specs = generate_batch_specs_from_ranges(
                yaml_config["batch_spec_ranges"]
            )
            # Combine with any explicit batch_specs
            if "batch_specs" in yaml_config:
                args.batch_specs = yaml_config["batch_specs"] + generated_specs
            else:
                args.batch_specs = generated_specs
            console.print(
                f"[dim]Generated {len(generated_specs)} batch specs from ranges[/]"
            )
        elif "batch_specs" in yaml_config:
            args.batch_specs = yaml_config["batch_specs"]
        # NOTE(review): args.batch_sizes is set to None when the YAML omits
        # batch_sizes; decode_vs_prefill mode iterates it — confirm configs
        # using that mode always provide batch_sizes.
        if "batch_sizes" in yaml_config:
            args.batch_sizes = yaml_config["batch_sizes"]
        else:
            args.batch_sizes = None
        # Model config
        if "model" in yaml_config:
            model = yaml_config["model"]
            args.num_layers = model.get("num_layers", args.num_layers)
            args.head_dim = model.get("head_dim", args.head_dim)
            args.num_q_heads = model.get("num_q_heads", args.num_q_heads)
            args.num_kv_heads = model.get("num_kv_heads", args.num_kv_heads)
            args.block_size = model.get("block_size", args.block_size)
        # Benchmark settings (top-level keys)
        if "device" in yaml_config:
            args.device = yaml_config["device"]
        if "repeats" in yaml_config:
            args.repeats = yaml_config["repeats"]
        if "warmup_iters" in yaml_config:
            args.warmup_iters = yaml_config["warmup_iters"]
        if "profile_memory" in yaml_config:
            args.profile_memory = yaml_config["profile_memory"]
        # Parameter sweep configuration
        if "parameter_sweep" in yaml_config:
            sweep_config = yaml_config["parameter_sweep"]
            args.parameter_sweep = ParameterSweep(
                param_name=sweep_config["param_name"],
                values=sweep_config["values"],
                include_auto=sweep_config.get("include_auto", False),
                label_format=sweep_config.get(
                    "label_format", "{backend}_{param_name}_{value}"
                ),
            )
        else:
            args.parameter_sweep = None
        # Model parameter sweep configuration
        if "model_parameter_sweep" in yaml_config:
            sweep_config = yaml_config["model_parameter_sweep"]
            args.model_parameter_sweep = ModelParameterSweep(
                param_name=sweep_config["param_name"],
                values=sweep_config["values"],
                label_format=sweep_config.get(
                    "label_format", "{backend}_{param_name}_{value}"
                ),
            )
        else:
            args.model_parameter_sweep = None
        # Output
        if "output" in yaml_config:
            output = yaml_config["output"]
            if "csv" in output and not args.output_csv:
                args.output_csv = output["csv"]
            if "json" in output and not args.output_json:
                args.output_json = output["json"]
        console.print()
    # Handle CLI-based parameter sweep (if not from YAML)
    if (
        (not hasattr(args, "parameter_sweep") or args.parameter_sweep is None)
        and args.sweep_param
        and args.sweep_values
    ):
        args.parameter_sweep = ParameterSweep(
            param_name=args.sweep_param,
            values=args.sweep_values,
            include_auto=False,
            label_format="{backend}_{param_name}_{value}",
        )
    # Determine backends: --backends wins, then --backend, then "flash"
    backends = args.backends or ([args.backend] if args.backend else ["flash"])
    console.print(f"Backends: {', '.join(backends)}")
    console.print(f"Batch specs: {', '.join(args.batch_specs)}")
    console.print()
    # Run benchmarks
    all_results = []
    # Handle special mode: decode_vs_prefill comparison
    if hasattr(args, "mode") and args.mode == "decode_vs_prefill":
        console.print("[yellow]Mode: Decode vs Prefill pipeline comparison[/]")
        console.print(
            "[dim]For each query length, testing both decode and prefill pipelines[/]"
        )
        console.print("[dim]Using batched execution for optimal performance[/]")
        # Extract batch sizes from config
        batch_sizes = getattr(args, "batch_sizes", [1])
        backend = backends[0]  # Use first backend (should only be one)
        # Calculate total benchmarks
        total = len(batch_sizes)
        with tqdm(total=total, desc="Benchmarking") as pbar:
            for batch_size in batch_sizes:
                # Prepare all configs for this batch size
                configs_with_thresholds = []
                for spec in args.batch_specs:
                    # Parse the batch spec to get query length
                    requests = parse_batch_spec(spec)
                    if not requests:
                        console.print(
                            f"[red]Error: Could not parse batch spec '{spec}'[/]"
                        )
                        continue
                    # Get query length from first request
                    query_length = requests[0].q_len
                    # Create batch spec for this batch size
                    # For batch_size > 1, we need to prepend the count
                    batch_spec = f"{batch_size}{spec}" if batch_size > 1 else spec
                    # Create base config (without backend name)
                    base_config = BenchmarkConfig(
                        backend=backend,  # Will be overridden later
                        batch_spec=batch_spec,
                        num_layers=args.num_layers,
                        head_dim=args.head_dim,
                        num_q_heads=args.num_q_heads,
                        num_kv_heads=args.num_kv_heads,
                        block_size=args.block_size,
                        device=args.device,
                        repeats=args.repeats,
                        warmup_iters=args.warmup_iters,
                        profile_memory=args.profile_memory,
                    )
                    # Add decode pipeline config: threshold == query_length
                    # routes every query through the decode path.
                    decode_threshold = query_length
                    config_decode = replace(
                        base_config,
                        backend=f"{backend}_decode_qlen{query_length}_bs{batch_size}",
                    )
                    configs_with_thresholds.append((config_decode, decode_threshold))
                    # Add prefill pipeline config if query_length > 1
                    if query_length > 1:
                        prefill_threshold = query_length - 1
                        config_prefill = replace(
                            base_config,
                            backend=f"{backend}_prefill_qlen{query_length}"
                            f"_bs{batch_size}",
                        )
                        configs_with_thresholds.append(
                            (config_prefill, prefill_threshold)
                        )
                # Run all benchmarks for this batch size in one go (batched mode)
                try:
                    from mla_runner import run_mla_benchmark as run_mla
                    # Use batched API: pass list of (config, threshold) tuples
                    timing_results = run_mla(backend, configs_with_thresholds)
                    # Create BenchmarkResult objects from timing results
                    for (config, _), timing in zip(
                        configs_with_thresholds, timing_results
                    ):
                        result = BenchmarkResult(
                            config=config,
                            mean_time=timing["mean"],
                            std_time=timing["std"],
                            min_time=timing["min"],
                            max_time=timing["max"],
                            throughput_tokens_per_sec=timing.get("throughput", None),
                        )
                        all_results.append(result)
                except Exception as e:
                    import traceback
                    console.print(
                        f"[red]Error running batched benchmarks for "
                        f"batch_size={batch_size}: {e}[/]"
                    )
                    console.print("[red]Traceback:[/]")
                    traceback.print_exc()
                    # Add error results for all configs
                    for config, _ in configs_with_thresholds:
                        result = BenchmarkResult(
                            config=config,
                            mean_time=float("inf"),
                            std_time=0,
                            min_time=float("inf"),
                            max_time=float("inf"),
                            error=str(e),
                        )
                        all_results.append(result)
                pbar.update(1)
        # Display decode vs prefill results
        console.print("\n[bold green]Decode vs Prefill Results:[/]")
        # Group by batch size (parsed back out of the "..._bsN" backend label)
        by_batch_size = {}
        for r in all_results:
            if r.success:
                # Extract batch size from backend name
                parts = r.config.backend.split("_")
                bs_part = [p for p in parts if p.startswith("bs")]
                if bs_part:
                    bs = int(bs_part[0][2:])
                    if bs not in by_batch_size:
                        by_batch_size[bs] = []
                    by_batch_size[bs].append(r)
        # For each batch size, analyze crossover point
        for bs in sorted(by_batch_size.keys()):
            console.print(f"\n[bold cyan]Batch size: {bs}[/]")
            results = by_batch_size[bs]
            # Group by query length (parsed from the "..._qlenN_..." label)
            by_qlen = {}
            for r in results:
                parts = r.config.backend.split("_")
                qlen_part = [p for p in parts if p.startswith("qlen")]
                if qlen_part:
                    qlen = int(qlen_part[0][4:])
                    if qlen not in by_qlen:
                        by_qlen[qlen] = {}
                    pipeline = "decode" if "decode" in r.config.backend else "prefill"
                    by_qlen[qlen][pipeline] = r
            # Find crossover point: the largest qlen where decode still wins
            last_decode_faster = None
            for qlen in sorted(by_qlen.keys()):
                pipelines = by_qlen[qlen]
                if "decode" in pipelines and "prefill" in pipelines:
                    decode_time = pipelines["decode"].mean_time
                    prefill_time = pipelines["prefill"].mean_time
                    faster = "decode" if decode_time < prefill_time else "prefill"
                    speedup = (
                        prefill_time / decode_time
                        if decode_time < prefill_time
                        else decode_time / prefill_time
                    )
                    console.print(
                        f"  qlen={qlen:3d}: decode={decode_time:.6f}s, "
                        f"prefill={prefill_time:.6f}s -> "
                        f"[bold]{faster}[/] ({speedup:.2f}x)"
                    )
                    if faster == "decode":
                        last_decode_faster = qlen
            if last_decode_faster is not None:
                optimal_threshold = last_decode_faster
                console.print(
                    f"\n  [bold green]Optimal threshold for batch_size={bs}: "
                    f"{optimal_threshold}[/]"
                )
                console.print(
                    f"  [dim](Use decode pipeline for query_length <= "
                    f"{optimal_threshold})[/]"
                )
            else:
                console.print(
                    f"\n  [yellow]Prefill always faster for batch_size={bs}[/]"
                )
    # Handle model parameter sweep mode
    elif hasattr(args, "model_parameter_sweep") and args.model_parameter_sweep:
        # Model parameter sweep
        base_config_args = {
            "num_layers": args.num_layers,
            "head_dim": args.head_dim,
            "num_q_heads": args.num_q_heads,
            "num_kv_heads": args.num_kv_heads,
            "block_size": args.block_size,
            "device": args.device,
            "repeats": args.repeats,
            "warmup_iters": args.warmup_iters,
            "profile_memory": args.profile_memory,
        }
        all_results = run_model_parameter_sweep(
            backends,
            args.batch_specs,
            base_config_args,
            args.model_parameter_sweep,
            console,
        )
    # Handle parameter sweep mode (unified)
    elif hasattr(args, "parameter_sweep") and args.parameter_sweep:
        # Unified parameter sweep
        base_config_args = {
            "num_layers": args.num_layers,
            "head_dim": args.head_dim,
            "num_q_heads": args.num_q_heads,
            "num_kv_heads": args.num_kv_heads,
            "block_size": args.block_size,
            "device": args.device,
            "repeats": args.repeats,
            "warmup_iters": args.warmup_iters,
            "profile_memory": args.profile_memory,
        }
        all_results = run_parameter_sweep(
            backends, args.batch_specs, base_config_args, args.parameter_sweep, console
        )
    else:
        # Normal mode: compare backends
        total = len(backends) * len(args.batch_specs)
        with tqdm(total=total, desc="Benchmarking") as pbar:
            for spec in args.batch_specs:
                for backend in backends:
                    config = BenchmarkConfig(
                        backend=backend,
                        batch_spec=spec,
                        num_layers=args.num_layers,
                        head_dim=args.head_dim,
                        num_q_heads=args.num_q_heads,
                        num_kv_heads=args.num_kv_heads,
                        block_size=args.block_size,
                        device=args.device,
                        repeats=args.repeats,
                        warmup_iters=args.warmup_iters,
                        profile_memory=args.profile_memory,
                    )
                    result = run_benchmark(config)
                    all_results.append(result)
                    if not result.success:
                        console.print(f"[red]Error {backend} {spec}: {result.error}[/]")
                    pbar.update(1)
        # Display results
        console.print("\n[bold green]Results:[/]")
        formatter = ResultsFormatter(console)
        formatter.print_table(all_results, backends)
    # Save results
    if all_results:
        formatter = ResultsFormatter(console)
        if args.output_csv:
            formatter.save_csv(all_results, args.output_csv)
        if args.output_json:
            formatter.save_json(all_results, args.output_json)
if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/attention_benchmarks/benchmark.py",
"license": "Apache License 2.0",
"lines": 749,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/attention_benchmarks/mla_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
MLA benchmark runner - shared utilities for MLA benchmarks.
This module provides helpers for running MLA backends without
needing full VllmConfig integration.
"""
import numpy as np
import torch
from batch_spec import parse_batch_spec
from common import (
BenchmarkResult,
MockHfConfig,
MockIndexer,
MockKVBProj,
MockLayer,
setup_mla_dims,
)
from vllm.config import (
CacheConfig,
CompilationConfig,
ModelConfig,
ParallelConfig,
SchedulerConfig,
VllmConfig,
set_current_vllm_config,
)
# ============================================================================
# VllmConfig Creation
# ============================================================================
def _add_mock_methods_to_model_config(model_config: ModelConfig) -> None:
    """
    Attach mock per-layer query methods to a ModelConfig instance.

    Metadata builders call these layer accessors, which aren't normally
    present on ModelConfig in a benchmark context, so stub them out here.
    """
    import types

    def _bind(name, func):
        # Install `func` as a bound method on this specific instance.
        setattr(model_config, name, types.MethodType(func, model_config))

    _bind("get_num_layers", lambda self: 1)
    _bind("get_sliding_window_for_layer", lambda self, _i: None)
    _bind("get_logits_soft_cap_for_layer", lambda self, _i: None)
    _bind(
        "get_sm_scale_for_layer",
        lambda self, _i: 1.0 / model_config.get_head_size() ** 0.5,
    )
def create_minimal_vllm_config(
    model_name: str = "deepseek-v3",
    block_size: int = 128,
    max_num_seqs: int = 256,
    mla_dims: dict | None = None,
    index_topk: int | None = None,
) -> VllmConfig:
    """
    Create minimal VllmConfig for MLA benchmarks.

    Builds a ModelConfig from a temporary local config.json (no network
    access), swaps in a MockHfConfig, and assembles the remaining sub-configs
    with fixed benchmark-friendly values.

    Args:
        model_name: Model name (deepseek-v2, deepseek-v3, etc.) - used if mla_dims not
            provided
        block_size: KV cache block size
        max_num_seqs: Maximum number of sequences
        mla_dims: Optional custom MLA dimensions dict. If not provided, uses
            setup_mla_dims(model_name)
        index_topk: Optional topk value for sparse MLA backends. If provided,
            the config will include index_topk for sparse attention.

    Returns:
        VllmConfig for benchmarking
    """
    # Get MLA dimensions - use provided or load from model name
    if mla_dims is None:
        mla_dims = setup_mla_dims(model_name)
    # Create mock HF config first (avoids downloading from HuggingFace)
    mock_hf_config = MockHfConfig(mla_dims, index_topk=index_topk)
    # Create a temporary minimal config.json to avoid HF downloads
    # This ensures consistent ModelConfig construction without network access
    import json
    import os
    import shutil
    import tempfile
    # Just enough of a HF config for ModelConfig to parse; hidden_size is
    # derived from the MLA dims so shapes stay consistent.
    minimal_config = {
        "architectures": ["DeepseekV2ForCausalLM"],
        "model_type": "deepseek_v2",
        "num_attention_heads": mla_dims["num_q_heads"],
        "num_key_value_heads": mla_dims["num_kv_heads"],
        "hidden_size": mla_dims["head_dim"] * mla_dims["num_q_heads"],
        "torch_dtype": "bfloat16",
        "max_position_embeddings": 163840,  # DeepSeek V3 default
        "rope_theta": 10000.0,
        "vocab_size": 128256,
    }
    # Create temporary directory with config.json
    temp_dir = tempfile.mkdtemp(prefix="vllm_bench_")
    config_path = os.path.join(temp_dir, "config.json")
    with open(config_path, "w") as f:
        json.dump(minimal_config, f)
    try:
        # Create model config using local path - no HF downloads.
        # Tokenizer is unused in these benchmarks (tokenizer=None,
        # skip_tokenizer_init=True).
        model_config = ModelConfig(
            model=temp_dir,  # Use local temp directory
            tokenizer=None,
            tokenizer_mode="auto",
            trust_remote_code=True,
            dtype="bfloat16",
            seed=0,
            max_model_len=32768,
            quantization=None,
            enforce_eager=False,
            max_logprobs=20,
            disable_sliding_window=False,
            skip_tokenizer_init=True,
            served_model_name=None,
            limit_mm_per_prompt=None,
            config_format="auto",
        )
    finally:
        # Clean up temporary directory
        shutil.rmtree(temp_dir, ignore_errors=True)
    # Override with our mock config
    model_config.hf_config = mock_hf_config
    model_config.hf_text_config = mock_hf_config
    # Add mock methods for layer-specific queries
    _add_mock_methods_to_model_config(model_config)
    # Create sub-configs
    cache_config = CacheConfig(
        block_size=block_size,
        gpu_memory_utilization=0.9,
        swap_space=0,
        cache_dtype="auto",
        enable_prefix_caching=False,
    )
    scheduler_config = SchedulerConfig(
        max_num_seqs=max_num_seqs,
        max_num_batched_tokens=8192,
        max_model_len=32768,
        is_encoder_decoder=False,
        enable_chunked_prefill=True,
    )
    parallel_config = ParallelConfig(
        tensor_parallel_size=1,
    )
    # Default CompilationConfig; no overrides are passed here.
    compilation_config = CompilationConfig()
    return VllmConfig(
        model_config=model_config,
        cache_config=cache_config,
        parallel_config=parallel_config,
        scheduler_config=scheduler_config,
        compilation_config=compilation_config,
    )
# ============================================================================
# Backend Configuration
# ============================================================================
# Backend-specific properties that can't be inferred from the backend class.
# Keys are AttentionBackendEnum names (uppercase). Consumed by
# _get_backend_config(); backends absent here default to query_format "tuple".
_BACKEND_PROPERTIES = {
    "FLASHMLA": {
        "query_format": "concat",  # Single concatenated tensor (vs tuple)
    },
    "FLASHMLA_SPARSE": {
        "query_format": "concat",  # Single concatenated tensor (vs tuple)
    },
}
def _get_backend_config(backend: str) -> dict:
    """
    Look up an MLA backend's configuration via AttentionBackendEnum.

    The registry supplies the backend class, from which the implementation
    class, builder class, sparsity flag, and supported block size are
    derived. Properties that cannot be inferred from the class come from
    _BACKEND_PROPERTIES.

    Args:
        backend: Backend name matching AttentionBackendEnum exactly
            (e.g., "FLASHMLA_SPARSE")

    Returns:
        Dict with backend configuration
    """
    from vllm.v1.attention.backends.registry import AttentionBackendEnum

    try:
        backend_cls = AttentionBackendEnum[backend].get_class()
    except (KeyError, ValueError) as exc:
        valid_names = [
            member.name for member in AttentionBackendEnum if member.name != "CUSTOM"
        ]
        raise ValueError(
            f"Unknown backend: {backend}. "
            f"Valid MLA backends: {[b for b in valid_names if 'MLA' in b]}"
        ) from exc
    # First supported kernel block size, if any are declared.
    supported_sizes = backend_cls.get_supported_kernel_block_sizes()
    chosen_block_size = supported_sizes[0] if supported_sizes else None
    if hasattr(chosen_block_size, "value"):
        # MultipleOf-style entries carry a .value attribute; treat as unset.
        chosen_block_size = None
    # Use the backend's is_sparse() if present; assume dense otherwise.
    sparse = getattr(backend_cls, "is_sparse", lambda: False)()
    extra_props = _BACKEND_PROPERTIES.get(backend, {})
    return {
        "backend_class": backend_cls,
        "impl_class": backend_cls.get_impl_cls(),
        "builder_class": backend_cls.get_builder_cls(),
        "query_format": extra_props.get("query_format", "tuple"),
        "block_size": chosen_block_size,
        "is_sparse": sparse,
    }
# ============================================================================
# Metadata Building Helpers
# ============================================================================
def _build_attention_metadata(
    requests: list,
    block_size: int,
    device: torch.device,
    builder_instance,
) -> tuple:
    """
    Build attention metadata from batch requests.

    Args:
        requests: List of BatchRequest objects
        block_size: KV cache block size
        device: Target device
        builder_instance: Metadata builder instance

    Returns:
        Tuple of (metadata, kv_cache_num_blocks)
    """
    from itertools import accumulate

    q_lens = [r.q_len for r in requests]
    kv_lens = [r.kv_len for r in requests]
    total_q = sum(q_lens)
    max_kv = max(kv_lens)
    # Build query start locations (exclusive prefix sums of query lengths).
    # accumulate() is O(n); the previous per-element sum(q_lens[:i+1]) was
    # O(n^2) in the number of requests.
    q_start_cpu = torch.tensor(
        list(accumulate(q_lens, initial=0)),
        dtype=torch.int32,
    )
    q_start_gpu = q_start_cpu.to(device)
    # Build sequence lengths
    seq_lens_cpu = torch.tensor(kv_lens, dtype=torch.int32)
    seq_lens_gpu = seq_lens_cpu.to(device)
    # Build num_computed_tokens (context length for each request)
    context_lens = [kv_len - q_len for q_len, kv_len in zip(q_lens, kv_lens)]
    num_computed_tokens_cpu = torch.tensor(context_lens, dtype=torch.int32)
    # Build block table: each request gets a contiguous run of fresh blocks
    num_blocks_per_req = [(kv + block_size - 1) // block_size for kv in kv_lens]
    max_num_blocks = max(num_blocks_per_req)
    block_table_cpu = np.zeros((len(requests), max_num_blocks), dtype=np.int32)
    current_block = 0
    for i, num_blocks in enumerate(num_blocks_per_req):
        for j in range(num_blocks):
            block_table_cpu[i, j] = current_block
            current_block += 1
    block_table_gpu = torch.from_numpy(block_table_cpu).to(device)
    # Build slot mapping: the flat KV-cache slot of every new query token
    slot_mapping_list = []
    for i, (q_len, kv_len) in enumerate(zip(q_lens, kv_lens)):
        context_len = kv_len - q_len
        for j in range(q_len):
            token_kv_idx = context_len + j
            block_idx = token_kv_idx // block_size
            offset_in_block = token_kv_idx % block_size
            global_block_id = block_table_cpu[i, block_idx]
            slot_id = global_block_id * block_size + offset_in_block
            slot_mapping_list.append(slot_id)
    slot_mapping = torch.tensor(slot_mapping_list, dtype=torch.int64, device=device)
    # Create CommonAttentionMetadata
    from vllm.v1.attention.backends.utils import CommonAttentionMetadata

    common_attn_metadata = CommonAttentionMetadata(
        num_reqs=len(requests),
        max_query_len=max(q_lens),
        max_seq_len=max_kv,
        num_actual_tokens=total_q,
        query_start_loc=q_start_gpu,
        query_start_loc_cpu=q_start_cpu,
        seq_lens=seq_lens_gpu,
        _seq_lens_cpu=seq_lens_cpu,
        _num_computed_tokens_cpu=num_computed_tokens_cpu,
        slot_mapping=slot_mapping,
        block_table_tensor=block_table_gpu,
        dcp_local_seq_lens=None,
    )
    # Use the production build() method
    metadata = builder_instance.build(
        common_prefix_len=0,
        common_attn_metadata=common_attn_metadata,
        fast_build=False,
    )
    return metadata, current_block
def _create_input_tensors(
total_q: int,
mla_dims: dict,
query_format: str,
device: torch.device,
dtype: torch.dtype,
):
"""
Create input tensors for both decode and prefill modes.
MLA requires different tensor formats for decode vs prefill:
- Decode: Uses kv_lora_rank (512) dimension
- Prefill: Uses qk_nope_head_dim (128) to stay under FlashAttention's 256 limit
Args:
total_q: Total number of query tokens
mla_dims: MLA dimension configuration
query_format: Either "tuple" or "concat"
device: Target device
dtype: Tensor dtype
Returns:
Tuple of (decode_inputs, prefill_inputs)
- decode_inputs: Query tensor(s) for decode mode
- prefill_inputs: Dict with 'q', 'k_c_normed', 'k_pe', 'k_scale' for prefill
"""
if query_format == "tuple":
# Decode mode format: (q_nope, q_pe) where q_nope has kv_lora_rank dim
q_nope_decode = torch.randn(
total_q,
mla_dims["num_q_heads"],
mla_dims["kv_lora_rank"],
device=device,
dtype=dtype,
)
q_pe = torch.randn(
total_q,
mla_dims["num_q_heads"],
mla_dims["qk_rope_head_dim"],
device=device,
dtype=dtype,
)
decode_inputs = (q_nope_decode, q_pe)
# For prefill, we need q with qk_nope_head_dim instead of kv_lora_rank
q_nope_prefill = torch.randn(
total_q,
mla_dims["num_q_heads"],
mla_dims["qk_nope_head_dim"],
device=device,
dtype=dtype,
)
prefill_q = torch.cat([q_nope_prefill, q_pe], dim=-1)
else: # concat
decode_inputs = torch.randn(
total_q,
mla_dims["num_q_heads"],
mla_dims["kv_lora_rank"] + mla_dims["qk_rope_head_dim"],
device=device,
dtype=dtype,
)
# For prefill with concat format
prefill_q = torch.randn(
total_q,
mla_dims["num_q_heads"],
mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
device=device,
dtype=dtype,
)
# Create additional inputs needed for prefill forward
k_c_normed = torch.randn(
total_q,
mla_dims["kv_lora_rank"],
device=device,
dtype=dtype,
)
k_pe = torch.randn(
total_q,
1, # Single head for MLA
mla_dims["qk_rope_head_dim"],
device=device,
dtype=dtype,
)
k_scale = torch.ones(1, device=device, dtype=torch.float32)
output = torch.zeros(
total_q,
mla_dims["num_q_heads"] * mla_dims["v_head_dim"],
device=device,
dtype=dtype,
)
prefill_inputs = {
"q": prefill_q,
"k_c_normed": k_c_normed,
"k_pe": k_pe,
"k_scale": k_scale,
"output": output,
}
return decode_inputs, prefill_inputs
# ============================================================================
# Backend Initialization
# ============================================================================
def _create_backend_impl(
    backend_cfg: dict,
    mla_dims: dict,
    vllm_config: VllmConfig,
    device: torch.device,
    max_num_tokens: int = 8192,
    index_topk: int | None = None,
):
    """
    Create backend implementation instance.

    Builds the MLA attention impl from the classes resolved in
    ``backend_cfg``, wires in a mock ``kv_b_proj`` (needed for the prefill
    path) and, for sparse backends, a mock indexer. Also constructs the
    mock layer and (when the backend has one) the metadata builder.

    Args:
        backend_cfg: Backend configuration dict from _get_backend_config()
        mla_dims: MLA dimension configuration
        vllm_config: VllmConfig instance
        device: Target device
        max_num_tokens: Maximum number of tokens for sparse indexer buffer
        index_topk: Topk value for sparse MLA backends
    Returns:
        Tuple of (impl, layer, builder_instance, indexer); builder_instance
        and indexer may be None depending on the backend.
    """
    # Get classes from backend config (already resolved by _get_backend_config)
    impl_class = backend_cfg["impl_class"]
    builder_class = backend_cfg["builder_class"]
    # Softmax scale uses the full query head dim (nope + rope parts).
    scale = 1.0 / np.sqrt(mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"])
    # Create mock kv_b_proj layer for prefill mode
    mock_kv_b_proj = MockKVBProj(
        num_heads=mla_dims["num_q_heads"],
        qk_nope_head_dim=mla_dims["qk_nope_head_dim"],
        v_head_dim=mla_dims["v_head_dim"],
    )
    # Create indexer for sparse backends
    indexer = None
    if backend_cfg.get("is_sparse", False):
        if index_topk is None:
            index_topk = 2048  # Default topk for sparse MLA
        indexer = MockIndexer(
            max_num_tokens=max_num_tokens,
            topk_tokens=index_topk,
            device=device,
        )
    # Build impl kwargs
    impl_kwargs = {
        "num_heads": mla_dims["num_q_heads"],
        "head_size": mla_dims["head_dim"],
        "scale": scale,
        "num_kv_heads": mla_dims["num_kv_heads"],
        "alibi_slopes": None,
        "sliding_window": None,
        "kv_cache_dtype": "auto",
        "logits_soft_cap": None,
        "attn_type": "decoder",
        "kv_sharing_target_layer_name": None,
        "q_lora_rank": None,
        "kv_lora_rank": mla_dims["kv_lora_rank"],
        "qk_nope_head_dim": mla_dims["qk_nope_head_dim"],
        "qk_rope_head_dim": mla_dims["qk_rope_head_dim"],
        "qk_head_dim": mla_dims["qk_nope_head_dim"] + mla_dims["qk_rope_head_dim"],
        "v_head_dim": mla_dims["v_head_dim"],
        "kv_b_proj": mock_kv_b_proj,
    }
    # Add indexer for sparse backends
    if indexer is not None:
        impl_kwargs["indexer"] = indexer
    # Create impl
    impl = impl_class(**impl_kwargs)
    # Force a single-rank decode-context-parallel setup when the impl did not
    # initialize these itself (benchmark runs without distributed state).
    if not hasattr(impl, "dcp_world_size") or impl.dcp_world_size in (None, -1):
        impl.dcp_world_size = 1
        impl.dcp_rank = 0
    # Create KV cache spec for MockLayer
    from vllm.v1.kv_cache_interface import FullAttentionSpec
    kv_cache_spec = FullAttentionSpec(
        block_size=backend_cfg["block_size"] or vllm_config.cache_config.block_size,
        num_kv_heads=1,  # MLA uses 1 KV head
        # NOTE(review): 576 = 512 (kv_lora_rank) + 64 (qk_rope_head_dim) of the
        # default DeepSeek dims; hard-coded even when mla_dims differ — confirm.
        head_size=576,  # MLA head dim
        dtype=torch.bfloat16,
    )
    # Create mock layer
    layer = MockLayer(device, impl=impl, kv_cache_spec=kv_cache_spec)
    # Create builder instance if needed
    builder_instance = None
    if builder_class:
        # Populate static_forward_context so builder can find the layer
        # MockLayer inherits from AttentionLayerBase, so isinstance checks pass
        vllm_config.compilation_config.static_forward_context = {"placeholder": layer}
        builder_instance = builder_class(
            kv_cache_spec=kv_cache_spec,
            layer_names=["placeholder"],
            vllm_config=vllm_config,
            device=device,
        )
    return impl, layer, builder_instance, indexer
# ============================================================================
# Config Helpers
# ============================================================================
def _extract_mla_dims_from_config(config) -> dict | None:
"""
Extract MLA dimensions from BenchmarkConfig if all required fields are present.
Args:
config: BenchmarkConfig instance
Returns:
Dict with MLA dimensions if all fields are provided, None otherwise
"""
# Check if all MLA-specific fields are provided
if all(
[
config.kv_lora_rank is not None,
config.qk_nope_head_dim is not None,
config.qk_rope_head_dim is not None,
config.v_head_dim is not None,
]
):
return {
"kv_lora_rank": config.kv_lora_rank,
"qk_nope_head_dim": config.qk_nope_head_dim,
"qk_rope_head_dim": config.qk_rope_head_dim,
"v_head_dim": config.v_head_dim,
"num_q_heads": config.num_q_heads,
"num_kv_heads": config.num_kv_heads,
"head_dim": config.head_dim,
}
# Fallback: if MLA fields not fully specified, try to construct from basic fields
elif config.head_dim == 576:
# This looks like a DeepSeek MLA config, use standard dimensions with custom
# head count
return {
"kv_lora_rank": 512,
"qk_nope_head_dim": 128,
"qk_rope_head_dim": 64,
"v_head_dim": 128,
"num_q_heads": config.num_q_heads,
"num_kv_heads": config.num_kv_heads,
"head_dim": config.head_dim,
}
return None
# ============================================================================
# Benchmark Execution
# ============================================================================
def _run_single_benchmark(
    config,
    impl,
    layer,
    builder_instance,
    backend_cfg: dict,
    mla_dims: dict,
    device: torch.device,
    indexer=None,
) -> BenchmarkResult:
    """
    Run a single benchmark iteration.

    Builds metadata and inputs from ``config.batch_spec``, picks the forward
    path (sparse MQA / decode / prefill) based on the built metadata, then
    times ``config.num_layers`` forwards per repeat with CUDA events.

    Args:
        config: BenchmarkConfig instance
        impl: Backend implementation instance
        layer: MockLayer instance
        builder_instance: Metadata builder instance
        backend_cfg: Backend configuration dict
        mla_dims: MLA dimension configuration
        device: Target device
        indexer: Optional MockIndexer for sparse backends
    Returns:
        BenchmarkResult with timing statistics (times normalized per layer)
    """
    # Parse batch spec
    requests = parse_batch_spec(config.batch_spec)
    q_lens = [r.q_len for r in requests]
    kv_lens = [r.kv_len for r in requests]
    total_q = sum(q_lens)
    max_kv_len = max(kv_lens)
    # Backend-mandated block size wins over the config value.
    block_size = backend_cfg["block_size"] or config.block_size
    # Build metadata
    metadata, num_blocks = _build_attention_metadata(
        requests, block_size, device, builder_instance
    )
    # MLA KV cache is (num_blocks, block_size, kv_lora_rank + rope_dim).
    kv_cache = torch.zeros(
        num_blocks,
        block_size,
        mla_dims["kv_lora_rank"] + mla_dims["qk_rope_head_dim"],
        device=device,
        dtype=torch.bfloat16,
    )
    # Create input tensors for both decode and prefill modes
    decode_inputs, prefill_inputs = _create_input_tensors(
        total_q,
        mla_dims,
        backend_cfg["query_format"],
        device,
        torch.bfloat16,
    )
    # Fill indexer with random indices for sparse backends
    is_sparse = backend_cfg.get("is_sparse", False)
    if is_sparse and indexer is not None:
        indexer.fill_random_indices(total_q, max_kv_len)
    # Determine which forward method to use; lambdas capture all inputs so
    # the timing loop below is a bare call.
    if is_sparse:
        # Sparse backends use forward_mqa
        forward_fn = lambda: impl.forward_mqa(decode_inputs, kv_cache, metadata, layer)
    elif metadata.decode is not None:
        forward_fn = lambda: impl._forward_decode(
            decode_inputs, kv_cache, metadata, layer
        )
    elif metadata.prefill is not None:
        forward_fn = lambda: impl._forward_prefill(
            prefill_inputs["q"],
            prefill_inputs["k_c_normed"],
            prefill_inputs["k_pe"],
            kv_cache,
            metadata,
            prefill_inputs["k_scale"],
            prefill_inputs["output"],
        )
    else:
        raise RuntimeError("Metadata has neither decode nor prefill metadata")
    # Warmup
    for _ in range(config.warmup_iters):
        forward_fn()
    torch.cuda.synchronize()
    # Timed region: CUDA events bracket num_layers forwards per repeat.
    times = []
    for _ in range(config.repeats):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        for _ in range(config.num_layers):
            forward_fn()
        end.record()
        torch.cuda.synchronize()
        elapsed_ms = start.elapsed_time(end)
        # Convert ms -> seconds and normalize to a single layer.
        times.append(elapsed_ms / 1000.0 / config.num_layers)
    mean_time = float(np.mean(times))
    return BenchmarkResult(
        config=config,
        mean_time=mean_time,
        std_time=float(np.std(times)),
        min_time=float(np.min(times)),
        max_time=float(np.max(times)),
        throughput_tokens_per_sec=total_q / mean_time if mean_time > 0 else 0,
    )
def _run_mla_benchmark_batched(
    backend: str,
    configs_with_params: list[tuple],  # [(config, threshold, num_splits), ...]
    index_topk: int = 2048,
) -> list[BenchmarkResult]:
    """
    Unified batched MLA benchmark runner for all backends.

    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla,
    flashinfer_mla_sparse, flashmla_sparse

    Backend initialization (impl, layer, builder, indexer) happens once and is
    reused across all benchmarks to avoid setup/teardown overhead. Per-config
    overrides (reorder_batch_threshold, num_kv_splits) are applied before each
    run and always restored afterwards.

    Args:
        backend: Backend name
        configs_with_params: List of (config, threshold, num_splits) tuples
            - threshold: reorder_batch_threshold (FlashAttn/FlashMLA only)
            - num_splits: num_kv_splits (CUTLASS only)
        index_topk: Topk value for sparse MLA backends (default 2048)

    Returns:
        List of BenchmarkResult objects
    """
    if not configs_with_params:
        return []
    backend_cfg = _get_backend_config(backend)
    device = torch.device(configs_with_params[0][0].device)
    torch.cuda.set_device(device)
    # Backend-mandated block size wins over the config value.
    config_block_size = configs_with_params[0][0].block_size
    block_size = backend_cfg["block_size"] or config_block_size
    # Extract MLA dimensions from the first config
    first_config = configs_with_params[0][0]
    mla_dims = _extract_mla_dims_from_config(first_config)
    # If config didn't provide MLA dims, fall back to default model
    if mla_dims is None:
        mla_dims = setup_mla_dims("deepseek-v3")
    # Determine if this is a sparse backend
    is_sparse = backend_cfg.get("is_sparse", False)
    # Create and set vLLM config for MLA (reused across all benchmarks)
    vllm_config = create_minimal_vllm_config(
        model_name="deepseek-v3",  # Used only for model path
        block_size=block_size,
        mla_dims=mla_dims,  # Use custom dims from config or default
        index_topk=index_topk if is_sparse else None,
    )
    results = []
    with set_current_vllm_config(vllm_config):
        # Create backend impl, layer, builder, and indexer (reused across benchmarks)
        impl, layer, builder_instance, indexer = _create_backend_impl(
            backend_cfg,
            mla_dims,
            vllm_config,
            device,
            index_topk=index_topk if is_sparse else None,
        )
        # Run each benchmark with the shared impl
        for config, threshold, num_splits in configs_with_params:
            # Apply per-config overrides, tracking explicit "overridden"
            # flags. Restoring only when the saved value is not None (the old
            # logic) leaked an override into later configs whenever the
            # original attribute value happened to be None.
            threshold_overridden = False
            original_threshold = None
            if threshold is not None and builder_instance:
                original_threshold = builder_instance.reorder_batch_threshold
                builder_instance.reorder_batch_threshold = threshold
                threshold_overridden = True
            splits_overridden = False
            original_num_splits = None
            if num_splits is not None and hasattr(impl, "_num_kv_splits"):
                original_num_splits = impl._num_kv_splits
                impl._num_kv_splits = num_splits
                splits_overridden = True
            try:
                result = _run_single_benchmark(
                    config,
                    impl,
                    layer,
                    builder_instance,
                    backend_cfg,
                    mla_dims,
                    device,
                    indexer=indexer,
                )
                results.append(result)
            finally:
                # Always restore overridden attributes, even if the saved
                # original value was None.
                if threshold_overridden:
                    builder_instance.reorder_batch_threshold = original_threshold
                if splits_overridden:
                    impl._num_kv_splits = original_num_splits
    return results
# ============================================================================
# Public API
# ============================================================================
def run_mla_benchmark(
    backend: str,
    config,
    reorder_batch_threshold: int | None = None,
    num_kv_splits: int | None = None,
    index_topk: int = 2048,
) -> BenchmarkResult | list[BenchmarkResult]:
    """
    Unified MLA benchmark runner for all backends.

    Works for: flashattn_mla, flashmla, flashinfer_mla, cutlass_mla,
    flashinfer_mla_sparse, flashmla_sparse

    Always uses batched execution internally for optimal performance.

    Args:
        backend: Backend name (flashattn_mla, flashmla, flashinfer_mla,
            cutlass_mla, flashinfer_mla_sparse, flashmla_sparse)
        config: BenchmarkConfig, list of BenchmarkConfig, or list of
            (BenchmarkConfig, param) tuples where param is interpreted as
            reorder_batch_threshold or num_kv_splits depending on the backend
        reorder_batch_threshold: Threshold override for FlashAttn/FlashMLA
            (single config mode only)
        num_kv_splits: Number of KV splits for CUTLASS (single config mode only)
        index_topk: Topk value for sparse MLA backends (default 2048)

    Returns:
        BenchmarkResult (single mode) or list of BenchmarkResult (batched mode)
    """
    # Single-config mode: wrap, run, unwrap.
    if not isinstance(config, list):
        batched = [(config, reorder_batch_threshold, num_kv_splits)]
        return _run_mla_benchmark_batched(backend, batched, index_topk)[0]
    # Batched mode: normalize every entry to (cfg, threshold, num_splits).
    if config and isinstance(config[0], tuple):
        # Entries are (cfg, param); the param's meaning depends on the backend.
        if backend in ("flashattn_mla", "flashmla", "flashmla_sparse"):
            batched = [(cfg, param, None) for cfg, param in config]
        else:  # cutlass_mla, flashinfer_mla, or sparse backends
            batched = [(cfg, None, param) for cfg, param in config]
    else:
        # Plain list of configs, no per-config parameters.
        batched = [(cfg, None, None) for cfg in config]
    return _run_mla_benchmark_batched(backend, batched, index_topk)
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/attention_benchmarks/mla_runner.py",
"license": "Apache License 2.0",
"lines": 752,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/attention_benchmarks/runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Standard attention benchmark runner - shared utilities for non-MLA benchmarks.
This module provides helpers for running standard attention backends
(FlashAttention, Triton, FlashInfer) with real vLLM integration.
"""
import logging
import types
from contextlib import contextmanager
import numpy as np
import torch
from batch_spec import parse_batch_spec, reorder_for_flashinfer
from common import BenchmarkConfig, BenchmarkResult, MockLayer, get_attention_scale
from vllm.config import (
CacheConfig,
CompilationConfig,
DeviceConfig,
LoadConfig,
ModelConfig,
ParallelConfig,
SchedulerConfig,
VllmConfig,
set_current_vllm_config,
)
from vllm.v1.attention.backends.utils import (
CommonAttentionMetadata,
get_kv_cache_layout,
set_kv_cache_layout,
)
from vllm.v1.kv_cache_interface import FullAttentionSpec
# ============================================================================
# Backend Configuration
# ============================================================================
def _get_backend_config(backend: str) -> dict:
    """
    Get backend configuration from AttentionBackendEnum.

    Args:
        backend: Backend name matching AttentionBackendEnum exactly
            (e.g., "FLASH_ATTN", "TRITON_ATTN", "FLASHINFER")

    Returns:
        Dict with backend_class

    Raises:
        ValueError: if *backend* is not a member of AttentionBackendEnum.
    """
    from vllm.v1.attention.backends.registry import AttentionBackendEnum

    try:
        resolved_class = AttentionBackendEnum[backend].get_class()
    except (KeyError, ValueError) as err:
        valid_backends = [
            member.name for member in AttentionBackendEnum if member.name != "CUSTOM"
        ]
        raise ValueError(
            f"Unknown backend: {backend}. Valid backends: {valid_backends}"
        ) from err
    return {"backend_class": resolved_class}
@contextmanager
def log_warnings_and_errors_only():
    """Raise the "vllm" logger to WARNING for the duration of the block.

    The previous level is restored on exit, even if the body raises.
    """
    vllm_logger = logging.getLogger("vllm")
    previous_level = vllm_logger.level
    vllm_logger.setLevel(logging.WARNING)
    try:
        yield
    finally:
        vllm_logger.setLevel(previous_level)
# ============================================================================
# Metadata Building Helpers
# ============================================================================
def _build_common_attn_metadata(
    q_lens: list[int],
    kv_lens: list[int],
    block_size: int,
    device: torch.device,
) -> CommonAttentionMetadata:
    """Build CommonAttentionMetadata from per-request query/kv lengths.

    Block tables are laid out contiguously: request ``i`` owns blocks
    ``[i * max_blocks, (i + 1) * max_blocks)``. The slot mapping is a plain
    arange over all query tokens.
    """
    num_reqs = len(q_lens)
    num_tokens = sum(q_lens)
    # Cumulative query offsets with a leading zero entry.
    q_starts = torch.zeros(num_reqs + 1, dtype=torch.int32, device=device)
    q_starts[1:] = torch.tensor(q_lens, dtype=torch.int32, device=device).cumsum(0)
    seq_lens = torch.tensor(kv_lens, dtype=torch.int32, device=device)
    # Every request gets the same (maximal) number of blocks.
    blocks_per_req = (max(kv_lens) + block_size - 1) // block_size
    block_table = torch.arange(
        num_reqs * blocks_per_req, dtype=torch.int32, device=device
    ).view(num_reqs, blocks_per_req)
    return CommonAttentionMetadata(
        query_start_loc=q_starts,
        query_start_loc_cpu=q_starts.cpu(),
        seq_lens=seq_lens,
        num_reqs=num_reqs,
        num_actual_tokens=num_tokens,
        max_query_len=max(q_lens),
        max_seq_len=int(seq_lens.max().item()),
        block_table_tensor=block_table,
        slot_mapping=torch.arange(num_tokens, dtype=torch.int64, device=device),
        causal=True,
    )
def _create_vllm_config(
    config: BenchmarkConfig,
    max_num_blocks: int,
) -> VllmConfig:
    """Create a VllmConfig for benchmarking with mock model methods.

    The model/tokenizer names are only used to resolve a model path and
    dtype; the dimension accessors that backends call are monkeypatched
    below to return the benchmark config's values instead of the real
    model's.

    Args:
        config: Benchmark configuration providing head counts/dims, block
            size, and layer count.
        max_num_blocks: Number of GPU KV cache blocks to advertise.
    Returns:
        A fully-populated VllmConfig.
    """
    model_config = ModelConfig(
        model="meta-llama/Meta-Llama-3-8B",
        tokenizer="meta-llama/Meta-Llama-3-8B",
        trust_remote_code=False,
        dtype="auto",  # Use model's native dtype
        seed=0,
        max_model_len=1024,
    )
    cache_config = CacheConfig(
        block_size=config.block_size,
        cache_dtype="auto",
        swap_space=0,
    )
    # Pre-set block counts so no profiling run is needed.
    cache_config.num_gpu_blocks = max_num_blocks
    cache_config.num_cpu_blocks = 0
    parallel_config = ParallelConfig(tensor_parallel_size=1)
    scheduler_config = SchedulerConfig(
        max_num_seqs=256,
        max_num_batched_tokens=8192,
        max_model_len=8192,
        is_encoder_decoder=False,
        enable_chunked_prefill=True,
    )
    device_config = DeviceConfig()
    load_config = LoadConfig()
    compilation_config = CompilationConfig()
    # Replace ModelConfig accessors with bound lambdas returning the
    # benchmark's values; types.MethodType binds them to this instance only.
    model_config.get_num_layers = types.MethodType(
        lambda self: config.num_layers, model_config
    )
    model_config.get_sliding_window_for_layer = types.MethodType(
        lambda self, i: None, model_config
    )
    model_config.get_logits_soft_cap_for_layer = types.MethodType(
        lambda self, i: 0.0, model_config
    )
    model_config.get_sm_scale_for_layer = types.MethodType(
        lambda self, i: 1.0 / config.head_dim**0.5, model_config
    )
    model_config.get_num_attention_heads = types.MethodType(
        lambda self, parallel_config=None: config.num_q_heads, model_config
    )
    model_config.get_num_kv_heads = types.MethodType(
        lambda self, parallel_config=None: config.num_kv_heads, model_config
    )
    model_config.get_head_size = types.MethodType(
        lambda self: config.head_dim, model_config
    )
    model_config.get_sliding_window = types.MethodType(lambda self: None, model_config)
    return VllmConfig(
        model_config=model_config,
        cache_config=cache_config,
        parallel_config=parallel_config,
        scheduler_config=scheduler_config,
        device_config=device_config,
        load_config=load_config,
        compilation_config=compilation_config,
    )
# ============================================================================
# Backend Initialization
# ============================================================================
def _create_backend_impl(
    backend_cfg: dict,
    config: BenchmarkConfig,
    device: torch.device,
    dtype: torch.dtype,
):
    """Instantiate the attention impl and a mock layer for *config*.

    Returns a tuple ``(backend_class, impl, layer)``.
    """
    backend_class = backend_cfg["backend_class"]
    impl_cls = backend_class.get_impl_cls()
    impl = impl_cls(
        num_heads=config.num_q_heads,
        head_size=config.head_dim,
        scale=get_attention_scale(config.head_dim),
        num_kv_heads=config.num_kv_heads,
        alibi_slopes=None,
        sliding_window=None,
        kv_cache_dtype="auto",
    )
    # The spec describes the KV cache the mock layer pretends to own.
    spec = FullAttentionSpec(
        block_size=config.block_size,
        num_kv_heads=config.num_kv_heads,
        head_size=config.head_dim,
        dtype=dtype,
    )
    return backend_class, impl, MockLayer(device, kv_cache_spec=spec)
def _create_metadata_builder(
    backend_class,
    kv_cache_spec: FullAttentionSpec,
    vllm_config: VllmConfig,
    device: torch.device,
    backend_name: str = "",
):
    """Create metadata builder instance.

    For FLASHINFER the module-level ``get_per_layer_parameters`` helper is
    patched for the duration of builder construction, because no real model
    layers are registered that it could introspect.

    Args:
        backend_class: Attention backend class providing get_builder_cls().
        kv_cache_spec: KV cache spec passed to the builder.
        vllm_config: Current vLLM config.
        device: Target device.
        backend_name: Backend name; only "FLASHINFER" triggers the patch.
    Returns:
        The constructed metadata builder.
    """
    layer_names = ["layer_0"]
    builder_cls = backend_class.get_builder_cls()
    # Flashinfer needs get_per_layer_parameters mocked since we don't have
    # real model layers registered
    if backend_name == "FLASHINFER":
        import unittest.mock
        from vllm.v1.attention.backends.utils import PerLayerParameters
        def mock_get_per_layer_parameters(vllm_config, layer_names, impl_cls):
            # Same defaults for every layer: no sliding window, no soft cap,
            # standard 1/sqrt(head_size) scale.
            head_size = vllm_config.model_config.get_head_size()
            return {
                layer_name: PerLayerParameters(
                    window_left=-1,  # No sliding window
                    logits_soft_cap=0.0,  # No soft cap
                    sm_scale=1.0 / (head_size**0.5),  # Standard scale
                )
                for layer_name in layer_names
            }
        # Patch only around construction; the builder captures what it needs.
        with unittest.mock.patch(
            "vllm.v1.attention.backends.flashinfer.get_per_layer_parameters",
            mock_get_per_layer_parameters,
        ):
            return builder_cls(
                kv_cache_spec=kv_cache_spec,
                layer_names=layer_names,
                vllm_config=vllm_config,
                device=device,
            )
    return builder_cls(
        kv_cache_spec=kv_cache_spec,
        layer_names=layer_names,
        vllm_config=vllm_config,
        device=device,
    )
# ============================================================================
# Tensor Creation Helpers
# ============================================================================
def _create_input_tensors(
config: BenchmarkConfig,
total_q: int,
device: torch.device,
dtype: torch.dtype,
) -> tuple:
"""Create Q, K, V input tensors for all layers."""
q_list = [
torch.randn(
total_q, config.num_q_heads, config.head_dim, device=device, dtype=dtype
)
for _ in range(config.num_layers)
]
k_list = [
torch.randn(
total_q, config.num_kv_heads, config.head_dim, device=device, dtype=dtype
)
for _ in range(config.num_layers)
]
v_list = [
torch.randn(
total_q, config.num_kv_heads, config.head_dim, device=device, dtype=dtype
)
for _ in range(config.num_layers)
]
return q_list, k_list, v_list
def _create_kv_cache(
config: BenchmarkConfig,
max_num_blocks: int,
backend_class,
device: torch.device,
dtype: torch.dtype,
) -> list:
"""Create KV cache tensors for all layers using the backend's methods.
Uses the backend's get_kv_cache_shape() and get_kv_cache_stride_order()
to create the cache with the correct shape and memory layout.
"""
# Get the logical shape from the backend
cache_shape = backend_class.get_kv_cache_shape(
num_blocks=max_num_blocks,
block_size=config.block_size,
num_kv_heads=config.num_kv_heads,
head_size=config.head_dim,
)
# Get the stride order for custom memory layout
try:
stride_order = backend_class.get_kv_cache_stride_order()
assert len(stride_order) == len(cache_shape)
except (AttributeError, NotImplementedError):
stride_order = tuple(range(len(cache_shape)))
# Permute shape to physical layout order
physical_shape = tuple(cache_shape[i] for i in stride_order)
# Compute inverse permutation to get back to logical view
inv_order = [stride_order.index(i) for i in range(len(stride_order))]
cache_list = []
for _ in range(config.num_layers):
# Allocate in physical layout order (contiguous in memory)
cache = torch.zeros(*physical_shape, device=device, dtype=dtype)
# Permute to logical view
cache = cache.permute(*inv_order)
cache_list.append(cache)
return cache_list
# ============================================================================
# Benchmark Execution
# ============================================================================
def _run_single_benchmark(
    config: BenchmarkConfig,
    impl,
    layer,
    q_list: list,
    k_list: list,
    v_list: list,
    cache_list: list,
    attn_metadata,
    device: torch.device,
    dtype: torch.dtype,
) -> tuple:
    """Run single benchmark iteration with warmup and timing loop.

    Runs ``config.warmup_iters`` untimed passes, then ``config.repeats``
    timed passes; each pass executes one forward per layer, reusing a single
    pre-allocated output buffer.

    Returns:
        Tuple ``(times, mem_stats)`` where times are per-layer seconds and
        mem_stats is empty unless ``config.profile_memory`` is set.
    """
    total_q = q_list[0].shape[0]
    # Shared output buffer; forward() writes into it via output=.
    out = torch.empty(
        total_q, config.num_q_heads, config.head_dim, device=device, dtype=dtype
    )
    # Warmup
    for _ in range(config.warmup_iters):
        for i in range(config.num_layers):
            impl.forward(
                layer,
                q_list[i],
                k_list[i],
                v_list[i],
                cache_list[i],
                attn_metadata,
                output=out,
            )
    torch.cuda.synchronize()
    # Timed region: CUDA events bracket num_layers forwards per repeat.
    times = []
    for _ in range(config.repeats):
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        for i in range(config.num_layers):
            impl.forward(
                layer,
                q_list[i],
                k_list[i],
                v_list[i],
                cache_list[i],
                attn_metadata,
                output=out,
            )
        end.record()
        torch.cuda.synchronize()
        elapsed_ms = start.elapsed_time(end)
        times.append(elapsed_ms / 1000.0 / config.num_layers)  # seconds per layer
    mem_stats = {}
    if config.profile_memory:
        mem_stats = {
            "allocated_mb": torch.cuda.memory_allocated(device) / 1024**2,
            "reserved_mb": torch.cuda.memory_reserved(device) / 1024**2,
        }
    return times, mem_stats
# ============================================================================
# Public API
# ============================================================================
def run_attention_benchmark(config: BenchmarkConfig) -> BenchmarkResult:
    """
    Run standard attention benchmark with real kernels.

    Supports: FLASH_ATTN, TRITON_ATTN, FLASHINFER

    End to end: parses the batch spec, builds vLLM config + metadata,
    allocates inputs and KV caches, then times the forwards via
    _run_single_benchmark.

    Args:
        config: Benchmark configuration
    Returns:
        BenchmarkResult with timing and memory statistics
    """
    device = torch.device(config.device)
    torch.cuda.set_device(device)
    backend_cfg = _get_backend_config(config.backend)
    requests = parse_batch_spec(config.batch_spec)
    # FlashInfer expects a specific request ordering within the batch.
    if config.backend == "FLASHINFER":
        requests = reorder_for_flashinfer(requests)
    q_lens = [r.q_len for r in requests]
    kv_lens = [r.kv_len for r in requests]
    total_q = sum(q_lens)
    max_kv = max(kv_lens)
    batch_size = len(q_lens)
    # Calculate total blocks needed: batch_size * max_blocks_per_request
    max_blocks_per_request = (max_kv + config.block_size - 1) // config.block_size
    max_num_blocks = batch_size * max_blocks_per_request
    # Suppress vLLM logs during setup to reduce spam
    with log_warnings_and_errors_only():
        # Create vllm_config first - uses model's native dtype via "auto"
        vllm_config = _create_vllm_config(config, max_num_blocks)
        dtype = vllm_config.model_config.dtype
        # Wrap everything in set_current_vllm_config context
        # This is required for backends like flashinfer that need global config
        with set_current_vllm_config(vllm_config):
            backend_class, impl, layer = _create_backend_impl(
                backend_cfg, config, device, dtype
            )
            # Set KV cache layout if the backend requires a specific one
            # (e.g., FlashInfer requires HND on SM100/Blackwell for TRTLLM attention)
            required_layout = backend_class.get_required_kv_cache_layout()
            if required_layout is not None:
                set_kv_cache_layout(required_layout)
                # Layout lookup is cached; clear so the override takes effect.
                get_kv_cache_layout.cache_clear()
            common_metadata = _build_common_attn_metadata(
                q_lens, kv_lens, config.block_size, device
            )
            kv_cache_spec = FullAttentionSpec(
                block_size=config.block_size,
                num_kv_heads=config.num_kv_heads,
                head_size=config.head_dim,
                dtype=dtype,
            )
            builder = _create_metadata_builder(
                backend_class, kv_cache_spec, vllm_config, device, config.backend
            )
            attn_metadata = builder.build(
                common_prefix_len=0,
                common_attn_metadata=common_metadata,
            )
            q_list, k_list, v_list = _create_input_tensors(
                config, total_q, device, dtype
            )
            cache_list = _create_kv_cache(
                config, max_num_blocks, backend_class, device, dtype
            )
            times, mem_stats = _run_single_benchmark(
                config,
                impl,
                layer,
                q_list,
                k_list,
                v_list,
                cache_list,
                attn_metadata,
                device,
                dtype,
            )
    mean_time = np.mean(times)
    throughput = total_q / mean_time if mean_time > 0 else 0
    return BenchmarkResult(
        config=config,
        mean_time=mean_time,
        std_time=np.std(times),
        min_time=np.min(times),
        max_time=np.max(times),
        throughput_tokens_per_sec=throughput,
        memory_allocated_mb=mem_stats.get("allocated_mb"),
        memory_reserved_mb=mem_stats.get("reserved_mb"),
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/attention_benchmarks/runner.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/server_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import hashlib
import json
import secrets
import uuid
from argparse import Namespace
from collections.abc import Awaitable
from contextlib import asynccontextmanager
from http import HTTPStatus
import pydantic
from fastapi import FastAPI, HTTPException, Request
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from starlette.concurrency import iterate_in_threadpool
from starlette.datastructures import URL, Headers, MutableHeaders
from starlette.types import ASGIApp, Message, Receive, Scope, Send
from vllm import envs
from vllm.engine.protocol import EngineClient
from vllm.entrypoints.openai.engine.protocol import ErrorInfo, ErrorResponse
from vllm.entrypoints.utils import sanitize_message
from vllm.exceptions import VLLMValidationError
from vllm.logger import init_logger
from vllm.utils.gc_utils import freeze_gc_heap
logger = init_logger("vllm.entrypoints.openai.server_utils")
class AuthenticationMiddleware:
    """
    Pure ASGI middleware that authenticates each request by checking
    if the Authorization Bearer token exists and equals any of "{api_key}".

    Tokens are stored and compared as SHA-256 digests via
    ``secrets.compare_digest``, so the comparison is constant-time and the
    plaintext keys are not retained on the instance.

    Notes
    -----
    There are two cases in which authentication is skipped:
        1. The HTTP method is OPTIONS.
        2. The request path doesn't start with /v1 (e.g. /health).
    """

    def __init__(self, app: ASGIApp, tokens: list[str]) -> None:
        self.app = app
        # Store hashed tokens only; raw token strings are discarded.
        self.api_tokens = [hashlib.sha256(t.encode("utf-8")).digest() for t in tokens]

    def verify_token(self, headers: Headers) -> bool:
        """Return True iff the Authorization header holds a valid Bearer token."""
        authorization_header_value = headers.get("Authorization")
        if not authorization_header_value:
            return False
        scheme, _, param = authorization_header_value.partition(" ")
        if scheme.lower() != "bearer":
            return False
        param_hash = hashlib.sha256(param.encode("utf-8")).digest()
        # Compare against every configured token without early exit so the
        # timing does not reveal which token matched.
        token_match = False
        for token_hash in self.api_tokens:
            token_match |= secrets.compare_digest(param_hash, token_hash)
        return token_match

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        # scope["type"] can be "lifespan" or "startup" for example, in which
        # case we don't need to do anything. Use scope.get("method"): ASGI
        # websocket scopes carry no "method" key, so a direct scope["method"]
        # lookup raised KeyError for websocket connections.
        if scope["type"] not in ("http", "websocket") or scope.get("method") == "OPTIONS":
            return self.app(scope, receive, send)
        root_path = scope.get("root_path", "")
        url_path = URL(scope=scope).path.removeprefix(root_path)
        headers = Headers(scope=scope)
        if url_path.startswith("/v1") and not self.verify_token(headers):
            response = JSONResponse(content={"error": "Unauthorized"}, status_code=401)
            return response(scope, receive, send)
        return self.app(scope, receive, send)
class XRequestIdMiddleware:
    """
    Middleware that sets the X-Request-Id header for each response
    to a random uuid4 (hex) value if the header isn't already
    present in the request, otherwise use the provided request id.
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    def __call__(self, scope: Scope, receive: Receive, send: Send) -> Awaitable[None]:
        if scope["type"] not in ("http", "websocket"):
            # Non-HTTP/WS scopes (e.g. lifespan) pass straight through.
            return self.app(scope, receive, send)

        incoming_headers = Headers(scope=scope)

        async def forward_with_request_id(message: Message) -> None:
            """Append X-Request-Id to the response-start message headers."""
            if message["type"] == "http.response.start":
                outgoing_headers = MutableHeaders(raw=message["headers"])
                outgoing_headers.append(
                    "X-Request-Id",
                    incoming_headers.get("X-Request-Id", uuid.uuid4().hex),
                )
            await send(message)

        return self.app(scope, receive, forward_with_request_id)
def load_log_config(log_config_file: str | None) -> dict | None:
if not log_config_file:
return None
try:
with open(log_config_file) as f:
return json.load(f)
except Exception as e:
logger.warning(
"Failed to load log config from file %s: error %s", log_config_file, e
)
return None
def get_uvicorn_log_config(args: Namespace) -> dict | None:
    """
    Get the uvicorn log config based on the provided arguments.

    Priority:
    1. If log_config_file is specified, use it
    2. If disable_access_log_for_endpoints is specified, create a config
       with the access log filter
    3. Otherwise, return None (use uvicorn defaults)
    """
    # Highest priority: an explicit config file.
    file_config = load_log_config(args.log_config_file)
    if file_config is not None:
        return file_config

    endpoints_spec = args.disable_access_log_for_endpoints
    if not endpoints_spec:
        # Nothing to customize; let uvicorn use its defaults.
        return None

    from vllm.logging_utils import create_uvicorn_log_config

    # The CLI passes the endpoints as a comma-separated string.
    excluded_paths = [
        path.strip() for path in endpoints_spec.split(",") if path.strip()
    ]
    return create_uvicorn_log_config(
        excluded_paths=excluded_paths,
        log_level=args.uvicorn_log_level,
    )
def _extract_content_from_chunk(chunk_data: dict) -> str:
    """Extract content from a streaming response chunk."""
    try:
        from vllm.entrypoints.openai.chat_completion.protocol import (
            ChatCompletionStreamResponse,
        )
        from vllm.entrypoints.openai.completion.protocol import (
            CompletionStreamResponse,
        )

        # Prefer type-safe parsing via the pydantic response models.
        object_type = chunk_data.get("object")
        if object_type == "chat.completion.chunk":
            parsed_chat = ChatCompletionStreamResponse.model_validate(chunk_data)
            if parsed_chat.choices and parsed_chat.choices[0].delta.content:
                return parsed_chat.choices[0].delta.content
        elif object_type == "text_completion":
            parsed_completion = CompletionStreamResponse.model_validate(chunk_data)
            if parsed_completion.choices and parsed_completion.choices[0].text:
                return parsed_completion.choices[0].text
    except pydantic.ValidationError:
        # Fallback to manual parsing when model validation rejects the chunk.
        if "choices" in chunk_data and chunk_data["choices"]:
            first_choice = chunk_data["choices"][0]
            if "delta" in first_choice and first_choice["delta"].get("content"):
                return first_choice["delta"]["content"]
            elif first_choice.get("text"):
                return first_choice["text"]
    return ""
class SSEDecoder:
    """Robust Server-Sent Events decoder for streaming responses."""

    def __init__(self):
        self.buffer = ""
        self.content_buffer = []

    def decode_chunk(self, chunk: bytes) -> list[dict]:
        """Decode a chunk of SSE data and return parsed events."""
        import json

        try:
            decoded = chunk.decode("utf-8")
        except UnicodeDecodeError:
            # Skip malformed chunks
            return []

        self.buffer += decoded

        events: list[dict] = []
        # Consume every complete line currently buffered; any trailing
        # partial line stays in the buffer for the next chunk.
        while "\n" in self.buffer:
            raw_line, _, self.buffer = self.buffer.partition("\n")
            raw_line = raw_line.rstrip("\r")  # Handle CRLF
            if not raw_line.startswith("data: "):
                continue
            payload = raw_line[6:].strip()
            if payload == "[DONE]":
                events.append({"type": "done"})
            elif payload:
                try:
                    events.append({"type": "data", "data": json.loads(payload)})
                except json.JSONDecodeError:
                    # Skip malformed JSON
                    continue
        return events

    def extract_content(self, event_data: dict) -> str:
        """Extract content from event data."""
        return _extract_content_from_chunk(event_data)

    def add_content(self, content: str) -> None:
        """Add content to the buffer."""
        if content:
            self.content_buffer.append(content)

    def get_complete_content(self) -> str:
        """Get the complete buffered content."""
        return "".join(self.content_buffer)
def _log_streaming_response(response, response_body: list) -> None:
    """Log streaming response with robust SSE parsing.

    Wraps the response body iterator so chunks are decoded as SSE events
    while being streamed; the accumulated content is logged once the stream
    emits its [DONE] event.
    """
    from starlette.concurrency import iterate_in_threadpool

    sse_decoder = SSEDecoder()
    chunk_count = 0

    def buffered_iterator():
        nonlocal chunk_count

        for chunk in response_body:
            chunk_count += 1
            yield chunk

            # Parse SSE events from chunk
            events = sse_decoder.decode_chunk(chunk)
            for event in events:
                if event["type"] == "data":
                    content = sse_decoder.extract_content(event["data"])
                    sse_decoder.add_content(content)
                elif event["type"] == "done":
                    # Log complete content when done
                    full_content = sse_decoder.get_complete_content()
                    if full_content:
                        # Truncate if too long. FIX: the "...[truncated]"
                        # marker used to be a stray expression statement on
                        # its own line, so it was never appended.
                        if len(full_content) > 2048:
                            full_content = full_content[:2048] + "...[truncated]"
                        logger.info(
                            "response_body={streaming_complete: content=%r, chunks=%d}",
                            full_content,
                            chunk_count,
                        )
                    else:
                        logger.info(
                            "response_body={streaming_complete: no_content, chunks=%d}",
                            chunk_count,
                        )
                    return

    response.body_iterator = iterate_in_threadpool(buffered_iterator())
    logger.info("response_body={streaming_started: chunks=%d}", len(response_body))
def _log_non_streaming_response(response_body: list) -> None:
    """Log a non-streaming response (only the first body section is shown)."""
    try:
        logger.info("response_body={%s}", response_body[0].decode())
    except UnicodeDecodeError:
        # Body is not valid UTF-8 text; don't dump raw bytes into the log.
        logger.info("response_body={<binary_data>}")
async def log_response(request: Request, call_next):
    """HTTP middleware that logs each response body, streaming or not.

    The body iterator is drained into memory so it can be both logged and
    re-served to the client, then re-wrapped in a threadpool iterator.
    """
    response = await call_next(request)
    # Drain the body so it can be inspected; restore it for the client below.
    response_body = [section async for section in response.body_iterator]
    response.body_iterator = iterate_in_threadpool(iter(response_body))
    # Check if this is a streaming response by looking at content-type
    content_type = response.headers.get("content-type", "")
    is_streaming = content_type == "text/event-stream; charset=utf-8"
    # Log response body based on type
    if not response_body:
        logger.info("response_body={<empty>}")
    elif is_streaming:
        _log_streaming_response(response, response_body)
    else:
        _log_non_streaming_response(response_body)
    return response
async def http_exception_handler(_: Request, exc: HTTPException):
    """Render a FastAPI HTTPException as an OpenAI-style error JSON body."""
    status = HTTPStatus(exc.status_code)
    error_info = ErrorInfo(
        message=sanitize_message(exc.detail),
        type=status.phrase,
        code=exc.status_code,
    )
    return JSONResponse(
        ErrorResponse(error=error_info).model_dump(),
        status_code=exc.status_code,
    )
async def validation_exception_handler(_: Request, exc: RequestValidationError):
    """Render request-validation failures as an OpenAI-style 400 response."""
    errors = exc.errors()

    # Surface the offending parameter name if a VLLMValidationError is
    # attached to one of the pydantic error contexts.
    param = None
    for item in errors:
        if "ctx" in item and "error" in item["ctx"]:
            candidate = item["ctx"]["error"]
            if isinstance(candidate, VLLMValidationError):
                param = candidate.parameter
                break

    # Include the raw error list only when it adds information beyond str(exc).
    exc_str = str(exc)
    errors_str = str(errors)
    if errors and errors_str and errors_str != exc_str:
        message = f"{exc_str} {errors_str}"
    else:
        message = exc_str

    err = ErrorResponse(
        error=ErrorInfo(
            message=sanitize_message(message),
            type=HTTPStatus.BAD_REQUEST.phrase,
            code=HTTPStatus.BAD_REQUEST,
            param=param,
        )
    )
    return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST)
# Strong references to background tasks so they are not garbage-collected
# before completion (asyncio.create_task only keeps a weak reference).
_running_tasks: set[asyncio.Task] = set()


@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan: start periodic stats logging, clean up on shutdown.

    On exit the stats task (if any) is cancelled and app.state is deleted so
    the engine client reference can be garbage-collected.
    """
    try:
        if app.state.log_stats:
            engine_client: EngineClient = app.state.engine_client

            async def _force_log():
                # Periodically flush engine stats to the logs.
                while True:
                    await asyncio.sleep(envs.VLLM_LOG_STATS_INTERVAL)
                    await engine_client.do_log_stats()

            task = asyncio.create_task(_force_log())
            _running_tasks.add(task)
            task.add_done_callback(_running_tasks.remove)
        else:
            task = None
        # Mark the startup heap as static so that it's ignored by GC.
        # Reduces pause times of oldest generation collections.
        freeze_gc_heap()
        try:
            yield
        finally:
            if task is not None:
                task.cancel()
    finally:
        # Ensure app state including engine ref is gc'd
        del app.state
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/server_utils.py",
"license": "Apache License 2.0",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/generate/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import TYPE_CHECKING
from fastapi import FastAPI
if TYPE_CHECKING:
from argparse import Namespace
from starlette.datastructures import State
from vllm.engine.protocol import EngineClient
from vllm.entrypoints.logger import RequestLogger
from vllm.tasks import SupportedTask
else:
RequestLogger = object
def register_generate_api_routers(app: FastAPI):
    """Attach the chat, responses, completion and Anthropic routers to *app*."""
    from vllm.entrypoints.anthropic.api_router import (
        attach_router as attach_anthropic_router,
    )
    from vllm.entrypoints.openai.chat_completion.api_router import (
        attach_router as attach_chat_router,
    )
    from vllm.entrypoints.openai.completion.api_router import (
        attach_router as attach_completion_router,
    )
    from vllm.entrypoints.openai.responses.api_router import (
        attach_router as attach_responses_router,
    )

    # Registration order preserved: chat, responses, completion, anthropic.
    attach_chat_router(app)
    attach_responses_router(app)
    attach_completion_router(app)
    attach_anthropic_router(app)
async def init_generate_state(
    engine_client: "EngineClient",
    state: "State",
    args: "Namespace",
    request_logger: RequestLogger | None,
    supported_tasks: tuple["SupportedTask", ...],
):
    """Populate *state* with the generation-related serving handlers.

    Each handler (responses, chat, completion, anthropic, tokens) is created
    only when "generate" is among *supported_tasks*; otherwise the
    corresponding state attribute is set to None.
    """
    from vllm.entrypoints.anthropic.serving import AnthropicServingMessages
    from vllm.entrypoints.chat_utils import load_chat_template
    from vllm.entrypoints.mcp.tool_server import (
        DemoToolServer,
        MCPToolServer,
        ToolServer,
    )
    from vllm.entrypoints.openai.chat_completion.serving import OpenAIServingChat
    from vllm.entrypoints.openai.completion.serving import OpenAIServingCompletion
    from vllm.entrypoints.openai.responses.serving import OpenAIServingResponses
    from vllm.entrypoints.serve.disagg.serving import ServingTokens

    # Tool server selection: "demo" builds the bundled demo server; any other
    # truthy value is treated as an MCP tool-server address; else no tools.
    if args.tool_server == "demo":
        tool_server: ToolServer | None = DemoToolServer()
        assert isinstance(tool_server, DemoToolServer)
        await tool_server.init_and_validate()
    elif args.tool_server:
        tool_server = MCPToolServer()
        await tool_server.add_tool_server(args.tool_server)
    else:
        tool_server = None

    resolved_chat_template = load_chat_template(args.chat_template)

    state.openai_serving_responses = (
        OpenAIServingResponses(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            chat_template=resolved_chat_template,
            chat_template_content_format=args.chat_template_content_format,
            return_tokens_as_token_ids=args.return_tokens_as_token_ids,
            enable_auto_tools=args.enable_auto_tool_choice,
            tool_parser=args.tool_call_parser,
            tool_server=tool_server,
            reasoning_parser=args.structured_outputs_config.reasoning_parser,
            enable_prompt_tokens_details=args.enable_prompt_tokens_details,
            enable_force_include_usage=args.enable_force_include_usage,
            enable_log_outputs=args.enable_log_outputs,
            log_error_stack=args.log_error_stack,
        )
        if "generate" in supported_tasks
        else None
    )
    state.openai_serving_chat = (
        OpenAIServingChat(
            engine_client,
            state.openai_serving_models,
            args.response_role,
            request_logger=request_logger,
            chat_template=resolved_chat_template,
            chat_template_content_format=args.chat_template_content_format,
            default_chat_template_kwargs=args.default_chat_template_kwargs,
            trust_request_chat_template=args.trust_request_chat_template,
            return_tokens_as_token_ids=args.return_tokens_as_token_ids,
            enable_auto_tools=args.enable_auto_tool_choice,
            exclude_tools_when_tool_choice_none=args.exclude_tools_when_tool_choice_none,
            tool_parser=args.tool_call_parser,
            reasoning_parser=args.structured_outputs_config.reasoning_parser,
            enable_prompt_tokens_details=args.enable_prompt_tokens_details,
            enable_force_include_usage=args.enable_force_include_usage,
            enable_log_outputs=args.enable_log_outputs,
            enable_log_deltas=args.enable_log_deltas,
            log_error_stack=args.log_error_stack,
        )
        if "generate" in supported_tasks
        else None
    )
    # Warm up chat template processing to avoid first-request latency
    if state.openai_serving_chat is not None:
        await state.openai_serving_chat.warmup()
    state.openai_serving_completion = (
        OpenAIServingCompletion(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            return_tokens_as_token_ids=args.return_tokens_as_token_ids,
            enable_prompt_tokens_details=args.enable_prompt_tokens_details,
            enable_force_include_usage=args.enable_force_include_usage,
            log_error_stack=args.log_error_stack,
        )
        if "generate" in supported_tasks
        else None
    )
    state.anthropic_serving_messages = (
        AnthropicServingMessages(
            engine_client,
            state.openai_serving_models,
            args.response_role,
            request_logger=request_logger,
            chat_template=resolved_chat_template,
            chat_template_content_format=args.chat_template_content_format,
            return_tokens_as_token_ids=args.return_tokens_as_token_ids,
            enable_auto_tools=args.enable_auto_tool_choice,
            tool_parser=args.tool_call_parser,
            reasoning_parser=args.structured_outputs_config.reasoning_parser,
            enable_prompt_tokens_details=args.enable_prompt_tokens_details,
            enable_force_include_usage=args.enable_force_include_usage,
        )
        if "generate" in supported_tasks
        else None
    )
    state.serving_tokens = (
        ServingTokens(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            return_tokens_as_token_ids=args.return_tokens_as_token_ids,
            log_error_stack=args.log_error_stack,
            enable_prompt_tokens_details=args.enable_prompt_tokens_details,
            enable_log_outputs=args.enable_log_outputs,
            force_no_detokenize=args.tokens_only,
        )
        if "generate" in supported_tasks
        else None
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/generate/api_router.py",
"license": "Apache License 2.0",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/sagemaker/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from collections.abc import Awaitable, Callable
from http import HTTPStatus
from typing import Any
import model_hosting_container_standards.sagemaker as sagemaker_standards
import pydantic
from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, Response
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.engine.serving import OpenAIServing
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.serve.instrumentator.basic import base
from vllm.entrypoints.serve.instrumentator.health import health
from vllm.tasks import POOLING_TASKS, SupportedTask
# TODO: RequestType = TypeForm[BaseModel] when recognized by type checkers
# (requires typing_extensions >= 4.13)
RequestType = Any
GetHandlerFn = Callable[[Request], OpenAIServing | None]
EndpointFn = Callable[[RequestType, Request], Awaitable[Any]]
def get_invocation_types(supported_tasks: tuple["SupportedTask", ...]):
    """Build the ordered (request type, (handler getter, endpoint)) pairs used
    by the SageMaker /invocations route to dispatch by request shape.

    Only request types whose task is in *supported_tasks* are included.
    Imports are deferred so unused task families are never loaded.
    """
    # NOTE: Items defined earlier take higher priority
    INVOCATION_TYPES: list[tuple[RequestType, tuple[GetHandlerFn, EndpointFn]]] = []
    if "generate" in supported_tasks:
        from vllm.entrypoints.openai.chat_completion.api_router import (
            chat,
            create_chat_completion,
        )
        from vllm.entrypoints.openai.chat_completion.protocol import (
            ChatCompletionRequest,
        )
        from vllm.entrypoints.openai.completion.api_router import (
            completion,
            create_completion,
        )
        from vllm.entrypoints.openai.completion.protocol import CompletionRequest

        INVOCATION_TYPES += [
            (ChatCompletionRequest, (chat, create_chat_completion)),
            (CompletionRequest, (completion, create_completion)),
        ]
    if "embed" in supported_tasks:
        from vllm.entrypoints.pooling.embed.api_router import (
            create_embedding,
            embedding,
        )
        from vllm.entrypoints.pooling.embed.protocol import EmbeddingRequest

        INVOCATION_TYPES += [
            (EmbeddingRequest, (embedding, create_embedding)),
        ]
    if "classify" in supported_tasks:
        from vllm.entrypoints.pooling.classify.api_router import (
            classify,
            create_classify,
        )
        from vllm.entrypoints.pooling.classify.protocol import ClassificationRequest

        INVOCATION_TYPES += [
            (ClassificationRequest, (classify, create_classify)),
        ]
    if "score" in supported_tasks:
        from vllm.entrypoints.pooling.score.api_router import do_rerank, rerank
        from vllm.entrypoints.pooling.score.protocol import RerankRequest

        INVOCATION_TYPES += [
            (RerankRequest, (rerank, do_rerank)),
        ]
    # Score requests are also accepted for embedding-only deployments.
    if "score" in supported_tasks or "embed" in supported_tasks:
        from vllm.entrypoints.pooling.score.api_router import create_score, score
        from vllm.entrypoints.pooling.score.protocol import ScoreRequest

        INVOCATION_TYPES += [
            (ScoreRequest, (score, create_score)),
        ]
    if any(task in POOLING_TASKS for task in supported_tasks):
        from vllm.entrypoints.pooling.pooling.api_router import create_pooling, pooling
        from vllm.entrypoints.pooling.pooling.protocol import PoolingRequest

        INVOCATION_TYPES += [
            (PoolingRequest, (pooling, create_pooling)),
        ]
    return INVOCATION_TYPES
def attach_router(app: FastAPI, supported_tasks: tuple["SupportedTask", ...]):
    """Attach the SageMaker /ping and /invocations routes to *app*.

    /invocations validates the request body against each supported request
    type in priority order and forwards to the first matching endpoint.
    """
    router = APIRouter()

    # NOTE: Construct the TypeAdapters only once
    INVOCATION_TYPES = get_invocation_types(supported_tasks)
    INVOCATION_VALIDATORS = [
        (pydantic.TypeAdapter(request_type), (get_handler, endpoint))
        for request_type, (get_handler, endpoint) in INVOCATION_TYPES
    ]

    @router.post("/ping", response_class=Response)
    @router.get("/ping", response_class=Response)
    @sagemaker_standards.register_ping_handler
    async def ping(raw_request: Request) -> Response:
        """Ping check. Endpoint required for SageMaker"""
        return await health(raw_request)

    @router.post(
        "/invocations",
        dependencies=[Depends(validate_json_request)],
        responses={
            HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
            HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value: {"model": ErrorResponse},
            HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
        },
    )
    @sagemaker_standards.register_invocation_handler
    @sagemaker_standards.stateful_session_manager()
    @sagemaker_standards.inject_adapter_id(adapter_path="model")
    async def invocations(raw_request: Request):
        """For SageMaker, routes requests based on the request type."""
        try:
            body = await raw_request.json()
        except json.JSONDecodeError as e:
            raise HTTPException(
                status_code=HTTPStatus.BAD_REQUEST.value,
                detail=f"JSON decode error: {e}",
            ) from e
        # Only consider request types whose serving handler is configured
        # on this deployment (get_handler returns None otherwise).
        valid_endpoints = [
            (validator, endpoint)
            for validator, (get_handler, endpoint) in INVOCATION_VALIDATORS
            if get_handler(raw_request) is not None
        ]
        # First request type that validates wins (priority order).
        for request_validator, endpoint in valid_endpoints:
            try:
                request = request_validator.validate_python(body)
            except pydantic.ValidationError:
                continue
            return await endpoint(request, raw_request)
        type_names = [
            t.__name__ if isinstance(t := validator._type, type) else str(t)
            for validator, _ in valid_endpoints
        ]
        msg = f"Cannot find suitable handler for request. Expected one of: {type_names}"
        res = base(raw_request).create_error_response(message=msg)
        return JSONResponse(content=res.model_dump(), status_code=res.error.code)

    app.include_router(router)
def sagemaker_standards_bootstrap(app: FastAPI) -> FastAPI:
    """Run the SageMaker hosting-container standards bootstrap over *app*."""
    return sagemaker_standards.bootstrap(app)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/sagemaker/api_router.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/kimi_k25.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# ruff: noqa: E501
"""
Kimi-K2.5 Model Implementation for vLLM.
Kimi-K2.5 extends Kimi-K2 with vision support
This module defines:
- KimiK25ProcessingInfo/KimiK25MultiModalProcessor: Processing logic
- KimiK25ForConditionalGeneration: Main model class
"""
from collections.abc import Iterable, Mapping, Sequence
from dataclasses import dataclass
from typing import Annotated, Any, Literal
import torch
from torch import nn
from transformers import BatchFeature
from transformers.processing_utils import ProcessorMixin
from vllm.config import VllmConfig
from vllm.config.multimodal import BaseDummyOptions
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import (
CompressedTensorsConfig,
)
from vllm.model_executor.models.interfaces import (
SupportsMultiModal,
SupportsPP,
SupportsQuant,
)
from vllm.model_executor.models.kimi_k25_vit import (
KimiK25MultiModalProjector,
MoonViT3dPretrainedModel,
vision_tower_forward,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
NestedTensors,
VisionChunk,
VisionChunkImage,
VisionChunkVideo,
)
from vllm.multimodal.parse import MultiModalDataItems, VisionChunkProcessorItems
from vllm.multimodal.processing import (
BaseDummyInputsBuilder,
BaseMultiModalProcessor,
BaseProcessingInfo,
InputProcessingContext,
PromptReplacement,
PromptUpdate,
)
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from vllm.transformers_utils.configs import KimiK25Config
from vllm.transformers_utils.processor import cached_get_image_processor
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .utils import (
AutoWeightsLoader,
WeightsMapper,
init_vllm_registered_model,
maybe_prefix,
)
logger = init_logger(__name__)
# Dummy input dimensions for profiling.
@dataclass
class MaxImageTokenMeta:
    """Dummy image dimensions used when building profiling inputs."""

    # Width/height in pixels of the dummy image.
    width: int = 3000
    height: int = 3000
class KimiK25MediaPixelInputs(TensorSchema):
    """
    Media input schema for K2-VL model.

    Dimensions:
        - np: Number of patches (flattened from all media items)
        - ps: Patch size
        - nm: Number of media items
    """

    type: Literal["pixel_values"] = "pixel_values"
    # Flattened patches from all media items: (np, 3, ps, ps).
    pixel_values: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("np", 3, "ps", "ps"),
    ]
    # Per-item grid (t, h, w); the product gives that item's patch count.
    grid_thws: Annotated[torch.Tensor, TensorShape("nm", 3)]
class MoonshotKimiVAutoProcessor(ProcessorMixin):
    """HF-style processor combining a tokenizer with the Kimi media processor.

    Expands each media placeholder token in the prompt to the number of
    tokens the corresponding media item will occupy.
    """

    attributes = ["tokenizer"]
    tokenizer_class = "AutoTokenizer"

    def __init__(
        self, media_processor=None, tokenizer=None, media_token_id: int | None = None
    ):
        super().__init__(tokenizer)
        self.media_processor = media_processor
        # Token id of the media placeholder; must be provided by the caller.
        self.media_token_id = media_token_id
        assert self.media_token_id is not None

    # We do not support str input for text here
    def __call__(
        self,
        vision_chunks: list[VisionChunk] | None = None,
        *,
        text: list[int] | str,
        **kwargs,
    ) -> BatchFeature:
        """
        Args:
            vision_chunks: List of VisionChunk items to be processed.
                For image: VisionChunkImage with type='image', image=PIL.Image
                For video_chunk: VisionChunkVideo with type='video_chunk', video_chunk=list[PIL.Image]
            text: The token ids to be fed to a model (required).
        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- list of token ids to be fed to a model.
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `vision_chunks` is not `None`.
            - **grid_thws** -- list of image 3D grid in LLM. Returned when `vision_chunks` is not `None`.
        """
        mm_inputs = {}
        input_ids = self.tokenizer.encode(text) if isinstance(text, str) else text
        if vision_chunks is not None:
            assert isinstance(vision_chunks, list)
            mm_inputs = self.media_processor.preprocess(vision_chunks)
            num_tokens_per_chunk = [
                self.media_processor.media_tokens_calculator(chunk)
                for chunk in vision_chunks
            ]
            # Expand each placeholder token in order, one media item per
            # placeholder, to that item's token count.
            new_input_ids = []
            for token in input_ids:
                if token == self.media_token_id:
                    new_input_ids.extend(
                        [self.media_token_id] * num_tokens_per_chunk.pop(0)
                    )
                else:
                    new_input_ids.append(token)
            input_ids = new_input_ids
        # XXX: _apply_hf_processor_text_mm will call tolist() on input_ids
        return BatchFeature(
            data={
                "input_ids": torch.tensor([input_ids]),
                **mm_inputs,
            }
        )
class KimiK25ProcessingInfo(BaseProcessingInfo):
    """Processing information for Kimi-K2.5 model.

    Provides configuration and utilities for processing both
    images and video-chunks.
    """

    def __init__(self, ctx: InputProcessingContext) -> None:
        super().__init__(ctx)
        self.hf_config = self.get_hf_config()
        self.media_token_id = self.hf_config.media_placeholder_token_id
        # Image processor is loaded from the model repo (trust_remote_code).
        media_processor = cached_get_image_processor(
            self.ctx.model_config.model, trust_remote_code=True
        )
        self.media_processor = media_processor
        self.hf_processor = MoonshotKimiVAutoProcessor(
            media_processor=self.media_processor,
            tokenizer=self.get_tokenizer(),
            media_token_id=self.media_token_id,
        )
        # Callable mapping a media item to its token count.
        self.media_tokens_calculator = self.media_processor.media_tokens_calculator

    def get_hf_processor(self):
        return self.hf_processor

    def get_hf_config(self):
        return self.ctx.get_hf_config(KimiK25Config)

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # None means unlimited
        return {"vision_chunk": None}
class KimiK25DummyInputsBuilder(BaseDummyInputsBuilder[KimiK25ProcessingInfo]):
    """Builds dummy inputs for Kimi-K2.5 model profiling."""

    def __init__(self, info: KimiK25ProcessingInfo) -> None:
        super().__init__(info)
        self.media_token_id = self.info.media_token_id
        self.frame_per_chunk = self.info.media_processor.num_frames_per_chunk

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """One placeholder token per dummy media item."""
        num_media = mm_counts.get("vision_chunk", 0)
        return "<|media_pad|>" * num_media

    def get_dummy_mm_items(self):
        """Return whichever dummy item (video chunk or image) costs more tokens,
        so profiling reserves for the worst case."""
        dummy_videos = self._get_dummy_images(
            height=MaxImageTokenMeta.height,
            width=MaxImageTokenMeta.width,
            num_images=self.frame_per_chunk,
        )
        video_chunk_dummy_item = VisionChunkVideo(
            type="video_chunk", video_chunk=dummy_videos
        )
        video_chunk_num_tokens = self.info.media_tokens_calculator(
            video_chunk_dummy_item
        )
        image_dummy_item = VisionChunkImage(
            type="image",
            image=self._get_dummy_images(
                height=MaxImageTokenMeta.height,
                width=MaxImageTokenMeta.width,
                num_images=1,
            )[0],
        )
        image_num_tokens = self.info.media_tokens_calculator(image_dummy_item)
        # return the larger one
        if video_chunk_num_tokens >= image_num_tokens:
            return [video_chunk_dummy_item]
        else:
            return [image_dummy_item]

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        # TODO: Support mm_options for vision_chunk to allow user configuration
        dummy_items = self.get_dummy_mm_items()
        return {"vision_chunk": dummy_items}
class KimiK25MultiModalProcessor(BaseMultiModalProcessor[KimiK25ProcessingInfo]):
    """Multi-modal processor for Kimi-K2.5.

    Handles both image and video-chunk modalities.
    """

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Indicates how to slice media input into multiple items.

        pixel_values: [N, 3, patch_size, patch_size], all patches collected from B medias
        grid_thws: [B,3], each item: [N_t, N_h ,N_w], indicates the grid size in time/height/width direction
            for current item.
        by multiplying [N_t, N_h ,N_w], we get the number of patches for each media item, thus we can slice
        pixel_values by pixel_values[start:start + N_t*N_h*N_w] to get patches of one item.
        """
        grid_thws = hf_inputs.get("grid_thws", torch.empty((0, 3)))
        # Patches per media item = t * h * w of its grid.
        grid_sizes = grid_thws.prod(-1)
        return dict(
            pixel_values=MultiModalFieldConfig.flat_from_sizes(
                "vision_chunk", grid_sizes
            ),
            grid_thws=MultiModalFieldConfig.batched("vision_chunk"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Replace each media placeholder token with as many copies as the
        corresponding media item occupies."""
        hf_config = self.info.get_hf_config()
        media_token_id = hf_config.media_placeholder_token_id

        def get_replacement(item_idx: int):
            media = mm_items.get_items("vision_chunk", (VisionChunkProcessorItems,))
            num_media_token = self.info.media_tokens_calculator(media[item_idx])
            return [media_token_id] * num_media_token

        return [
            PromptReplacement(
                modality="vision_chunk",
                target=[media_token_id],
                replacement=get_replacement,
            ),
        ]

    def split_video_chunks(self, video):
        """Delegate video-to-chunk splitting to the media processor."""
        return self.info.media_processor.split_video_chunks(video)
@MULTIMODAL_REGISTRY.register_processor(
    KimiK25MultiModalProcessor,
    info=KimiK25ProcessingInfo,
    dummy_inputs=KimiK25DummyInputsBuilder,
)
class KimiK25ForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsPP, SupportsQuant
):
    """Kimi-K2.5 model for conditional generation.

    Supports both image and video-chunk modalities.
    Video-chunks are temporal segments (typically 4 frames) that are
    processed with temporal pooling.
    """

    supports_encoder_tp_data = True
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            # For legacy NVFP4 checkpoint compatibility:
            # see https://github.com/vllm-project/vllm/pull/33346#issuecomment-3851475033
            "language_model.layers.": "language_model.model.layers.",
            # mm projector
            "mm_projector.proj.0": "mm_projector.linear_1",
            "mm_projector.proj.2": "mm_projector.linear_2",
        }
    )

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for the given modality."""
        # Kimi-K2.5 uses video_chunk for all media types
        if modality == "image":
            return "<|media_begin|>image<|media_content|><|media_pad|><|media_end|>"
        elif modality == "video":
            # return a placeholder, to be replaced in the future.
            return "<|kimi_k25_video_placeholder|>"
        raise ValueError(f"Unsupported modality: {modality}")

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
        model_config = vllm_config.model_config
        config: KimiK25Config = model_config.hf_config
        self.config = config
        quant_config = vllm_config.quant_config
        # Check for MoonViT config compatibility
        self.use_data_parallel = (
            model_config.multimodal_config.mm_encoder_tp_mode == "data"
        )
        self.hidden_size = config.text_config.hidden_size
        self.device = current_platform.current_device()
        # Build vision tower directly with KimiK25VisionConfig
        with self._mark_tower_model(vllm_config, "vision_chunk"):
            self.vision_tower = MoonViT3dPretrainedModel(
                config.vision_config,
                quant_config=self._maybe_ignore_quant_config(quant_config),
                prefix=maybe_prefix(prefix, "vision_tower"),
            )
            self.vision_tower = self.vision_tower.to(
                device=self.device, dtype=model_config.dtype
            )
            self.mm_projector = KimiK25MultiModalProjector(
                config=config.vision_config,
                use_data_parallel=self.use_data_parallel,
                quant_config=self._maybe_ignore_quant_config(quant_config),
                prefix=maybe_prefix(prefix, "mm_projector"),
            )
            self.mm_projector = self.mm_projector.to(
                device=self.device, dtype=model_config.dtype
            )
        self.quant_config = quant_config
        # The text backbone is a DeepseekV2-style causal LM.
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
                architectures=["DeepseekV2ForCausalLM"],
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
        self.media_placeholder: int = self.config.media_placeholder_token_id

    def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig):
        # Vision modules are left unquantized for compressed-tensors configs.
        if isinstance(quant_config, CompressedTensorsConfig):
            return None
        return quant_config

    def _parse_and_validate_media_input(
        self, **kwargs: object
    ) -> KimiK25MediaPixelInputs | None:
        """Validate raw multimodal kwargs into KimiK25MediaPixelInputs.

        Returns None when no pixel values were provided.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        grid_thws = kwargs.pop("grid_thws", None)
        if pixel_values is None:
            return None
        if isinstance(pixel_values, list):
            pixel_values = torch.cat(pixel_values, dim=0)
        if len(pixel_values.shape) == 5 or len(pixel_values.shape) == 3:
            pixel_values = pixel_values.reshape(
                pixel_values.shape[0] * pixel_values.shape[1], *pixel_values.shape[2:]
            )
        # The batch dimension of pixel_values has been flattened into shape[0]
        target_dtype = next(self.vision_tower.parameters()).dtype
        pixel_values = pixel_values.to(target_dtype)
        assert isinstance(grid_thws, torch.Tensor), (
            f"expect grid_thws to be a tensor, get {type(grid_thws)}"
        )
        # In some cases (e.g. with merger), grid_thws has an extra middle dimension
        grid_thws = grid_thws.reshape(-1, grid_thws.shape[-1])
        assert grid_thws.ndim == 2 and grid_thws.size(1) == 3, (
            f"unexpected shape for grid_thws: {grid_thws.shape}"
        )
        return KimiK25MediaPixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
            grid_thws=grid_thws,
        )

    def _process_media_input(
        self, media_input: KimiK25MediaPixelInputs
    ) -> list[torch.Tensor]:
        """Run patches through the vision tower and projector."""
        # NOTE(moyan): This forward will automatically batch the forward pass internally
        media_features = vision_tower_forward(
            self.vision_tower,
            media_input["pixel_values"],
            media_input["grid_thws"],
            mm_projector=self.mm_projector,
            use_data_parallel=self.use_data_parallel,
        )
        return media_features

    def embed_multimodal(self, **kwargs: object) -> NestedTensors | None:
        """Return vision embeddings for the given mm kwargs, or None."""
        # Validate the multimodal input keyword arguments
        media_input = self._parse_and_validate_media_input(**kwargs)
        if media_input is None:
            return None
        # Run multimodal inputs through encoder and projector
        vision_embeddings = self._process_media_input(media_input)
        return vision_embeddings

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> IntermediateTensors:
        # Under pipeline parallelism, non-first ranks receive intermediate
        # tensors and must not also receive embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.language_model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        logits = self.language_model.compute_logits(hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/kimi_k25.py",
"license": "Apache License 2.0",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/kimi_k25_vit.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Vision tower implementation for Kimi-K2.5 model.
This module provides the vision encoder components for Kimi-K2.5,
including 3D patch embedding, RoPE position embedding, and
temporal pooling for video chunks.
"""
from collections.abc import Sequence
from copy import deepcopy
from typing import Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.activations import GELUActivation
from vllm.distributed import divide, get_tensor_model_parallel_world_size
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
ReplicatedLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.models.utils import maybe_prefix
from vllm.model_executor.models.vision import (
is_vit_use_data_parallel,
run_dp_sharded_mrope_vision_model,
)
from vllm.transformers_utils.configs.kimi_k25 import KimiK25VisionConfig
logger = init_logger(__name__)
def _apply_rope_input_validation(x, freqs_cis):
    """Check x (..., num_heads, head_dim) against freqs_cis (..., head_dim/2):
    matching leading dims, head_dim == 2 * freqs, and complex64 frequencies."""
    assert x.ndim == freqs_cis.ndim + 1, (x.shape, freqs_cis.shape)
    assert x.shape[:-2] == freqs_cis.shape[:-1], (x.shape, freqs_cis.shape)
    assert x.shape[-1] == 2 * freqs_cis.shape[-1], (x.shape, freqs_cis.shape)
    assert freqs_cis.dtype == torch.complex64, freqs_cis.dtype


def get_rope_shape_decorate(func):
    """Warm up `func` (the compiled interpolation below) once per
    (requires_grad, grad-enabled, interpolation_mode) key by first calling it
    with a fixed 64x64 shape, so compilation happens ahead of the first
    real-resolution call."""
    _get_rope_shape_first_call_flag = set()

    def wrapper(org, interpolation_mode, shape):
        key = (org.requires_grad, torch.is_grad_enabled(), interpolation_mode)
        if key not in _get_rope_shape_first_call_flag:
            _get_rope_shape_first_call_flag.add(key)
            _ = func(org, interpolation_mode, shape=(64, 64))
        return func(org, interpolation_mode, shape)

    return wrapper


@get_rope_shape_decorate
@torch.compile(dynamic=True)
def get_rope_shape(org, interpolation_mode, shape):
    """Resize a (H, W, C) positional-embedding grid to `shape` and flatten it
    to (shape[0] * shape[1], C)."""
    return (
        F.interpolate(
            org.permute((2, 0, 1)).unsqueeze(0),
            size=shape,
            mode=interpolation_mode,
        )
        .squeeze(0)
        .permute((1, 2, 0))
        .flatten(end_dim=1)
    )


def apply_rope(
    xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Args: (The leading dimensions of all inputs should be the same)
        xq: query, tensor of shape (..., num_q_heads, head_dim)
        xk: key, tensor of shape (..., num_kv_heads, head_dim)
        freqs_cis: tensor of shape (..., head_dim/2), dtype=torch.complex64.
    Returns:
        xq_out, xk_out: rotated tensors with the same shapes as xq / xk
    """
    _apply_rope_input_validation(xq, freqs_cis)
    _apply_rope_input_validation(xk, freqs_cis)
    freqs_cis = freqs_cis.unsqueeze(-2)  # ..., 1, head_dim/2
    # View the last dim as interleaved (real, imag) pairs: ..., heads, head_dim/2
    xq_ = torch.view_as_complex(xq.float().view(*xq.shape[:-1], -1, 2))
    # BUGFIX: reshape xk with its own shape (was xq.shape), so the key tensor
    # may carry a different head count than the query (e.g. GQA/MQA).
    xk_ = torch.view_as_complex(xk.float().view(*xk.shape[:-1], -1, 2))
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(-2)  # ..., num_heads, head_dim
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(-2)  # ..., num_heads, head_dim
    return xq_out.type_as(xq), xk_out.type_as(xk)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """Generate 1D sincos positional embedding from grid positions.

    :param embed_dim: embedding dimension (must be even)
    :param pos: array of positions, flattened to shape (M,)
    :return: (M, embed_dim) array with [sin | cos] halves concatenated
    """
    assert embed_dim % 2 == 0
    half_dim = embed_dim // 2
    # Inverse frequencies 1 / 10000^(2i/D), i = 0..D/2-1.
    inv_freq = 1.0 / 10000 ** (np.arange(half_dim, dtype=np.float32) / half_dim)
    angles = np.outer(pos.reshape(-1), inv_freq)  # (M, D/2)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)


def get_1d_sincos_pos_embed(embed_dim, t_size, cls_token=False):
    """Generate 1D sincos positional embedding.

    :param embed_dim: embedding dimension
    :param t_size: number of temporal positions
    :param cls_token: when True, prepend one all-zero row for a [CLS] token
    :return: (t_size [+ 1], embed_dim) array
    """
    positions = np.arange(t_size, dtype=np.float32)
    pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, positions)
    if cls_token:
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
class Learnable2DInterpPosEmbDivided_fixed(nn.Module):
    """2D learnable position embedding with temporal extension.

    A learnable (height, width, dim) spatial grid is interpolated to each
    sample's (h, w); a fixed (non-learnable) 1D sincos table is added per
    frame for videos (t > 1).
    """
    def __init__(
        self,
        height: int,
        width: int,
        num_frames: int,
        dim: int,
        interpolation_mode: str = "bicubic",
    ) -> None:
        super().__init__()
        self.height = height
        self.width = width
        self.num_frames = num_frames
        self.dim = dim
        self.interpolation_mode = interpolation_mode
        # Learnable spatial grid, resized on demand in forward().
        self.weight = nn.Parameter(torch.empty(height, width, dim))
        # Fixed sincos temporal table of shape (num_frames, 1, dim);
        # non-persistent: recomputed rather than loaded from checkpoints.
        self.register_buffer(
            "time_weight",
            torch.from_numpy(get_1d_sincos_pos_embed(self.dim, self.num_frames))
            .float()
            .unsqueeze(1),
            persistent=False,
        )
        self.reset_parameters()
    def reset_parameters(self):
        """Initialize the learnable spatial grid from a standard normal."""
        nn.init.normal_(self.weight)
    def forward(self, x: torch.Tensor, grid_thws: torch.Tensor) -> torch.Tensor:
        """Add positional embeddings to packed patch tokens.

        :param x: packed tokens — presumably shape (sum(t*h*w), dim); confirm
            against MoonVision3dPatchEmbed.forward
        :param grid_thws: per-sample (t, h, w) grid sizes
        :return: x plus positional embeddings, same shape as x
        """
        pos_embs = []
        for t, h, w in grid_thws.tolist():
            assert t <= self.num_frames, f"t:{t} > self.num_frames:{self.num_frames}"
            if (h, w) == self.weight.shape[:-1]:
                # Native resolution: use the grid directly, flattened to (h*w, dim).
                pos_emb_2d = self.weight.flatten(end_dim=1)
            else:
                # Interpolate the learnable grid to the sample's (h, w).
                pos_emb_2d = get_rope_shape(
                    self.weight,
                    interpolation_mode=self.interpolation_mode,
                    shape=(h, w),
                )
            if t == 1:
                pos_emb_3d = pos_emb_2d
            else:
                # Repeat the spatial embedding per frame, then add the
                # per-frame temporal term.
                pos_emb_3d = (
                    pos_emb_2d.unsqueeze(0).repeat(t, 1, 1) + self.time_weight[0:t]
                )
            pos_embs.append(pos_emb_3d.reshape(-1, pos_emb_3d.shape[-1]))
        out = x + torch.cat(pos_embs)
        return out
class MoonVision3dPatchEmbed(nn.Module):
    """3D patch embedding for vision tower.

    Projects patch pixels to `out_dim` with a Conv2d whose kernel equals its
    stride (non-overlapping patches), then adds positional embeddings.
    """
    def __init__(
        self,
        out_dim: int,
        in_dim: int = 3,
        patch_size: int | tuple[int, int] = (14, 14),
        pos_emb_height: int = 14,
        pos_emb_width: int = 14,
        pos_emb_time: int = 4,
        pos_emb_type: str = "divided_fixed",
    ):
        super().__init__()
        assert isinstance(patch_size, int | Sequence), (
            f"Invalid patch_size type: {type(patch_size)}"
        )
        if isinstance(patch_size, int):
            # A single int means square patches.
            patch_size = (patch_size, patch_size)
        assert len(patch_size) == 2, (
            f"Expected patch_size to be a tuple of 2, got {patch_size}"
        )
        self.patch_size = patch_size
        # Kernel == stride: each patch is projected independently.
        self.proj = nn.Conv2d(
            in_dim, out_dim, kernel_size=patch_size, stride=patch_size
        )
        if pos_emb_type == "divided_fixed":
            self.pos_emb = Learnable2DInterpPosEmbDivided_fixed(
                height=pos_emb_height,
                width=pos_emb_width,
                num_frames=pos_emb_time,
                dim=out_dim,
            )
        else:
            raise NotImplementedError(f"Not support pos_emb_type: {pos_emb_type}")
    def forward(self, x: torch.Tensor, grid_thws: torch.Tensor) -> torch.Tensor:
        """Embed pixels and add positional embeddings.

        NOTE(review): the view(x.size(0), -1) implies x is pre-patchified,
        i.e. shape (num_patches, in_dim, ph, pw), so the conv yields one
        out_dim vector per patch — confirm against the processor/caller.
        """
        x = self.proj(x).view(x.size(0), -1)
        # apply positional embedding
        x = self.pos_emb(x, grid_thws)
        return x
class Rope2DPosEmbRepeated(nn.Module):
    """2D rotary position embedding with multi-resolution support.

    The full max_height x max_width frequency table is computed once
    (lazily, on first use); per-sample tables are sliced from its top-left
    corner and repeated along the temporal axis.
    """
    def __init__(self, dim: int, max_height: int, max_width: int, theta_base=10000):
        """
        :param dim: per-head dimension; must be divisible by 4 since the
            complex frequencies are split evenly between x and y axes
        :param max_height: maximum supported grid height
        :param max_width: maximum supported grid width
        :param theta_base: RoPE frequency base
        """
        super().__init__()
        self.dim = dim
        assert self.dim % 4 == 0, "dim must be divisible by 4"
        self.max_height = max_height
        self.max_width = max_width
        self.theta_base = theta_base
    def extra_repr(self):
        return (
            f"dim={self.dim}, max_height={self.max_height}, "
            f"max_width={self.max_width}, theta_base={self.theta_base}"
        )
    def _precompute_freqs_cis(self, device: torch.device) -> torch.Tensor:
        """Calculate the cis(freqs) for each position in the 2D grid."""
        N = self.max_height * self.max_width
        flat_pos = torch.arange(0, N).float().to(device)
        # Row-major coordinates: x = column index, y = row index.
        x_pos = flat_pos % self.max_width
        y_pos = flat_pos // self.max_width
        dim_range = (
            torch.arange(0, self.dim, 4)[: (self.dim // 4)].float().to(device)
        )  # C/4
        freqs = 1.0 / (self.theta_base ** (dim_range / self.dim))
        x_freqs = torch.outer(x_pos, freqs).float()  # N, C/4
        y_freqs = torch.outer(y_pos, freqs).float()  # N, C/4
        # Unit-magnitude complex exponentials e^{i*theta}.
        x_cis = torch.polar(torch.ones_like(x_freqs), x_freqs)  # N, C/4
        y_cis = torch.polar(torch.ones_like(y_freqs), y_freqs)  # N, C/4
        # N, C/4, 2  — interleave x- and y-axis frequencies
        freqs_cis = torch.cat(
            [x_cis.unsqueeze(dim=-1), y_cis.unsqueeze(dim=-1)], dim=-1
        )
        # max_height, max_width, C/2
        freqs_cis = freqs_cis.reshape(self.max_height, self.max_width, -1)
        return freqs_cis
    def get_freqs_cis(
        self, grid_thws: torch.Tensor, device: torch.device
    ) -> torch.Tensor:
        """
        Args:
            grid_thws (torch.Tensor): grid time, height and width
        Returns:
            freqs_cis: tensor of shape (sum(t * height * width), dim//2)
        """
        # Build and cache the full table lazily on first use.
        if not hasattr(self, "freqs_cis"):
            self.register_buffer(
                "freqs_cis", self._precompute_freqs_cis(device), persistent=False
            )
        shapes = grid_thws.tolist()
        assert all(
            1 <= h <= self.max_height and 1 <= w <= self.max_width for t, h, w in shapes
        ), (
            shapes,
            self.max_height,
            self.max_width,
        )
        # Slice the top-left (h, w) corner per sample; repeat per frame so
        # every frame of a video shares the same 2D table.
        freqs_cis = torch.cat(
            [
                self.freqs_cis[:h, :w].reshape(-1, self.dim // 2).repeat(t, 1)
                for t, h, w in shapes
            ],
            dim=0,
        )
        return freqs_cis
class MLP2(nn.Module):
    """Two-layer MLP with tensor parallel support.

    fc0 is column-parallel and fc1 row-parallel, forming a standard
    TP-sharded MLP; both run unsharded when use_data_parallel is set.
    """
    def __init__(
        self,
        dims: list[int],
        activation,
        bias: bool = True,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        use_data_parallel: bool = False,
    ):
        """
        :param dims: [input_dim, hidden_dim, output_dim]
        :param activation: callable applied between the two linear layers
        """
        super().__init__()
        assert len(dims) == 3
        self.use_data_parallel = use_data_parallel
        self.fc0 = ColumnParallelLinear(
            dims[0],
            dims[1],
            bias=bias,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "fc0"),
            disable_tp=self.use_data_parallel,
        )
        self.fc1 = RowParallelLinear(
            dims[1],
            dims[2],
            bias=bias,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "fc1"),
            disable_tp=self.use_data_parallel,
        )
        self.activation = activation
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Parallel linear layers return a (output, extra) tuple; keep output.
        x, _ = self.fc0(x)
        x = self.activation(x)
        x, _ = self.fc1(x)
        return x
class MoonViTEncoderLayer(nn.Module):
    """Single encoder layer for MoonViT with TP/DP support.

    Pre-norm transformer block: norm -> packed-QKV self-attention with 2D
    RoPE -> residual add, then norm -> MLP -> residual add.
    """
    def __init__(
        self,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        *,
        activation=F.gelu,
        attn_bias: bool = False,
    ):
        super().__init__()
        self.use_data_parallel = is_vit_use_data_parallel()
        self.num_heads = num_heads
        self.hidden_dim = hidden_dim
        self.hidden_size_per_attention_head = self.hidden_dim // self.num_heads
        # Under data parallelism the layer is replicated, so attention heads
        # are not sharded across tensor-parallel ranks.
        self.tp_size = (
            1 if self.use_data_parallel else get_tensor_model_parallel_world_size()
        )
        self.num_attention_heads_per_partition = divide(num_heads, self.tp_size)
        self.norm0 = nn.LayerNorm(hidden_dim)
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.mlp = MLP2(
            [hidden_dim, mlp_dim, hidden_dim],
            activation,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
            use_data_parallel=self.use_data_parallel,
        )
        # Fused QKV projection; num_kv_heads == num_heads (full MHA).
        self.wqkv = QKVParallelLinear(
            hidden_size=hidden_dim,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=num_heads,
            total_num_kv_heads=num_heads,
            bias=attn_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.wqkv",
            disable_tp=self.use_data_parallel,
        )
        # Output projection back to hidden_dim.
        self.wo = RowParallelLinear(
            hidden_dim,
            hidden_dim,
            bias=attn_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.wo",
            disable_tp=self.use_data_parallel,
        )
        self.attn = MMEncoderAttention(
            num_heads=self.num_attention_heads_per_partition,
            head_size=self.hidden_size_per_attention_head,
            scale=self.hidden_size_per_attention_head**-0.5,
            prefix=f"{prefix}.attn",
        )
    def attention_qkvpacked(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rope_freqs_cis: torch.Tensor | None = None,
    ):
        """Compute self-attention with packed QKV.
        Args:
            x (torch.Tensor): (seqlen, hidden_dim)
            cu_seqlens (torch.Tensor): cumulative sequence lengths
            rope_freqs_cis: complex RoPE table applied to q and k
        """
        seq_length = x.size(0)
        xqkv, _ = self.wqkv(x)
        qkv_shape = xqkv.size()[:-1] + (
            3,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
        )
        # xqkv: (seqlen, 3, nheads, headdim)
        xqkv = xqkv.view(*qkv_shape)
        xq, xk, xv = torch.unbind(xqkv, dim=-3)
        xq, xk = apply_rope(xq, xk, rope_freqs_cis)
        # Longest per-sample length, required by the varlen attention kernel.
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
        attn_out = self.attn(
            xq.unsqueeze(0),
            xk.unsqueeze(0),
            xv.unsqueeze(0),
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        attn_out = attn_out.reshape(
            seq_length,
            self.num_attention_heads_per_partition
            * self.hidden_size_per_attention_head,
        )
        attn_out, _ = self.wo(attn_out)
        return attn_out
    def forward(
        self,
        hidden_states: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rope_freqs_cis: torch.Tensor | None = None,
    ):
        """Apply one pre-norm attention block followed by a pre-norm MLP."""
        residual = hidden_states
        hidden_states = self.norm0(hidden_states)
        hidden_states = self.attention_qkvpacked(
            hidden_states, cu_seqlens, rope_freqs_cis
        )
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states
class MoonViT3dEncoder(nn.Module):
    """Full encoder stack for MoonViT 3D."""
    def __init__(
        self,
        hidden_dim: int,
        num_layers: int,
        block_cfg: dict,
        video_attn_type: str = "spatial_temporal",
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        """
        :param block_cfg: kwargs forwarded to every MoonViTEncoderLayer;
            must contain "hidden_dim" and "num_heads" (used for RoPE setup)
        """
        super().__init__()
        assert video_attn_type == "spatial_temporal", (
            f'video_attn_type must be "spatial_temporal", got {video_attn_type}'
        )
        self.video_attn_type = video_attn_type
        # Shared 2D RoPE table, sized for grids up to 512x512 patches.
        self.rope_2d = Rope2DPosEmbRepeated(
            block_cfg["hidden_dim"] // block_cfg["num_heads"], 512, 512
        )
        self.blocks = nn.ModuleList(
            [
                MoonViTEncoderLayer(
                    **block_cfg,
                    quant_config=quant_config,
                    prefix=f"{prefix}.blocks.{layer_idx}",
                )
                for layer_idx in range(num_layers)
            ]
        )
        self.final_layernorm = nn.LayerNorm(hidden_dim)
    def forward(
        self,
        hidden_states: torch.Tensor,
        grid_thws: torch.Tensor,
    ) -> torch.Tensor:
        """Run packed tokens through every layer with varlen attention.

        cu_seqlens marks per-sample boundaries (each sample spans t*h*w
        tokens) so attention never crosses samples.
        """
        rope_freqs_cis = self.rope_2d.get_freqs_cis(
            grid_thws=grid_thws, device=hidden_states.device
        )
        # Per-sample token counts, with a leading 0 for the cumulative sum.
        lengths = torch.cat(
            (
                torch.zeros(1, dtype=grid_thws.dtype, device=grid_thws.device),
                grid_thws[:, 0] * grid_thws[:, 1] * grid_thws[:, 2],
            )
        )
        cu_seqlens = lengths.to(hidden_states.device).cumsum(dim=0, dtype=torch.int32)
        for block in self.blocks:
            hidden_states = block(
                hidden_states, cu_seqlens, rope_freqs_cis=rope_freqs_cis
            )
        hidden_states = self.final_layernorm(hidden_states)
        return hidden_states
def tpool_patch_merger(
    x: torch.Tensor,
    grid_thws: torch.Tensor,
    merge_kernel_size: tuple[int, int] = (2, 2),
) -> list[torch.Tensor]:
    """Mean-pool each sample over time and group spatial patches.

    Splits the packed token tensor per sample, averages its t frames, and
    collects every (kh x kw) spatial neighborhood into one row, yielding a
    (nh*nw, kh*kw, d) tensor per sample.
    """
    kh, kw = merge_kernel_size
    token_counts = (grid_thws[:, 0] * grid_thws[:, 1] * grid_thws[:, 2]).tolist()
    merged: list[torch.Tensor] = []
    for seq, (t, h, w) in zip(x.split(token_counts, dim=0), grid_thws.tolist()):
        nh, nw = h // kh, w // kw
        # (t*h*w, d) -> (t, nh, kh, nw, kw, d); pool over time first so the
        # tensor shrinks before the permute.
        pooled = seq.view(t, nh, kh, nw, kw, -1).mean(dim=0)
        # (nh, kh, nw, kw, d) -> (nh, nw, kh, kw, d) -> (nh*nw, kh*kw, d)
        merged.append(pooled.permute(0, 2, 1, 3, 4).reshape(nh * nw, kh * kw, -1))
    return merged
class MoonViT3dPretrainedModel(nn.Module):
    """Main vision tower model.
    Uses KimiK25VisionConfig directly from transformers_utils/configs/kimi_k25.py.

    Pipeline: 3D patch embedding -> transformer encoder -> spatial patch
    merging with temporal pooling.
    """
    def __init__(
        self,
        config: KimiK25VisionConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        # Deep-copy so local use never mutates the shared config object.
        config = deepcopy(config)
        self.config = config  # Required for run_dp_sharded_mrope_vision_model
        self.merge_kernel_size = config.merge_kernel_size
        self.patch_size = config.patch_size
        self.merge_type = config.merge_type
        self.patch_embed = MoonVision3dPatchEmbed(
            out_dim=config.hidden_size,
            patch_size=config.patch_size,
            pos_emb_height=config.init_pos_emb_height,
            pos_emb_width=config.init_pos_emb_width,
            pos_emb_time=config.init_pos_emb_time,
            pos_emb_type=config.pos_emb_type,
        )
        self.encoder = MoonViT3dEncoder(
            hidden_dim=config.hidden_size,
            num_layers=config.num_hidden_layers,
            block_cfg={
                "num_heads": config.num_attention_heads,
                "hidden_dim": config.hidden_size,
                "mlp_dim": config.intermediate_size,
                "activation": get_act_fn("gelu_pytorch_tanh"),
                "attn_bias": True,
            },
            video_attn_type=config.video_attn_type,
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "encoder"),
        )
    def forward(
        self, pixel_values: torch.Tensor, grid_thws: torch.Tensor
    ) -> list[torch.Tensor]:
        """
        Args:
            pixel_values (torch.Tensor): The input pixel values.
            grid_thws (torch.Tensor): Temporal, height and width.
        Returns:
            list[torch.Tensor]: per-sample merged tokens from
            tpool_patch_merger (annotation corrected: the merger returns a
            list, and "sd2_tpool" is the only supported merge type).
        """
        hidden_states = self.patch_embed(pixel_values, grid_thws)
        hidden_states = self.encoder(hidden_states, grid_thws)
        if (
            self.merge_type == "sd2_tpool"
        ):  # spatial downsampling 2x with temporal pooling all
            hidden_states = tpool_patch_merger(
                hidden_states, grid_thws, merge_kernel_size=self.merge_kernel_size
            )
        else:
            raise NotImplementedError(f"Not support {self.merge_type}")
        return hidden_states
@torch.inference_mode()
def mm_projector_forward(mm_projector: torch.nn.Module, vt_output: list[torch.Tensor]):
    """Project concatenated vision-tower outputs and split back per item.

    Each item's leading dimension is recorded so the projected batch can be
    split into per-item tensors again.
    """
    split_sizes = [seq.shape[0] for seq in vt_output]
    projected = mm_projector(torch.cat(vt_output, dim=0))
    projected = projected.reshape(-1, projected.shape[-1])
    return torch.split(projected, split_sizes)
@torch.inference_mode()
def vision_tower_forward(
    vision_tower: Any,
    pixel_values: torch.Tensor,
    grid_thw: torch.Tensor,
    mm_projector: Any,
    use_data_parallel: bool,
) -> list[torch.Tensor]:
    """Run the vision tower (optionally DP-sharded) and project its outputs.

    With data parallelism the batch is sharded across ranks via vLLM's
    mrope-aware DP utility; otherwise the tower runs locally. Either way the
    per-item features then go through the multimodal projector.
    """
    if use_data_parallel:
        vt_outputs = run_dp_sharded_mrope_vision_model(
            vision_model=vision_tower,
            pixel_values=pixel_values,
            grid_thw_list=grid_thw.tolist(),
            rope_type="rope_2d",
        )
    else:
        vt_outputs = vision_tower(pixel_values, grid_thw)
    projected = mm_projector_forward(mm_projector, list(vt_outputs))
    return list(projected)
class KimiK25MultiModalProjector(nn.Module):
    """Multi-modal projector with patch merging for Kimi-K2.5.

    Applies LayerNorm to vision-tower features, folds each merge_kernel_size
    patch group into one token of size hidden_size * kh * kw, and maps it to
    the language-model width with a two-layer GELU MLP.
    """
    def __init__(
        self,
        config: KimiK25VisionConfig,
        use_data_parallel: bool = False,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.use_data_parallel = use_data_parallel
        # Hidden size after patch merging
        merge_h, merge_w = config.merge_kernel_size
        self.hidden_size = config.hidden_size * merge_h * merge_w
        # Honor the configured LayerNorm epsilon (projector_ln_eps) instead
        # of hard-coding 1e-5; the config default is 1e-5, so behavior is
        # unchanged for existing configs.
        self.pre_norm = torch.nn.LayerNorm(
            config.hidden_size, eps=getattr(config, "projector_ln_eps", 1e-5)
        )
        self.linear_1 = ReplicatedLinear(
            self.hidden_size,
            self.hidden_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_1",
        )
        self.linear_2 = ReplicatedLinear(
            self.hidden_size,
            config.mm_hidden_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.linear_2",
        )
        self.act = GELUActivation()
    def forward(self, image_features: torch.Tensor) -> torch.Tensor:
        """Normalize, fold patch groups into merged tokens, and project.

        :param image_features: (..., kh*kw, hidden_size) merged patch groups
            (the layout produced by tpool_patch_merger)
        :return: (num_groups, mm_hidden_size) projected embeddings
        """
        # view(-1, merged_hidden) folds the trailing (kh*kw, hidden) pair of
        # dims into a single merged token per group.
        hidden_states = self.pre_norm(image_features).view(-1, self.hidden_size)
        hidden_states, _ = self.linear_1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.linear_2(hidden_states)
        return hidden_states
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/kimi_k25_vit.py",
"license": "Apache License 2.0",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/transformers_utils/configs/kimi_k25.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Kimi-K2.5 Model Configuration.
This configuration supports video-chunk as an internal modality type.
A video-chunk is the smallest independently processable unit of video.
"""
from transformers import DeepseekV3Config
from transformers.configuration_utils import PretrainedConfig
class KimiK25VisionConfig(PretrainedConfig):
    """Configuration for the Kimi-K2.5 vision tower and multimodal projector."""
    model_type = "kimi_k25_vision"
    def __init__(
        self,
        # Vision Tower
        patch_size: int = 14,
        init_pos_emb_height: int = 64,
        init_pos_emb_width: int = 64,
        init_pos_emb_time: int = 4,
        pos_emb_type: str = "divided_fixed",
        num_attention_heads: int = 16,
        num_hidden_layers: int = 27,
        hidden_size: int = 1152,
        intermediate_size: int = 4304,
        merge_kernel_size: tuple[int, int] = (2, 2),
        video_attn_type: str = "spatial_temporal",
        merge_type: str = "sd2_tpool",
        # MM Projector
        mm_projector_type: str = "patchmerger",
        mm_hidden_size: int | None = None,
        projector_hidden_act: str = "gelu",
        projector_ln_eps: float = 1e-5,
        **kwargs,
    ):
        """
        :param mm_hidden_size: projector output width; defaults to
            hidden_size here and is typically replaced with the text model's
            hidden size by KimiK25Config.__init__
        """
        super().__init__(**kwargs)
        # Vision Tower
        self.patch_size = patch_size
        self.init_pos_emb_height = init_pos_emb_height
        self.init_pos_emb_width = init_pos_emb_width
        self.init_pos_emb_time = init_pos_emb_time
        self.pos_emb_type = pos_emb_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.merge_kernel_size = merge_kernel_size
        self.video_attn_type = video_attn_type
        self.merge_type = merge_type
        # MM Projector
        self.mm_projector_type = mm_projector_type
        if mm_hidden_size is not None:
            self.mm_hidden_size = mm_hidden_size
        else:
            # Fall back to the vision hidden size when not explicitly set.
            self.mm_hidden_size = hidden_size
        self.projector_hidden_act = projector_hidden_act
        self.projector_ln_eps = projector_ln_eps
class KimiK25Config(PretrainedConfig):
    """Kimi-K2.5 model configuration.
    Kimi-K2.5 extends Kimi-K2 with vision support using video-chunks.
    A video-chunk consists of multiple consecutive frames
    that are processed together with temporal pooling.
    Args:
        vision_config: Configuration for the vision tower and projector.
        text_config: Configuration for the text model (DeepseekV3).
        ignore_index: The ignore index for the loss function.
        media_placeholder_token_id: The token ID for media placeholders.
        pad_token_id: The token ID for padding.
    """
    model_type = "kimi_k25"
    def __init__(
        self,
        vision_config: dict | KimiK25VisionConfig | None = None,
        text_config: dict | DeepseekV3Config | None = None,
        ignore_index: int = -100,
        media_placeholder_token_id: int = 163605,
        pad_token_id: int = 0,
        use_unified_vision_chunk: bool = False,
        video_placeholder: str = "<|kimi_k25_video_placeholder|>",
        **kwargs,
    ):
        # Vision config: accept None (defaults), a dict, or a ready instance.
        if vision_config is None:
            vision_config = KimiK25VisionConfig()
        elif isinstance(vision_config, dict):
            vision_config = KimiK25VisionConfig(**vision_config)
        self.vision_config: KimiK25VisionConfig = vision_config
        # Text config: same three accepted forms as vision_config.
        if text_config is None:
            text_config = DeepseekV3Config()
        elif isinstance(text_config, dict):
            text_config = DeepseekV3Config(**text_config)
        self.text_config: DeepseekV3Config = text_config
        # Set mm_hidden_size to text hidden size if not explicitly set
        # NOTE(review): "not explicitly set" is detected by mm_hidden_size
        # equaling the vision hidden size, so an explicit value equal to the
        # vision hidden size would also be overridden here.
        if self.vision_config.mm_hidden_size == self.vision_config.hidden_size:
            self.vision_config.mm_hidden_size = self.text_config.hidden_size
        # Other config
        self.ignore_index = ignore_index
        self.media_placeholder_token_id = media_placeholder_token_id
        self.use_unified_vision_chunk = use_unified_vision_chunk
        self.video_placeholder = video_placeholder
        # Propagate quantization config from text model
        if getattr(self.text_config, "quantization_config", None) is not None:
            self.quantization_config = self.text_config.quantization_config
        super().__init__(pad_token_id=pad_token_id, **kwargs)
    @property
    def hidden_size(self) -> int:
        """Get hidden size from text config for compatibility."""
        return self.text_config.hidden_size
    @property
    def vocab_size(self) -> int:
        """Get vocab size from text config for compatibility."""
        return self.text_config.vocab_size
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/transformers_utils/configs/kimi_k25.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/flashinfer_a2a_prepare_finalize.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.distributed import get_ep_group
from vllm.distributed.device_communicators.base_device_communicator import (
All2AllManagerBase,
)
from vllm.forward_context import get_forward_context
from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig
from vllm.model_executor.layers.fused_moe.utils import moe_kernel_quantize_input
from vllm.utils.flashinfer import nvfp4_block_scale_interleave
def get_local_sizes():
    """Return the per-DP-rank token chunk sizes for the current forward pass."""
    dp_metadata = get_forward_context().dp_metadata
    return dp_metadata.get_chunk_sizes_across_dp_rank()
class FlashInferA2APrepareAndFinalize(mk.FusedMoEPrepareAndFinalize):
    """Base class for FlashInfer MoE prepare and finalize operations.

    prepare() dispatches activations and routing metadata to expert-parallel
    ranks through FlashInfer's MNNVL all-to-all; finalize() combines the
    expert outputs back onto their home ranks.
    """
    def __init__(
        self,
        num_dispatchers: int = 1,
    ):
        super().__init__()
        self.num_dispatchers_ = num_dispatchers
        # All-to-all manager owned by the EP group's device communicator.
        self.all2all_manager = get_ep_group().device_communicator.all2all_manager
    @property
    def activation_format(self) -> mk.FusedMoEActivationFormat:
        # Activations keep the standard (non-batched-expert) layout.
        return mk.FusedMoEActivationFormat.Standard
    def max_num_tokens_per_rank(self) -> int | None:
        # No per-rank token cap.
        return None
    def topk_indices_dtype(self) -> torch.dtype | None:
        # No required dtype for top-k indices.
        return None
    def num_dispatchers(self) -> int:
        return self.num_dispatchers_
    def output_is_reduced(self) -> bool:
        # NOTE(review): False per the base-class contract, i.e. the finalized
        # output is not yet reduced — confirm against FusedMoEPrepareAndFinalize.
        return False
    def _apply_router_weight_on_input(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
    ) -> None:
        """Apply router weight on input if needed (in place on a1)."""
        if apply_router_weight_on_input:
            topk = topk_ids.size(1)
            assert topk == 1, (
                "apply_router_weight_on_input is only implemented for topk=1"
            )
            a1.mul_(topk_weights.to(a1.dtype))
    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool = False,
    ) -> mk.PrepareResultType:
        """Quantize (unless deferred) and all-to-all activations + routing.

        Stores the returned alltoall_info on self so finalize() can reverse
        the exchange.
        """
        self._apply_router_weight_on_input(
            a1, topk_weights, topk_ids, apply_router_weight_on_input
        )
        global_num_tokens_cpu = get_local_sizes()
        top_k = topk_ids.size(1)
        (self.alltoall_info, topk_ids, topk_weights, a1q, a1q_scale) = (
            flashinfer_alltoall_dispatch(
                self.all2all_manager,
                global_num_tokens_cpu,
                a1,
                quant_config.a1_gscale,
                topk_ids,
                topk_weights,
                top_k,
                num_experts,
                quant_config,
                defer_input_quant=defer_input_quant,
            )
        )
        # The third slot of the prepare result is unused by this path.
        return a1q, a1q_scale, None, topk_ids, topk_weights
    def finalize(
        self,
        output: torch.Tensor,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        weight_and_reduce_impl: mk.TopKWeightAndReduce,
    ) -> None:
        """Combine expert outputs back onto their origin ranks into `output`."""
        top_k = topk_ids.size(1)
        token_count = output.shape[0]
        fused_expert_output = flashinfer_alltoall_combine(
            self.all2all_manager,
            fused_expert_output,
            top_k=top_k,
            token_count=token_count,
            alltoall_info=self.alltoall_info,
        )
        output.copy_(fused_expert_output)
def flashinfer_alltoall_dispatch(
    all2all_manager: All2AllManagerBase,
    global_num_tokens_cpu: list[int] | None,
    x: torch.Tensor,
    gs: torch.Tensor,
    topk_ids: torch.Tensor,
    topk_weights: torch.Tensor,
    top_k: int,
    num_experts: int,
    quant_config: FusedMoEQuantConfig,
    defer_input_quant: bool = False,
):
    """Dispatch tokens and routing metadata to EP ranks via FlashInfer A2A.

    Steps: (1) exchange routing metadata to build alltoall_info,
    (2) optionally quantize the activations, (3) all-to-all the activations
    (and, when quantized, their scales), and (4) for nvfp4, swizzle the
    block scales after the exchange.

    :param global_num_tokens_cpu: per-DP-rank token counts; None means use
        x's own token count (annotation widened to match this check)
    :param gs: global activation scale (quant_config.a1_gscale at the caller)
    :param defer_input_quant: when True, send raw activations and return
        None scales
    :return: (alltoall_info, topk_ids, topk_weights, x, x_sf)
    """
    from flashinfer.comm.trtllm_alltoall import MnnvlMoe
    assert all2all_manager.ensure_alltoall_workspace_initialized(), (
        "FlashInfer AllToAll workspace not available"
    )
    ep_rank = all2all_manager.rank
    ep_size = all2all_manager.world_size
    # Worst-case token count across DP ranks bounds the A2A buffers.
    max_num_token = (
        max(global_num_tokens_cpu) if global_num_tokens_cpu is not None else x.shape[0]
    )
    orig_topk_weights_dtype = topk_weights.dtype
    alltoall_info, topk_ids, topk_weights, _ = (
        MnnvlMoe.mnnvl_moe_alltoallv_prepare_without_allgather(
            topk_ids,
            topk_weights,
            None,
            all2all_manager.prepare_workspace_tensor,
            max_num_token,
            ep_rank,
            ep_size,
            num_experts,
            num_experts,
            top_k,
        )
    )
    # Bitwise reinterpret (no conversion) back to the caller's weight dtype;
    # presumably the prepare kernel returns the weights under another dtype —
    # confirm against the FlashInfer kernel.
    topk_weights = topk_weights.view(dtype=orig_topk_weights_dtype)
    if not defer_input_quant:
        x, x_sf = moe_kernel_quantize_input(
            x,
            gs,
            quant_config.quant_dtype,
            quant_config.per_act_token_quant,
            quant_config.block_shape,
            # NOTE: swizzling pads the scales to multiple of 128
            # which makes the scales tensor different shape than
            # the hidden states, breaking the A2A kernel. So, we
            # delay the swizzling until after the A2A.
            is_fp4_scale_swizzled=False,
        )
        x = MnnvlMoe.mnnvl_moe_alltoallv(
            x,
            alltoall_info,
            all2all_manager.workspace_tensor,
            ep_rank,
            ep_size,
        )
        # Scales travel through the same exchange as the activations.
        x_sf = MnnvlMoe.mnnvl_moe_alltoallv(
            x_sf,
            alltoall_info,
            all2all_manager.workspace_tensor,
            ep_rank,
            ep_size,
        )
        # Swizzle after the A2A if nvfp4.
        if quant_config.quant_dtype == "nvfp4":
            if x_sf.element_size() == 1:
                x_sf = x_sf.view(torch.uint8)
            x_sf = nvfp4_block_scale_interleave(x_sf)
    else:
        # Block-scale path: pass activations through without quantization
        x_sf = None
        x = MnnvlMoe.mnnvl_moe_alltoallv(
            x,
            alltoall_info,
            all2all_manager.workspace_tensor,
            ep_rank,
            ep_size,
        )
    return alltoall_info, topk_ids, topk_weights, x, x_sf
def flashinfer_alltoall_combine(
    all2all_manager: All2AllManagerBase,
    output: torch.Tensor,
    top_k: int,
    token_count: int,
    alltoall_info,
):
    """Return expert outputs to their home ranks via FlashInfer's combine.

    Reverses the token exchange described by `alltoall_info`, yielding the
    combined (token_count, ...) tensor on each rank.
    """
    from flashinfer.comm.trtllm_alltoall import MnnvlMoe
    assert all2all_manager.ensure_alltoall_workspace_initialized(), (
        "FlashInfer AllToAll workspace not available"
    )
    rank = all2all_manager.rank
    world_size = all2all_manager.world_size
    return MnnvlMoe.mnnvl_moe_alltoallv_combine(
        output,
        alltoall_info,
        all2all_manager.workspace_tensor,
        ep_rank=rank,
        ep_size=world_size,
        top_k=top_k,
        token_count=token_count,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/flashinfer_a2a_prepare_finalize.py",
"license": "Apache License 2.0",
"lines": 198,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/test_access_log_filter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for the UvicornAccessLogFilter class.
"""
import logging
from vllm.logging_utils.access_log_filter import (
UvicornAccessLogFilter,
create_uvicorn_log_config,
)
class TestUvicornAccessLogFilter:
    """Test cases for UvicornAccessLogFilter."""

    @staticmethod
    def _access_record(
        path: str, method: str = "GET", status: int = 200
    ) -> logging.LogRecord:
        """Build a LogRecord shaped like uvicorn's access-log records.

        Uvicorn logs access lines with msg '%s - "%s %s HTTP/%s" %d' and
        args (client_addr, method, path, http_version, status_code); the
        filter under test inspects args[2]. Centralizing the construction
        removes the duplicated record setup from every test below.
        """
        return logging.LogRecord(
            name="uvicorn.access",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg='%s - "%s %s HTTP/%s" %d',
            args=("127.0.0.1:12345", method, path, "1.1", status),
            exc_info=None,
        )

    def test_filter_allows_all_when_no_excluded_paths(self):
        """Filter should allow all logs when no paths are excluded."""
        log_filter = UvicornAccessLogFilter(excluded_paths=[])
        assert log_filter.filter(self._access_record("/v1/completions")) is True

    def test_filter_allows_all_when_excluded_paths_is_none(self):
        """Filter should allow all logs when excluded_paths is None."""
        log_filter = UvicornAccessLogFilter(excluded_paths=None)
        assert log_filter.filter(self._access_record("/health")) is True

    def test_filter_excludes_health_endpoint(self):
        """Filter should exclude /health endpoint when configured."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        assert log_filter.filter(self._access_record("/health")) is False

    def test_filter_excludes_metrics_endpoint(self):
        """Filter should exclude /metrics endpoint when configured."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/metrics"])
        assert log_filter.filter(self._access_record("/metrics")) is False

    def test_filter_allows_non_excluded_endpoints(self):
        """Filter should allow endpoints not in the excluded list."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health", "/metrics"])
        record = self._access_record("/v1/completions", method="POST")
        assert log_filter.filter(record) is True

    def test_filter_excludes_multiple_endpoints(self):
        """Filter should exclude multiple configured endpoints."""
        log_filter = UvicornAccessLogFilter(
            excluded_paths=["/health", "/metrics", "/ping"]
        )
        for path in ("/health", "/metrics", "/ping"):
            assert log_filter.filter(self._access_record(path)) is False

    def test_filter_with_query_parameters(self):
        """Filter should exclude endpoints even with query parameters."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        record = self._access_record("/health?verbose=true")
        assert log_filter.filter(record) is False

    def test_filter_different_http_methods(self):
        """Filter should exclude endpoints regardless of HTTP method."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/ping"])
        for method in ("GET", "POST"):
            record = self._access_record("/ping", method=method)
            assert log_filter.filter(record) is False

    def test_filter_with_different_status_codes(self):
        """Filter should exclude endpoints regardless of status code."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        for status_code in (200, 500, 503):
            record = self._access_record("/health", status=status_code)
            assert log_filter.filter(record) is False
class TestCreateUvicornLogConfig:
    """Test cases for create_uvicorn_log_config function."""

    def test_creates_valid_config_structure(self):
        """Config should have required logging configuration keys."""
        cfg = create_uvicorn_log_config(excluded_paths=["/health"])
        # Every top-level dictConfig section must be present.
        for key in (
            "version",
            "disable_existing_loggers",
            "formatters",
            "handlers",
            "loggers",
            "filters",
        ):
            assert key in cfg
        assert cfg["version"] == 1

    def test_config_includes_access_log_filter(self):
        """Config should include the access log filter."""
        cfg = create_uvicorn_log_config(excluded_paths=["/health", "/metrics"])
        assert "access_log_filter" in cfg["filters"]
        spec = cfg["filters"]["access_log_filter"]
        assert spec["()"] == UvicornAccessLogFilter
        assert spec["excluded_paths"] == ["/health", "/metrics"]

    def test_config_applies_filter_to_access_handler(self):
        """Config should apply the filter to the access handler."""
        cfg = create_uvicorn_log_config(excluded_paths=["/health"])
        assert "access" in cfg["handlers"]
        access_handler = cfg["handlers"]["access"]
        assert "filters" in access_handler
        assert "access_log_filter" in access_handler["filters"]

    def test_config_with_custom_log_level(self):
        """Config should respect custom log level."""
        cfg = create_uvicorn_log_config(excluded_paths=["/health"], log_level="debug")
        # The level must be upper-cased and applied to all three loggers.
        for logger_name in ("uvicorn", "uvicorn.access", "uvicorn.error"):
            assert cfg["loggers"][logger_name]["level"] == "DEBUG"

    def test_config_with_empty_excluded_paths(self):
        """Config should work with empty excluded paths."""
        cfg = create_uvicorn_log_config(excluded_paths=[])
        assert cfg["filters"]["access_log_filter"]["excluded_paths"] == []

    def test_config_with_none_excluded_paths(self):
        """Config should work with None excluded paths."""
        cfg = create_uvicorn_log_config(excluded_paths=None)
        assert cfg["filters"]["access_log_filter"]["excluded_paths"] == []
class TestIntegration:
    """Integration tests for the access log filter."""

    def test_filter_with_real_logger(self):
        """Test filter works with a real Python logger simulating uvicorn."""
        # The filter keys off the logger name, so the real "uvicorn.access"
        # logger must be used. Save its state up front and restore it in a
        # finally block so this test cannot leak a handler or level change
        # into other tests (the original version left its handler attached).
        logger = logging.getLogger("uvicorn.access")
        saved_handlers = logger.handlers
        saved_level = logger.level

        # Collects the formatted messages that survive the filter.
        logged_messages: list[str] = []

        class TrackingHandler(logging.Handler):
            """Handler that records messages instead of emitting them."""

            def emit(self, record):
                logged_messages.append(record.getMessage())

        handler = TrackingHandler()
        handler.setLevel(logging.INFO)
        handler.addFilter(
            UvicornAccessLogFilter(excluded_paths=["/health", "/metrics"])
        )

        logger.setLevel(logging.INFO)
        logger.handlers = [handler]
        try:
            # Log using uvicorn's access format with an args tuple:
            # '%s - "%s %s HTTP/%s" %d'
            for method, path in (
                ("GET", "/health"),
                ("GET", "/v1/completions"),
                ("GET", "/metrics"),
                ("POST", "/v1/chat/completions"),
            ):
                logger.info(
                    '%s - "%s %s HTTP/%s" %d',
                    "127.0.0.1:12345",
                    method,
                    path,
                    "1.1",
                    200,
                )
        finally:
            # Restore the shared logger's handlers and level.
            logger.handlers = saved_handlers
            logger.setLevel(saved_level)

        # Verify only non-excluded endpoints were logged.
        assert len(logged_messages) == 2
        assert "/v1/completions" in logged_messages[0]
        assert "/v1/chat/completions" in logged_messages[1]

    def test_filter_allows_non_uvicorn_access_logs(self):
        """Test filter allows logs from non-uvicorn.access loggers."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        # Log record from a different logger name.
        record = logging.LogRecord(
            name="uvicorn.error",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="Some error message about /health",
            args=(),
            exc_info=None,
        )
        # Should allow because it's not from uvicorn.access.
        assert log_filter.filter(record) is True

    def test_filter_handles_malformed_args(self):
        """Test filter handles log records with unexpected args format."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        # Log record with insufficient args (fewer than the 3 the filter
        # needs to locate the request path).
        record = logging.LogRecord(
            name="uvicorn.access",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="Some message",
            args=("only", "two"),
            exc_info=None,
        )
        # Should allow because args doesn't have the expected format.
        assert log_filter.filter(record) is True

    def test_filter_handles_non_tuple_args(self):
        """Test filter handles log records with non-tuple args."""
        log_filter = UvicornAccessLogFilter(excluded_paths=["/health"])
        # Log record with None args — nothing to inspect.
        record = logging.LogRecord(
            name="uvicorn.access",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="Some message without args",
            args=None,
            exc_info=None,
        )
        # Should allow because args is None.
        assert log_filter.filter(record) is True
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/test_access_log_filter.py",
"license": "Apache License 2.0",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/logging_utils/access_log_filter.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Access log filter for uvicorn to exclude specific endpoints from logging.
This module provides a logging filter that can be used to suppress access logs
for specific endpoints (e.g., /health, /metrics) to reduce log noise in
production environments.
"""
import logging
from urllib.parse import urlparse
class UvicornAccessLogFilter(logging.Filter):
"""
A logging filter that excludes access logs for specified endpoint paths.
This filter is designed to work with uvicorn's access logger. It checks
the log record's arguments for the request path and filters out records
matching the excluded paths.
Uvicorn access log format:
'%s - "%s %s HTTP/%s" %d'
(client_addr, method, path, http_version, status_code)
Example:
127.0.0.1:12345 - "GET /health HTTP/1.1" 200
Args:
excluded_paths: A list of URL paths to exclude from logging.
Paths are matched exactly.
Example: ["/health", "/metrics"]
"""
def __init__(self, excluded_paths: list[str] | None = None):
super().__init__()
self.excluded_paths = set(excluded_paths or [])
def filter(self, record: logging.LogRecord) -> bool:
"""
Determine if the log record should be logged.
Args:
record: The log record to evaluate.
Returns:
True if the record should be logged, False otherwise.
"""
if not self.excluded_paths:
return True
# This filter is specific to uvicorn's access logs.
if record.name != "uvicorn.access":
return True
# The path is the 3rd argument in the log record's args tuple.
# See uvicorn's access logging implementation for details.
log_args = record.args
if isinstance(log_args, tuple) and len(log_args) >= 3:
path_with_query = log_args[2]
# Get path component without query string.
if isinstance(path_with_query, str):
path = urlparse(path_with_query).path
if path in self.excluded_paths:
return False
return True
def create_uvicorn_log_config(
    excluded_paths: list[str] | None = None,
    log_level: str = "info",
) -> dict:
    """Build a uvicorn ``log_config`` dict with access-log path filtering.

    The returned dict is suitable for uvicorn's ``log_config`` parameter.
    It wires an ``UvicornAccessLogFilter`` onto the access handler so the
    listed paths never appear in access logs.

    Args:
        excluded_paths: URL paths to exclude from access logs.
        log_level: Level applied to all uvicorn loggers.

    Returns:
        A ``logging.config.dictConfig``-style configuration dictionary.

    Example:
        >>> config = create_uvicorn_log_config(["/health", "/metrics"])
        >>> uvicorn.run(app, log_config=config)
    """
    level = log_level.upper()

    filters = {
        "access_log_filter": {
            "()": UvicornAccessLogFilter,
            "excluded_paths": excluded_paths or [],
        },
    }
    formatters = {
        "default": {
            "()": "uvicorn.logging.DefaultFormatter",
            "fmt": "%(levelprefix)s %(message)s",
            "use_colors": None,
        },
        "access": {
            "()": "uvicorn.logging.AccessFormatter",
            "fmt": '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s',  # noqa: E501
        },
    }
    handlers = {
        "default": {
            "formatter": "default",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stderr",
        },
        # Only the access handler carries the path filter.
        "access": {
            "formatter": "access",
            "class": "logging.StreamHandler",
            "stream": "ext://sys.stdout",
            "filters": ["access_log_filter"],
        },
    }
    # All three uvicorn loggers share the same level and do not propagate;
    # only uvicorn.access routes through the filtered access handler.
    loggers = {
        name: {
            "handlers": ["access" if name == "uvicorn.access" else "default"],
            "level": level,
            "propagate": False,
        }
        for name in ("uvicorn", "uvicorn.error", "uvicorn.access")
    }

    return {
        "version": 1,
        "disable_existing_loggers": False,
        "filters": filters,
        "formatters": formatters,
        "handlers": handlers,
        "loggers": loggers,
    }
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/logging_utils/access_log_filter.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/glm_ocr.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/Glm4v/modeling_Glm4v.py
# Copyright 2026 The ZhipuAI Team.
# Copyright 2026 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GLM-OCR model compatible with HuggingFace weights."""
from collections.abc import Callable
from functools import partial
from typing import TYPE_CHECKING
import torch
import torch.nn as nn
from einops import rearrange
if TYPE_CHECKING:
from transformers.models.glm_ocr.configuration_glm_ocr import GlmOcrVisionConfig
from vllm.config import VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size, parallel_state
from vllm.distributed import utils as dist_utils
from vllm.logger import init_logger
from vllm.model_executor.layers.attention.mm_encoder_attention import (
MMEncoderAttention,
)
from vllm.model_executor.layers.conv import Conv2dLayer
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.rotary_embedding.common import (
ApplyRotaryEmb,
)
from vllm.model_executor.models.glm4_1v import (
Glm4vDummyInputsBuilder,
Glm4vForConditionalGeneration,
Glm4vMultiModalProcessor,
Glm4vPatchMerger,
Glm4vProcessingInfo,
Glm4vVisionBlock,
Glm4vVisionMLP,
Glm4vVisionPatchEmbed,
Glm4vVisionTransformer,
)
from vllm.multimodal import MULTIMODAL_REGISTRY
from .utils import (
maybe_prefix,
)
from .vision import (
get_vit_attn_backend,
is_vit_use_data_parallel,
)
logger = init_logger(__name__)
class GlmOcrVisionMLP(Glm4vVisionMLP):
    # GLM-OCR reuses the GLM-4.1V vision MLP unchanged; the subclass only
    # provides a model-specific name.
    pass
class GlmOcrVisionAttention(nn.Module):
    """Self-attention for the GLM-OCR vision encoder.

    Fused QKV projection with per-head RMSNorm on q/k, rotary position
    embedding applied to q and k, and a row-parallel output projection.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        projection_size: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Per attention head and per partition values.
        # When the ViT runs data-parallel, tensor parallelism is disabled
        # for these layers (tp_size=1, tp_rank=0) and linears use disable_tp.
        use_data_parallel = is_vit_use_data_parallel()
        self.tp_size = (
            1 if use_data_parallel else get_tensor_model_parallel_world_size()
        )
        self.tp_rank = (
            0 if use_data_parallel else parallel_state.get_tensor_model_parallel_rank()
        )
        # Head size used by the attention kernel (from projection_size).
        self.hidden_size_per_attention_head = dist_utils.divide(
            projection_size, num_heads
        )
        # Heads handled by this TP partition.
        self.num_attention_heads_per_partition = dist_utils.divide(
            num_heads, self.tp_size
        )
        # Head size used by the q/k RMSNorm (from embed_dim). NOTE(review):
        # equals hidden_size_per_attention_head when projection_size ==
        # embed_dim, which is how GlmOcrVisionBlock constructs this module.
        self.head_dim = embed_dim // num_heads
        self.q_norm = RMSNorm(self.head_dim, eps=1e-5)
        self.k_norm = RMSNorm(self.head_dim, eps=1e-5)
        self.qkv = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=num_heads,
            total_num_kv_heads=num_heads,
            bias=True,
            quant_config=quant_config,
            # Change qkv prefix to align with GLM-4.5V-FP8 quantization cfg
            prefix=f"{prefix}.qkv_proj" if quant_config else f"{prefix}.qkv",
            disable_tp=use_data_parallel,
        )
        self.proj = RowParallelLinear(
            input_size=projection_size,
            output_size=embed_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.proj",
            bias=True,
            disable_tp=use_data_parallel,
        )
        self.attn = MMEncoderAttention(
            num_heads=self.num_attention_heads_per_partition,
            head_size=self.hidden_size_per_attention_head,
            scale=self.hidden_size_per_attention_head**-0.5,
            prefix=f"{prefix}.attn",
        )
        # enforce_enable guarantees rotary embedding is applied even on
        # platforms where it might otherwise be skipped.
        self.apply_rotary_emb = ApplyRotaryEmb(enforce_enable=True)

    def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
        """Split the fused QKV tensor into per-head q, k, v tensors."""
        # [s, b, 3 * head * head_dim]
        seq_len, bs, _ = qkv.shape
        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
        q, k, v = qkv.chunk(3, dim=2)
        # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
        new_shape = (
            seq_len,
            bs,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
        )
        q, k, v = (x.view(*new_shape) for x in (q, k, v))
        return q, k, v

    def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb_cos: torch.Tensor,
        rotary_pos_emb_sin: torch.Tensor,
        max_seqlen: torch.Tensor | None = None,  # Only used for Flash Attention
    ) -> torch.Tensor:
        """Run attention over ``x`` ([s, b, c]); returns [s, b, c]."""
        # [s, b, c] --> [s, b, head * 3 * head_dim]
        x, _ = self.qkv(x)
        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
        q, k, v = self.split_qkv(x)
        # RMSNorm on q, k (applied per head over the last dim).
        q_shape, k_shape = q.shape, k.shape
        q = self.q_norm(q.reshape(-1, self.head_dim)).view(q_shape)
        k = self.k_norm(k.reshape(-1, self.head_dim)).view(k_shape)
        # Move to batch-first layout for the attention kernel.
        q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous() for x in (q, k, v))
        if rotary_pos_emb_cos is not None and rotary_pos_emb_sin is not None:
            # Rotate q and k in one call by stacking them along the batch
            # dim: [2 * b, s, heads, head_dim].
            qk_concat = torch.cat([q, k], dim=0)
            qk_rotated = self.apply_rotary_emb(
                qk_concat,
                rotary_pos_emb_cos,
                rotary_pos_emb_sin,
            )
            q, k = torch.chunk(qk_rotated, 2, dim=0)
        context_layer = self.attn(
            query=q,
            key=k,
            value=v,
            cu_seqlens=cu_seqlens,
            max_seqlen=max_seqlen,
        )
        # Back to [s, b, hidden] before the output projection.
        context_layer = rearrange(context_layer, "b s h d -> s b (h d)").contiguous()
        output, _ = self.proj(context_layer)
        return output
class GlmOcrVisionBlock(Glm4vVisionBlock):
    """GLM-4.1V vision block with GLM-OCR attention and MLP submodules.

    ``super().__init__`` builds the Glm4v versions of norm1/norm2/attn/mlp;
    they are immediately replaced below with GLM-OCR-specific modules.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_hidden_dim: int,
        norm_layer: Callable[[int], nn.Module] | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__(
            dim,
            num_heads,
            mlp_hidden_dim,
            norm_layer,
            quant_config,
            prefix,
        )
        # Same default as the parent: LayerNorm with eps=1e-6.
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        # Override the parent-constructed submodules with GLM-OCR variants.
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
        self.attn = GlmOcrVisionAttention(
            embed_dim=dim,
            num_heads=num_heads,
            projection_size=dim,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        self.mlp = GlmOcrVisionMLP(
            dim,
            mlp_hidden_dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
class GlmOcrVisionPatchEmbed(Glm4vVisionPatchEmbed):
    # GLM-OCR reuses the GLM-4.1V patch embedding unchanged; alias only.
    pass
class GlmOcrPatchMerger(Glm4vPatchMerger):
    # GLM-OCR reuses the GLM-4.1V patch merger unchanged; alias only.
    pass
class GlmOcrVisionTransformer(Glm4vVisionTransformer):
    """Vision tower for GLM-OCR.

    Reuses the GLM-4.1V transformer skeleton but swaps in GLM-OCR blocks,
    a partial-rotary position embedding, and a conv-downsample + merger
    adapter. The parent's ``post_conv_layernorm`` and ``embeddings`` are
    deleted because this model does not use them.
    """

    def __init__(
        self,
        vision_config: "GlmOcrVisionConfig",
        norm_eps: float = 1e-5,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__(vision_config, norm_eps, quant_config, prefix)
        # Parent-built modules not used by GLM-OCR.
        del self.post_conv_layernorm
        del self.embeddings

        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        in_channels = vision_config.in_channels
        depth = vision_config.depth
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.out_hidden_size = vision_config.out_hidden_size

        # Use the GLM-OCR alias for consistency with the other submodules
        # in this file (behavior-identical subclass of
        # Glm4vVisionPatchEmbed, which the original instantiated directly).
        self.patch_embed = GlmOcrVisionPatchEmbed(
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            in_channels=in_channels,
            hidden_size=self.hidden_size,
        )

        norm_layer = partial(RMSNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        # Partial rotary embedding: only half of each head dim is rotated.
        self.rotary_pos_emb = get_rope(
            head_size=head_dim,
            max_position=8192,
            is_neox_style=True,
            rope_parameters={"partial_rotary_factor": 0.5},
        )
        self.blocks = nn.ModuleList(
            [
                GlmOcrVisionBlock(
                    dim=self.hidden_size,
                    num_heads=self.num_heads,
                    mlp_hidden_dim=vision_config.intermediate_size,
                    norm_layer=norm_layer,
                    quant_config=quant_config,
                    prefix=f"{prefix}.blocks.{layer_idx}",
                )
                for layer_idx in range(depth)
            ]
        )
        self.merger = GlmOcrPatchMerger(
            d_model=vision_config.out_hidden_size,
            context_dim=vision_config.out_hidden_size * vision_config.in_channels,
            quant_config=quant_config,
            bias=False,
            prefix=f"{prefix}.merger",
        )
        # Spatially merges adjacent patches while projecting to the
        # language-model hidden size.
        self.downsample = Conv2dLayer(
            in_channels=vision_config.hidden_size,
            out_channels=vision_config.out_hidden_size,
            kernel_size=vision_config.spatial_merge_size,
            stride=vision_config.spatial_merge_size,
        )
        self.post_layernorm = RMSNorm(
            vision_config.hidden_size, eps=vision_config.rms_norm_eps
        )
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim,
            dtype=torch.get_default_dtype(),
        )

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: torch.Tensor | list[list[int]],
    ) -> torch.Tensor:
        """Encode pixel patches into merged visual features.

        :param x: flattened patch pixel values.
        :param grid_thw: per-image (temporal, height, width) patch grid,
            as a tensor or nested list.
        :return: merged visual feature tensor of width ``out_hidden_size``.
        """
        if isinstance(grid_thw, list):
            grid_thw = torch.tensor(grid_thw, dtype=torch.int32)
        # patchify
        x = x.to(device=self.device, dtype=self.dtype)
        x = self.patch_embed(x)
        # compute position embedding
        rotary_pos_emb_cos, rotary_pos_emb_sin, image_type_ids = self.rot_pos_emb(
            grid_thw
        )
        # compute cu_seqlens: one sequence per (repeated) frame of h*w patches
        cu_seqlens = torch.repeat_interleave(
            grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
        ).cumsum(dim=0, dtype=torch.int32)
        cu_seqlens = torch.cat([cu_seqlens.new_zeros(1), cu_seqlens])
        cu_seqlens = cu_seqlens.to(self.device, non_blocking=True)
        # pre-compute max_seqlen for attn mask to reduce cuMemcpy operations
        max_seqlen = self.compute_attn_mask_seqlen(cu_seqlens)
        # transformers (blocks expect [s, b, c]; b == 1 here)
        x = x.unsqueeze(1)
        for blk in self.blocks:
            x = blk(
                x,
                cu_seqlens=cu_seqlens,
                rotary_pos_emb_cos=rotary_pos_emb_cos,
                rotary_pos_emb_sin=rotary_pos_emb_sin,
                max_seqlen=max_seqlen,
            )
        # adapter: norm -> NCHW conv downsample over merge windows -> merger
        x = self.post_layernorm(x)
        x = x.view(-1, self.spatial_merge_size, self.spatial_merge_size, x.shape[-1])
        x = x.permute(0, 3, 1, 2)
        x = self.downsample(x).view(-1, self.out_hidden_size)
        x = self.merger(x)
        return x
@MULTIMODAL_REGISTRY.register_processor(
    Glm4vMultiModalProcessor,
    info=Glm4vProcessingInfo,
    dummy_inputs=Glm4vDummyInputsBuilder,
)
class GlmOcrForConditionalGeneration(Glm4vForConditionalGeneration):
    """GLM-OCR: the GLM-4.1V generation stack with a GLM-OCR vision tower.

    Reuses the GLM-4.1V processor/info/dummy-inputs classes (the multimodal
    handling is shared) and only swaps the visual encoder.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        # Install the GLM-OCR vision transformer as the visual tower.
        # NOTE(review): assumes this overrides a tower built by the Glm4v
        # parent constructor — confirm against glm4_1v.py.
        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.visual = GlmOcrVisionTransformer(
                config.vision_config,
                norm_eps=getattr(config, "rms_norm_eps", 1e-5),
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "visual"),
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glm_ocr.py",
"license": "Apache License 2.0",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/glm_ocr_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2026 The ZhipuAI Team.
# Copyright 2026 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GLM-OCR MTP model compatible with HuggingFace weights."""
from collections.abc import Iterable
import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from .glm4 import Glm4DecoderLayer, get_spec_layer_idx_from_weight_name
from .glm4_moe_lite_mtp import (
Glm4MoeLiteMultiTokenPredictor,
SharedHead,
)
from .interfaces import SupportsPP
from .utils import (
is_pp_missing_parameter,
maybe_prefix,
)
class GlmOcrMultiTokenPredictorLayer(nn.Module):
    """One multi-token-prediction (MTP) layer for GLM-OCR speculative decoding.

    Fuses the current token embedding with the previous hidden state
    (RMSNorm each, concatenate, linear projection) and runs a single
    Glm4 decoder block on the result.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        # Dimensions come from the draft model's text config.
        config = vllm_config.speculative_config.draft_model_config.hf_config.text_config
        self.config = config
        quant_config = vllm_config.quant_config

        # enorm normalizes the token embedding; hnorm the previous hidden
        # state, before the two are concatenated and projected.
        self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Projects [embedding ; hidden] (2 * hidden_size) back to hidden_size.
        self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
        self.device = current_platform.device_type
        self.shared_head = SharedHead(
            config=config, prefix=prefix, quant_config=quant_config
        )
        self.mtp_block = Glm4DecoderLayer(
            vllm_config=vllm_config, prefix=prefix, config=self.config
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        assert inputs_embeds is not None
        # masking inputs at position 0, as not needed by MTP.
        # NOTE: mutates inputs_embeds in place; callers must not rely on
        # the original values afterwards.
        inputs_embeds[positions[0] == 0] = 0
        inputs_embeds = self.enorm(inputs_embeds)
        previous_hidden_states = self.hnorm(previous_hidden_states)
        # Fuse embedding and hidden state into a single hidden_size vector.
        hidden_states = self.eh_proj(
            torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
        )
        hidden_states, residual = self.mtp_block(
            positions=positions, hidden_states=hidden_states, residual=None
        )
        hidden_states = residual + hidden_states
        return hidden_states
class GlmOcrMultiTokenPredictor(Glm4MoeLiteMultiTokenPredictor):
    """Container for GLM-OCR MTP layers.

    Reuses the Glm4MoeLite predictor's forward/compute_logits but builds
    GLM-OCR-specific layers. MTP layers are indexed starting after the
    target model's last hidden layer (``num_hidden_layers``), matching the
    checkpoint's layer numbering.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        # Intentionally skip the parent __init__ and build only what this
        # predictor needs.
        nn.Module.__init__(self)
        config = vllm_config.model_config.hf_config.text_config
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers
        # Keyed by absolute layer index (as a string) to mirror checkpoint
        # weight names like "model.layers.<idx>....".
        self.layers = torch.nn.ModuleDict(
            {
                str(idx): GlmOcrMultiTokenPredictorLayer(
                    vllm_config=vllm_config,
                    prefix=f"{prefix}.layers.{idx}",
                )
                for idx in range(
                    self.mtp_start_layer_idx,
                    self.mtp_start_layer_idx + self.num_mtp_layers,
                )
            }
        )
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)
class GlmOcrMTP(nn.Module, SupportsPP):
    """GLM-OCR MTP draft model used for speculative decoding."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config.text_config
        quant_config = vllm_config.quant_config
        self.quant_config = quant_config
        self.model = GlmOcrMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.expert_weights = []
        self.num_layers = self.config.num_nextn_predict_layers
        # Sanity check: every predictor layer must wrap a Glm4 decoder block.
        for layer in self.model.layers.values():
            assert isinstance(layer, GlmOcrMultiTokenPredictorLayer)
            layer = layer.mtp_block
            assert isinstance(layer, Glm4DecoderLayer)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Delegate token embedding to the predictor's embedding table."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run the MTP layer selected by ``spec_step_idx``."""
        hidden_states = self.model(
            input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        return self.model.compute_logits(hidden_states, spec_step_idx)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, remapping names to this module's layout.

        Handles: routing shared lm_head/embedding weights to the spec
        layer, fused qkv / gate_up stacking, kv-cache scale remapping, and
        skipping weights that have no matching parameter here.

        :param weights: iterable of (checkpoint name, tensor) pairs.
        :return: names of the parameters that were actually loaded.
        """
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Route the shared lm_head / embedding weights to the first
            # spec layer; all other weights must name a spec layer.
            if name == "lm_head.weight":
                spec_layer = self.model.mtp_start_layer_idx
                name = f"model.layers.{spec_layer}.shared_head.head.weight"
            elif name == "model.embed_tokens.weight":
                spec_layer = self.model.mtp_start_layer_idx
            else:
                spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
                if spec_layer is None:
                    # Not an MTP weight; belongs to the target model.
                    continue
            name = self._rewrite_spec_layer_name(spec_layer, name)
            if self.quant_config is not None and (
                scale_name := self.quant_config.get_cache_scale(name)
            ):
                # Loading kv cache quantization scales
                param = params_dict[scale_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                loaded_weight = (
                    loaded_weight if loaded_weight.dim() == 0 else loaded_weight[0]
                )
                weight_loader(param, loaded_weight)
                loaded_params.add(scale_name)
                continue
            if "scale" in name or "zero_point" in name:
                # Remapping the name of FP8 kv-scale or zero point.
                name = maybe_remap_kv_scale_name(name, params_dict)
                if name is None:
                    continue
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Some checkpoints include weight scale tensors for the
                # LM head even when the quantized head isn't built. Skip
                # them if the model does not expose a matching parameter
                # to avoid KeyError during load.
                if name.endswith(".weight_scale") and name not in params_dict:
                    continue
                # According to DeepSeek-V3 Technical Report, MTP modules
                # shares embedding layer. We only load the first weights.
                if (
                    spec_layer != self.model.mtp_start_layer_idx
                    and ".layers" not in name
                ):
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in transformer layer block for spec layer
        and rename shared layer weights to be top level.
        """
        name = name.replace("model.language_model.layers", "model.layers")
        # Weights owned directly by the spec layer (not the decoder block).
        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        # Weights shared with the target model and hoisted to top level.
        shared_weight_names = ["embed_tokens"]
        spec_layer_weight = False
        shared_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                if weight_name in shared_weight_names:
                    shared_weight = True
                break
        if not spec_layer_weight:
            # treat rest weights as weights for transformer layer block
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        elif shared_weight:
            # treat shared weights as top level weights
            name = name.replace(f"model.layers.{spec_layer}.", "model.")
        return name
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glm_ocr_mtp.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/plugins/lora_resolvers/test_hf_hub_resolver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import os
import pytest
from huggingface_hub.constants import HF_HUB_CACHE
from vllm.plugins.lora_resolvers.hf_hub_resolver import HfHubResolver
# Base model that the multi-LoRA library below was trained against.
LORA_LIB_MODEL_NAME = "ibm-granite/granite-3.3-8b-instruct"
# Repo with multiple LoRAs contained in it, one per subdirectory
LORA_LIB = "ibm-granite/granite-3.3-8b-rag-agent-lib"
# Fully qualified <org>/<repo>/<subpath> name of one adapter in the library
LORA_NAME = "ibm-granite/granite-3.3-8b-rag-agent-lib/answerability_prediction_lora"  # noqa: E501
# A real file in the repo that is not a LoRA adapter directory
NON_LORA_SUBPATH = "ibm-granite/granite-3.3-8b-rag-agent-lib/README.md"
# Expected HF cache snapshot directory for the library repo
LIB_DOWNLOAD_DIR = os.path.join(
    HF_HUB_CACHE, "models--ibm-granite--granite-3.3-8b-rag-agent-lib"
)
INVALID_REPO_NAME = "thisrepodoesnotexist"
# Repo with only one LoRA in the root dir
LORA_REPO_MODEL_NAME = "meta-llama/Llama-2-7b-hf"
LORA_REPO = "yard1/llama-2-7b-sql-lora-test"
# Expected HF cache snapshot directory for the single-LoRA repo
REPO_DOWNLOAD_DIR = os.path.join(
    HF_HUB_CACHE, "models--yard1--llama-2-7b-sql-lora-test"
)
@pytest.mark.asyncio
async def test_hf_resolver_with_direct_path():
    """A LoRA name that is exactly an allow-listed repo id resolves to the
    adapter stored at that repo's root."""
    resolver = HfHubResolver([LORA_REPO])
    assert resolver is not None

    request = await resolver.resolve_lora(LORA_REPO_MODEL_NAME, LORA_REPO)
    assert request.lora_name == LORA_REPO
    assert REPO_DOWNLOAD_DIR in request.lora_path
    assert "adapter_config.json" in os.listdir(request.lora_path)
@pytest.mark.asyncio
async def test_hf_resolver_with_nested_paths():
    """An adapter living in a subdirectory of an allow-listed repo resolves
    via its <org>/<repo>/<subpath> name."""
    resolver = HfHubResolver([LORA_LIB])
    assert resolver is not None

    request = await resolver.resolve_lora(LORA_LIB_MODEL_NAME, LORA_NAME)
    assert request is not None
    assert request.lora_name == LORA_NAME
    assert LIB_DOWNLOAD_DIR in request.lora_path
    assert "adapter_config.json" in os.listdir(request.lora_path)
@pytest.mark.asyncio
async def test_hf_resolver_with_multiple_repos():
    """Resolution still works when several repos are allow-listed at once."""
    resolver = HfHubResolver([LORA_LIB, LORA_REPO])
    assert resolver is not None

    request = await resolver.resolve_lora(LORA_LIB_MODEL_NAME, LORA_NAME)
    assert request is not None
    assert request.lora_name == LORA_NAME
    assert LIB_DOWNLOAD_DIR in request.lora_path
    assert "adapter_config.json" in os.listdir(request.lora_path)
@pytest.mark.asyncio
async def test_missing_adapter():
    """A name matching no allow-listed repo resolves to None."""
    resolver = HfHubResolver([LORA_LIB])
    assert resolver is not None

    missing = await resolver.resolve_lora(LORA_LIB_MODEL_NAME, "foobar")
    assert missing is None
@pytest.mark.asyncio
async def test_nonlora_adapter():
    """A repo subpath that exists but is not an adapter resolves to None."""
    resolver = HfHubResolver([LORA_LIB])
    assert resolver is not None

    readme = await resolver.resolve_lora(
        LORA_LIB_MODEL_NAME, NON_LORA_SUBPATH
    )
    assert readme is None
@pytest.mark.asyncio
async def test_invalid_repo():
    """A name pointing at a repo outside the allow-list resolves to None."""
    resolver = HfHubResolver([LORA_LIB])
    assert resolver is not None

    bad_repo_request = await resolver.resolve_lora(
        INVALID_REPO_NAME,
        f"{INVALID_REPO_NAME}/foo",
    )
    assert bad_repo_request is None
@pytest.mark.asyncio
async def test_trailing_slash():
    """A trailing slash on the adapter subpath is tolerated by resolution."""
    resolver = HfHubResolver([LORA_LIB])
    assert resolver is not None

    request = await resolver.resolve_lora(
        LORA_LIB_MODEL_NAME,
        f"{LORA_NAME}/",
    )
    assert request is not None
    assert request.lora_name == f"{LORA_NAME}/"
    assert LIB_DOWNLOAD_DIR in request.lora_path
    assert "adapter_config.json" in os.listdir(request.lora_path)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/plugins/lora_resolvers/test_hf_hub_resolver.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/plugins/lora_resolvers/hf_hub_resolver.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import os
from huggingface_hub import HfApi, snapshot_download
import vllm.envs as envs
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.lora.resolver import LoRAResolverRegistry
from vllm.plugins.lora_resolvers.filesystem_resolver import FilesystemResolver
logger = init_logger(__name__)
class HfHubResolver(FilesystemResolver):
    """Resolves LoRA adapters by downloading them from an allow-list of
    repositories on HF Hub, then reusing the filesystem-resolver logic on
    the local snapshot cache.
    """

    def __init__(self, repo_list: list[str]):
        # Remote adapter downloads are untrusted input; warn loudly at
        # construction time so operators know this is not production-safe.
        logger.warning(
            "LoRA is allowing resolution from the following repositories on"
            " HF Hub: %s please note that allowing remote downloads"
            " is not secure, and that this plugin is not intended for use in"
            " production environments.",
            repo_list,
        )
        # Allow-list of repos adapters may be pulled from.
        self.repo_list: list[str] = repo_list
        # Lazily populated cache: repo id -> set of subdirs ("." for the
        # repo root) that contain an adapter_config.json.
        self.adapter_dirs: dict[str, set[str]] = {}

    async def resolve_lora(
        self, base_model_name: str, lora_name: str
    ) -> LoRARequest | None:
        """Resolves potential LoRA requests in a remote repo on HF Hub.

        This is effectively the same behavior as the filesystem resolver, but
        with a snapshot_download on dirs containing an adapter config prior
        to inspecting the cached dir to build a potential LoRA
        request.

        Returns:
            A LoRARequest for the downloaded adapter, or None if the name
            does not map to an adapter in an allow-listed repo.
        """
        # If a LoRA name begins with the repository name, it's disambiguated
        maybe_repo = await self._resolve_repo(lora_name)
        # If we haven't inspected this repo before, save available adapter dirs
        if maybe_repo is not None and maybe_repo not in self.adapter_dirs:
            self.adapter_dirs[maybe_repo] = await self._get_adapter_dirs(maybe_repo)
        maybe_subpath = await self._resolve_repo_subpath(lora_name, maybe_repo)
        if maybe_repo is None or maybe_subpath is None:
            return None
        # Download only the adapter's own files ("." means the repo root);
        # snapshot_download blocks on network I/O, so run it in a thread.
        repo_path = await asyncio.to_thread(
            snapshot_download,
            repo_id=maybe_repo,
            allow_patterns=f"{maybe_subpath}/*" if maybe_subpath != "." else "*",
        )
        lora_path = os.path.join(repo_path, maybe_subpath)
        # Presumably provided by FilesystemResolver (not defined in this
        # class): validates the local dir and builds the request.
        maybe_lora_request = await self._get_lora_req_from_path(
            lora_name, lora_path, base_model_name
        )
        return maybe_lora_request

    async def _resolve_repo(self, lora_name: str) -> str | None:
        """Given a fully qualified path to a LoRA with respect to its HF Hub
        repo, match the right repo to potentially download from if one exists.

        Args:
            lora_name: Path to LoRA in HF Hub, e.g., <org>/<repo>/<subpath>,
                match on <org>/<repo> (if it contains an adapter directly) or
                <org>/<repo>/ if it may have one in subdirs.
        """
        for potential_repo in self.repo_list:
            # Match whole path components only: the name must be exactly the
            # repo id, or the repo prefix must be followed by "/". This keeps
            # "org/repo2" from matching an allow-listed "org/repo".
            if lora_name.startswith(potential_repo) and (
                len(lora_name) == len(potential_repo)
                or lora_name[len(potential_repo)] == "/"
            ):
                return potential_repo
        return None

    async def _resolve_repo_subpath(
        self, lora_name: str, maybe_repo: str | None
    ) -> str | None:
        """Given the fully qualified path of the LoRA with respect to the HF
        Repo, get the subpath to download from assuming it's actually got an
        adapter in it.

        Args:
            lora_name: Path to LoRA in HF Hub, e.g., <org>/<repo>/<subpath>
            maybe_repo: Path to the repo to match against if one exists.
        """
        if maybe_repo is None:
            return None
        repo_len = len(maybe_repo)
        if lora_name == maybe_repo or (
            len(lora_name) == repo_len + 1 and lora_name[-1] == "/"
        ):
            # Resolves to the root of the directory
            adapter_dir = "."
        else:
            # It's a subpath; removing trailing slashes if there are any
            adapter_dir = lora_name[repo_len + 1 :].rstrip("/")
        # Only download if the directory actually contains an adapter.
        # NOTE(review): assumes adapter_dirs[maybe_repo] was populated by the
        # caller (resolve_lora does this); a direct call would raise KeyError.
        is_adapter = adapter_dir in self.adapter_dirs[maybe_repo]
        return adapter_dir if is_adapter else None

    async def _get_adapter_dirs(self, repo_name: str) -> set[str]:
        """Gets the subpaths within a HF repo that contain an adapter config.

        Args:
            repo_name: Name of the HF hub repo to inspect.
        """
        # list_repo_files performs blocking network I/O; offload to a thread.
        repo_files = await asyncio.to_thread(HfApi().list_repo_files, repo_id=repo_name)
        # NOTE(review): endswith() also matches files like
        # "my_adapter_config.json"; a basename comparison would be stricter.
        adapter_dirs = {
            os.path.dirname(name)
            for name in repo_files
            if name.endswith("adapter_config.json")
        }
        # A root-level config dirnames to ""; record it as "." to match the
        # sentinel used by _resolve_repo_subpath.
        if "adapter_config.json" in repo_files:
            adapter_dirs.add(".")
        return adapter_dirs
def register_hf_hub_resolver():
    """Register the Hf hub LoRA Resolver with vLLM.

    No-op unless VLLM_LORA_RESOLVER_HF_REPO_LIST is set; additionally
    requires the plugin to be explicitly enabled via VLLM_PLUGINS because
    it permits remote downloads.
    """
    repo_list_env = envs.VLLM_LORA_RESOLVER_HF_REPO_LIST
    if not repo_list_env:
        # Nothing allow-listed: nothing to register.
        return
    plugins = envs.VLLM_PLUGINS
    if plugins is None or "lora_hf_hub_resolver" not in plugins:
        logger.warning(
            "It appears that VLLM_LORA_RESOLVER_HF_REPO_LIST is set, but "
            "lora_hf_hub_resolver is not enabled in VLLM_PLUGINS; you must"
            " enable this resolver directly in VLLM_PLUGINS to use it "
            " because it allows remote downloads."
        )
        return
    resolver = HfHubResolver(repo_list_env.split(","))
    LoRAResolverRegistry.register_resolver("Hf Hub Resolver", resolver)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/plugins/lora_resolvers/hf_hub_resolver.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/lora_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
from vllm.lora.request import LoRARequest
# Sentinel adapter id for request slots that carry no LoRA.
NO_LORA_ID = 0


class LoraState:
    """Bookkeeping for which LoRA adapter each request slot is using."""

    def __init__(self, max_num_reqs: int):
        # Adapter id per request slot, NO_LORA_ID where no adapter is set.
        self.lora_ids = np.full(max_num_reqs, NO_LORA_ID, dtype=np.int32)
        # req_id -> lora_request
        self.lora_requests: dict[str, LoRARequest] = {}

    def add_request(
        self, req_id: str, req_index: int, lora_request: LoRARequest | None
    ) -> None:
        """Record the adapter (or lack thereof) for a request at its slot."""
        if lora_request is None:
            self.lora_ids[req_index] = NO_LORA_ID
            return
        self.lora_requests[req_id] = lora_request
        self.lora_ids[req_index] = lora_request.lora_int_id

    def remove_request(self, req_id: str) -> None:
        """Drop the request's adapter mapping; no-op if it has none."""
        self.lora_requests.pop(req_id, None)

    def make_lora_inputs(
        self,
        req_ids: list[str],
        idx_mapping: np.ndarray,
        num_scheduled_tokens: np.ndarray,
    ) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]:
        """Build per-request and per-token LoRA id mappings for a batch."""
        batch_lora_ids = self.lora_ids[idx_mapping]
        # Expand each request's adapter id across its scheduled tokens.
        per_token_ids = batch_lora_ids.repeat(num_scheduled_tokens)
        active_lora_requests = {
            req
            for req in map(self.lora_requests.get, req_ids)
            if req is not None
        }
        return tuple(batch_lora_ids), tuple(per_token_ids), active_lora_requests
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/lora_utils.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/e2e/test_streaming_input.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
End-to-end tests for the streaming input feature in AsyncLLM.
These tests verify that:
1. Streaming inputs work correctly with bunched inputs (queued)
2. Streaming inputs work correctly with spaced out inputs
3. Outputs are equivalent whether inputs are bunched or spaced
4. Cancelling the output stream correctly aborts the session
5. Closing the input stream correctly signals completion
6. Queued inputs are cancelled when the session is aborted
"""
import asyncio
from collections.abc import AsyncGenerator
import pytest
import pytest_asyncio
from vllm import SamplingParams
from vllm.engine.protocol import StreamingInput
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.sampling_params import RequestOutputKind
from vllm.utils.torch_utils import set_default_torch_num_threads
from vllm.v1.engine.async_llm import AsyncLLM
# The streaming-input path only runs on the V1 engine, which is CUDA-only
# today; skip the whole module on any other platform.
if not current_platform.is_cuda():
    pytest.skip(reason="V1 currently only supported on CUDA.", allow_module_level=True)

# Use a small model that doesn't require authentication for fast tests
MODEL = "facebook/opt-125m"
@pytest_asyncio.fixture(scope="module", loop_scope="module")
async def engine():
    """Create an AsyncLLM engine for the test.

    Note: Using function scope because pytest_asyncio creates a new event loop
    for each test, and the output_handler task gets cancelled between tests
    with module scope.
    """
    # Imported here rather than at module top — presumably to defer heavy
    # engine imports until the fixture actually runs; TODO confirm.
    from vllm.engine.arg_utils import AsyncEngineArgs

    engine_args = AsyncEngineArgs(
        model=MODEL, enforce_eager=True, gpu_memory_utilization=0.7
    )
    with set_default_torch_num_threads(1):
        engine = AsyncLLM.from_engine_args(engine_args)
    try:
        yield engine
    finally:
        engine.shutdown()
        # Give background tasks a moment to unwind before the loop closes.
        await asyncio.sleep(0.1)
def get_sampling_params(max_tokens: int = 20) -> SamplingParams:
    """Build the SamplingParams shared by the streaming-input tests."""
    params = SamplingParams(
        max_tokens=max_tokens,
        ignore_eos=True,
        output_kind=RequestOutputKind.DELTA,
        # Greedy decoding keeps the tests deterministic and reproducible.
        temperature=0.0,
    )
    return params
async def collect_outputs(
    output_gen: AsyncGenerator[RequestOutput, None],
) -> tuple[list[RequestOutput], str]:
    """Drain a generate() stream, returning all outputs and the joined text."""
    collected: list[RequestOutput] = []
    text_parts: list[str] = []
    async for out in output_gen:
        collected.append(out)
        if out.outputs and out.outputs[0].text:
            text_parts.append(out.outputs[0].text)
    return collected, "".join(text_parts)
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_bunched(engine: AsyncLLM):
    """Test streaming input where all inputs are sent at once (bunched/queued).

    This tests the case where multiple inputs arrive before any completes.
    The inputs should be queued and processed in sequence.
    """
    request_id = "test_bunched"
    sampling_params = get_sampling_params(max_tokens=10)

    # Create an input generator that yields all inputs quickly
    async def bunched_input_generator() -> AsyncGenerator[StreamingInput, None]:
        # Send multiple inputs rapidly - they should be queued
        yield StreamingInput(prompt="Hello, my name is")
        yield StreamingInput(prompt=" Alice and I like")
        yield StreamingInput(prompt=" to code in Python")

    outputs, full_text = await collect_outputs(
        engine.generate(
            bunched_input_generator(),
            sampling_params,
            request_id,
        )
    )

    # Verify we got outputs
    assert len(outputs) > 0, "Should have received outputs"
    # Verify the final output is marked as finished — only the very last
    # delta after the input stream closes should carry finished=True
    assert outputs[-1].finished, "Last output should be marked as finished"
    # Verify intermediate outputs are not marked as finished
    for output in outputs[:-1]:
        assert not output.finished, "Intermediate outputs should not be finished"
    # Verify we generated some text
    assert len(full_text) > 0, "Should have generated text"
    print(f"Bunched test generated: {full_text}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_spaced(engine: AsyncLLM):
    """Test streaming input where inputs are spaced out.

    This tests the case where each input completes processing before the
    next one is sent. Each chunk should be prefilled, generate tokens,
    then the next chunk should be processed.
    """
    request_id = "test_spaced"
    sampling_params = get_sampling_params(max_tokens=10)

    # Track when each input is sent (recorded for debugging; not asserted)
    input_times: list[float] = []
    outputs_per_chunk: list[int] = [0, 0, 0]
    current_chunk = 0

    async def spaced_input_generator() -> AsyncGenerator[StreamingInput, None]:
        nonlocal current_chunk
        import time

        # First input
        input_times.append(time.time())
        yield StreamingInput(prompt="Hello, my name is")
        current_chunk = 0

        # Wait for some outputs to be generated
        await asyncio.sleep(0.5)

        # Second input
        input_times.append(time.time())
        current_chunk = 1
        yield StreamingInput(prompt=" Alice and I like")

        # Wait for some outputs
        await asyncio.sleep(0.5)

        # Third input
        input_times.append(time.time())
        current_chunk = 2
        yield StreamingInput(prompt=" to code in Python")

    outputs: list[RequestOutput] = []
    full_text = ""
    async for output in engine.generate(
        spaced_input_generator(),
        sampling_params,
        request_id,
    ):
        outputs.append(output)
        if output.outputs and output.outputs[0].text:
            full_text += output.outputs[0].text
        # Attribute this output to whichever chunk is currently active;
        # current_chunk is updated concurrently by the input generator.
        outputs_per_chunk[current_chunk] += 1

    # Verify we got outputs
    assert len(outputs) > 0, "Should have received outputs"
    # Verify the final output is marked as finished
    assert outputs[-1].finished, "Last output should be marked as finished"
    # Verify we received outputs from multiple chunks
    # (with spaced inputs, we should see outputs distributed across chunks)
    chunks_with_outputs = sum(1 for c in outputs_per_chunk if c > 0)
    assert chunks_with_outputs >= 1, "Should have outputs from at least one chunk"
    print(f"Spaced test generated: {full_text}")
    print(f"Outputs per chunk: {outputs_per_chunk}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_output_equivalence(engine: AsyncLLM):
    """Test that bunched and spaced inputs produce equivalent outputs.

    When the same prompts are provided either bunched or spaced,
    the final concatenated output should be the same (with deterministic
    sampling).
    """
    prompts = ["Hello, my name is", " Bob and I work", " at Anthropic"]
    sampling_params = get_sampling_params(max_tokens=15)

    # Test bunched inputs
    async def bunched_gen() -> AsyncGenerator[StreamingInput, None]:
        for prompt in prompts:
            yield StreamingInput(prompt=prompt)

    _, bunched_text = await collect_outputs(
        engine.generate(bunched_gen(), sampling_params, "equiv_bunched")
    )

    # Test spaced inputs (same prompts, but with a delay after every chunk)
    async def spaced_gen() -> AsyncGenerator[StreamingInput, None]:
        for prompt in prompts:
            yield StreamingInput(prompt=prompt)
            await asyncio.sleep(0.3)

    _, spaced_text = await collect_outputs(
        engine.generate(spaced_gen(), sampling_params, "equiv_spaced")
    )

    # Both should produce the same output since we use temperature=0
    assert bunched_text == spaced_text, (
        f"Bunched and spaced should produce same output.\n"
        f"Bunched: {bunched_text!r}\n"
        f"Spaced: {spaced_text!r}"
    )
    print(f"Equivalence test passed. Generated: {bunched_text}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_cancel_output_stream(engine: AsyncLLM):
    """Test that cancelling the output stream aborts the entire session.

    When the consumer cancels iteration over the output generator,
    the session should be aborted including any queued inputs.
    """
    request_id = "test_cancel_output"
    # Large max_tokens keeps generation in flight while we cancel.
    sampling_params = get_sampling_params(max_tokens=1000)

    # Set only if the generator runs to completion — it never should here.
    input_completed = asyncio.Event()
    input_task_cancelled = False

    async def slow_input_generator() -> AsyncGenerator[StreamingInput, None]:
        nonlocal input_task_cancelled
        try:
            yield StreamingInput(prompt="Tell me a very long story about")
            yield StreamingInput(prompt=" a dragon and a knight")
            # This should be cancelled before we get here
            await asyncio.sleep(10)
            yield StreamingInput(prompt=" who become friends")
            input_completed.set()
        except asyncio.CancelledError:
            input_task_cancelled = True
            raise

    outputs_received = 0
    output_gen = engine.generate(slow_input_generator(), sampling_params, request_id)

    # Collect a few outputs then cancel
    try:
        async for output in output_gen:
            outputs_received += 1
            if outputs_received >= 5:
                # Cancel by breaking out of the loop (generator will be GC'd)
                break
    finally:
        # Explicitly close the generator to ensure cleanup
        await output_gen.aclose()

    # Give time for cleanup
    await asyncio.sleep(0.5)

    # Verify we got some outputs before cancelling
    assert outputs_received >= 5, "Should have received outputs before cancel"
    # Verify the input task was cancelled
    assert input_task_cancelled, "Input task should have been cancelled"
    # Verify the session is properly cleaned up
    assert not engine.output_processor.has_unfinished_requests(), (
        "Should have no unfinished requests after cancel"
    )
    print(f"Cancel test passed. Received {outputs_received} outputs before cancel")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_close_signals_completion(engine: AsyncLLM):
    """When the input generator returns naturally, the session must end
    with a final output carrying finished=True and leave no state behind."""
    request_id = "test_close_completion"
    sampling_params = get_sampling_params(max_tokens=15)

    generator_ran_to_completion = False

    async def single_prompt_then_close() -> AsyncGenerator[StreamingInput, None]:
        nonlocal generator_ran_to_completion
        yield StreamingInput(prompt="What is 2 + 2? The answer is")
        # Falling off the end here closes the input stream.
        generator_ran_to_completion = True

    outputs, _ = await collect_outputs(
        engine.generate(single_prompt_then_close(), sampling_params, request_id)
    )

    # The generator itself must have been driven to its natural end.
    assert generator_ran_to_completion, "Input generator should have finished"

    # The stream must terminate with a finished output...
    assert len(outputs) > 0, "Should have received outputs"
    assert outputs[-1].finished, "Last output should be marked as finished"

    # ...and leave the engine with no lingering request state.
    assert not engine.output_processor.has_unfinished_requests(), (
        "Should have no unfinished requests"
    )
    print("Close completion test passed")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_abort_queued_inputs(engine: AsyncLLM):
    """Test that aborting the session cancels queued inputs.

    When multiple inputs are queued and the session is aborted,
    all pending inputs should be cancelled.
    """
    request_id = "test_abort_queued"
    # Use large max_tokens to ensure we have time to queue inputs
    sampling_params = get_sampling_params(max_tokens=2000)

    inputs_sent = 0
    input_cancelled = False

    async def many_inputs_generator() -> AsyncGenerator[StreamingInput, None]:
        nonlocal inputs_sent, input_cancelled
        try:
            # Send several inputs to fill the queue
            for i in range(10):
                yield StreamingInput(prompt=f" Part {i}: Tell me about the number {i}.")
                inputs_sent += 1
                # Small delay to interleave with output processing
                await asyncio.sleep(0.05)
        except asyncio.CancelledError:
            input_cancelled = True
            raise

    outputs_received = 0
    output_gen = engine.generate(many_inputs_generator(), sampling_params, request_id)
    try:
        async for output in output_gen:
            outputs_received += 1
            # Cancel after receiving some outputs
            if outputs_received >= 10:
                break
    finally:
        # Closing the generator aborts the session and any queued inputs.
        await output_gen.aclose()

    # Give time for cleanup
    await asyncio.sleep(0.5)

    # Verify we received some outputs
    assert outputs_received >= 10, "Should have received outputs before abort"
    # Verify the input generator was cancelled OR finished naturally
    # (it might finish naturally if all inputs were sent before cancel)
    assert input_cancelled or inputs_sent == 10, (
        f"Input generator should have been cancelled or completed. "
        f"cancelled={input_cancelled}, inputs_sent={inputs_sent}"
    )
    # Verify the session is cleaned up
    assert not engine.output_processor.has_unfinished_requests(), (
        "Should have no unfinished requests after abort"
    )
    print(
        f"Abort queued test passed. Sent {inputs_sent} inputs, "
        f"received {outputs_received} outputs"
    )
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_error_propagation(engine: AsyncLLM):
    """Test that errors in the input generator are propagated to the caller."""
    request_id = "test_error_propagation"
    sampling_params = get_sampling_params(max_tokens=20)

    class InputError(Exception):
        # Distinct type so pytest.raises cannot match an unrelated error.
        pass

    async def error_input_generator() -> AsyncGenerator[StreamingInput, None]:
        yield StreamingInput(prompt="Start with this")
        await asyncio.sleep(0.1)
        raise InputError("Simulated input error")

    # Note: The current implementation catches exceptions and puts them
    # in the queue, so we should get the error when iterating outputs
    with pytest.raises(InputError, match="Simulated input error"):
        async for _ in engine.generate(
            error_input_generator(), sampling_params, request_id
        ):
            pass

    # Give time for cleanup
    await asyncio.sleep(0.3)

    # Verify the session is cleaned up
    assert not engine.output_processor.has_unfinished_requests(), (
        "Should have no unfinished requests after error"
    )
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_multiple_concurrent_sessions(engine: AsyncLLM):
    """Test multiple concurrent streaming input sessions.

    Multiple streaming sessions should be able to run concurrently
    without interfering with each other.
    """
    num_sessions = 3

    async def run_session(session_id: int) -> tuple[str, str]:
        """Run one independent streaming session; return (request_id, text)."""
        request_id = f"test_concurrent_{session_id}"
        sampling_params = get_sampling_params(max_tokens=10)
        prompts = [f"Session {session_id}: Hello", f" world from session {session_id}"]

        async def input_gen() -> AsyncGenerator[StreamingInput, None]:
            for prompt in prompts:
                yield StreamingInput(prompt=prompt)
                await asyncio.sleep(0.1)

        _, text = await collect_outputs(
            engine.generate(input_gen(), sampling_params, request_id)
        )
        return request_id, text

    # Run sessions concurrently. (The previous `results = []` initialization
    # was a dead store — asyncio.gather rebinds the name immediately.)
    tasks = [asyncio.create_task(run_session(i)) for i in range(num_sessions)]
    results: list[tuple[str, str]] = await asyncio.gather(*tasks)

    # Verify all sessions completed
    assert len(results) == num_sessions
    for request_id, text in results:
        assert len(text) > 0, f"Session {request_id} should have generated text"
        print(f"{request_id}: {text}")

    # Verify cleanup
    assert not engine.output_processor.has_unfinished_requests()
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_per_chunk_sampling_params(engine: AsyncLLM):
    """Test that per-chunk sampling params are respected.

    Each StreamingInput can have its own sampling_params.

    NOTE(review): the assertions only check that text was produced; they do
    not verify the per-chunk max_tokens actually took effect — consider
    asserting on token counts per chunk.
    """
    request_id = "test_per_chunk_params"
    base_params = get_sampling_params(max_tokens=10)

    async def variable_params_generator() -> AsyncGenerator[StreamingInput, None]:
        # First chunk with base params
        yield StreamingInput(prompt="Count to five:", sampling_params=base_params)
        # Second chunk with different max_tokens
        chunk_params = get_sampling_params(max_tokens=5)
        yield StreamingInput(
            prompt=" Now count backwards:", sampling_params=chunk_params
        )

    outputs, full_text = await collect_outputs(
        engine.generate(variable_params_generator(), base_params, request_id)
    )

    assert len(outputs) > 0, "Should have received outputs"
    assert outputs[-1].finished, "Last output should be finished"
    assert len(full_text) > 0, "Should have generated text"
    print(f"Per-chunk params test generated: {full_text}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_empty_generator(engine: AsyncLLM):
    """Test behavior when the input generator yields nothing.

    An empty generator should still produce a finished output.
    """
    request_id = "test_empty_generator"
    sampling_params = get_sampling_params(max_tokens=10)

    async def empty_generator() -> AsyncGenerator[StreamingInput, None]:
        # Don't yield anything
        return
        # Unreachable yield makes Python compile this function as an async
        # generator instead of a plain coroutine.
        yield  # Make it a generator

    outputs: list[RequestOutput] = []
    async for output in engine.generate(empty_generator(), sampling_params, request_id):
        outputs.append(output)

    # Should still get a finished marker
    assert len(outputs) >= 1, "Should receive at least one output"
    assert outputs[-1].finished, "Should have a finished output"
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_single_chunk(engine: AsyncLLM):
    """A session with exactly one input chunk behaves like a plain
    non-streaming request issued through the streaming API."""
    request_id = "test_single_chunk"
    sampling_params = get_sampling_params(max_tokens=15)

    async def one_chunk() -> AsyncGenerator[StreamingInput, None]:
        yield StreamingInput(prompt="What color is the sky? The sky is")

    outputs, full_text = await collect_outputs(
        engine.generate(one_chunk(), sampling_params, request_id)
    )

    assert len(outputs) > 0
    assert outputs[-1].finished
    # Lenient content check: any non-empty generation passes.
    assert "blue" in full_text.lower() or len(full_text) > 0
    print(f"Single chunk test generated: {full_text}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_reuse_request_id(engine: AsyncLLM):
    """A request id may be reused once the previous session has finished."""
    request_id = "test_reuse_id"
    sampling_params = get_sampling_params(max_tokens=5)

    async def run_once(prompt: str) -> str:
        # One single-chunk session under the shared request id.
        async def one_input() -> AsyncGenerator[StreamingInput, None]:
            yield StreamingInput(prompt=prompt)

        _, text = await collect_outputs(
            engine.generate(one_input(), sampling_params, request_id)
        )
        return text

    # Two back-to-back sessions with the same id.
    text1 = await run_once("First session")
    text2 = await run_once("Second session")

    assert len(text1) > 0
    assert len(text2) > 0
    assert not engine.output_processor.has_unfinished_requests()
    print(f"Reuse ID test: session 1: {text1}, session 2: {text2}")
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_validation_errors(engine: AsyncLLM):
    """Test that invalid configurations raise appropriate errors."""

    # A fresh generator instance is created per case below, since an async
    # generator can only be consumed once.
    async def dummy_generator() -> AsyncGenerator[StreamingInput, None]:
        yield StreamingInput(prompt="test")

    # Test n > 1 is rejected
    with pytest.raises(ValueError, match="Input streaming not currently supported"):
        params_n2 = SamplingParams(max_tokens=10, n=2)
        async for _ in engine.generate(dummy_generator(), params_n2, "test_n2"):
            pass

    # Test FINAL_ONLY is rejected
    with pytest.raises(ValueError, match="Input streaming not currently supported"):
        params_final = SamplingParams(
            max_tokens=10, output_kind=RequestOutputKind.FINAL_ONLY
        )
        async for _ in engine.generate(dummy_generator(), params_final, "test_final"):
            pass

    # Test stop strings are rejected
    with pytest.raises(ValueError, match="Input streaming not currently supported"):
        params_stop = SamplingParams(max_tokens=10, stop=["stop"])
        async for _ in engine.generate(dummy_generator(), params_stop, "test_stop"):
            pass
@pytest.mark.asyncio(loop_scope="module")
async def test_streaming_input_delayed_generator_exit(engine: AsyncLLM):
    """Test that output generator exits when input generator closes after outputs.

    This tests the case where:
    1. Multiple inputs are sent and fully processed
    2. The engine has finished
    3. The input generator doesn't exit until after the engine finishes
    4. The output generator should exit properly once the input generator exits
    """
    request_id = "test_delayed_exit"
    sampling_params = get_sampling_params(max_tokens=10)

    engine_finished_event = asyncio.Event()
    input_generator_exited = False
    finish_count = 0

    async def delayed_exit_input_generator() -> AsyncGenerator[StreamingInput, None]:
        nonlocal input_generator_exited
        # Send all inputs immediately
        yield StreamingInput(prompt="Hello, my name is")
        yield StreamingInput(prompt=" Alice")
        # Wait until the engine has finished generating before exiting
        await engine_finished_event.wait()
        # Add a small delay to ensure we're testing the "delayed exit" case
        await asyncio.sleep(0.1)
        input_generator_exited = True

    outputs: list[RequestOutput] = []
    full_text = ""
    async for output in engine.generate(
        delayed_exit_input_generator(), sampling_params, request_id
    ):
        outputs.append(output)
        if output.outputs and output.outputs[0].text:
            full_text += output.outputs[0].text
        # Signal when the engine finishes both input chunks (each gets a finish_reason)
        # Note: output.finished will be False while input stream is open
        if output.outputs and output.outputs[0].finish_reason is not None:
            finish_count += 1
            if finish_count == 2:
                engine_finished_event.set()

    # Verify the input generator exited properly
    assert input_generator_exited, (
        "Input generator should have exited after engine finished"
    )
    # Verify we got outputs
    assert len(outputs) > 0, "Should have received outputs"
    # Verify we generated some text
    assert len(full_text) > 0, "Should have generated text"
    # Verify the session is cleaned up
    assert not engine.output_processor.has_unfinished_requests(), (
        "Should have no unfinished requests"
    )
    print(f"Delayed exit test passed. Generated: {full_text}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/e2e/test_streaming_input.py",
"license": "Apache License 2.0",
"lines": 506,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/streaming_input/test_async_llm_streaming.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
from collections.abc import AsyncGenerator
from unittest.mock import AsyncMock, MagicMock
import pytest
from vllm.engine.protocol import StreamingInput
from vllm.outputs import RequestOutput
from vllm.sampling_params import RequestOutputKind, SamplingParams
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.v1.engine.output_processor import RequestOutputCollector
@pytest.fixture
def mock_async_llm():
    """Build a lightweight AsyncLLM stand-in with the real `generate` bound.

    Only the attributes that `AsyncLLM.generate` touches are mocked, so the
    full engine never has to be constructed.
    """
    engine = MagicMock(spec=AsyncLLM)

    # Config surface read by generate().
    engine.vllm_config = MagicMock()
    engine.vllm_config.cache_config.kv_sharing_fast_prefill = False
    engine.model_config = MagicMock()
    engine.model_config.max_model_len = 2048

    # Flags and synchronization primitives generate() consults.
    engine.log_requests = False
    engine.errored = False
    engine._pause_cond = asyncio.Condition()
    engine._paused = False

    # Collaborators invoked by generate().
    engine._run_output_handler = MagicMock()
    engine.abort = AsyncMock()

    # Bind the genuine generate implementation onto the mock instance.
    engine.generate = AsyncLLM.generate.__get__(engine, AsyncLLM)
    return engine
@pytest.mark.asyncio
async def test_generate_normal_flow(mock_async_llm):
    """Test normal generation flow with streaming requests.

    Feeds two RequestOutputs (non-final, then final) through the collector
    queue and checks that `generate` yields both, in order, and stops after
    the finished one.
    """
    request_id = "test_request"
    prompt = "Tell me about Paris"
    sampling_params = SamplingParams(max_tokens=10)

    # Create a mock queue with outputs
    queue = RequestOutputCollector(RequestOutputKind.FINAL_ONLY, request_id)
    output1 = RequestOutput(
        request_id=request_id,
        prompt="Tell me about Paris",
        prompt_token_ids=[1, 2, 3],
        prompt_logprobs=None,
        outputs=[],
        finished=False,
    )
    output2 = RequestOutput(
        request_id=request_id,
        prompt="Tell me about Paris",
        prompt_token_ids=[1, 2, 3],
        prompt_logprobs=None,
        outputs=[],
        finished=True,
    )

    # Feed outputs to queue as they're consumed to avoid aggregation
    async def feed_outputs():
        queue.put(output1)
        await asyncio.sleep(1)  # Let first output be consumed
        queue.put(output2)

    asyncio.create_task(feed_outputs())  # noqa

    # Mock add_request to return the queue
    async def mock_add_request(*args, **kwargs):
        return queue

    mock_async_llm.add_request = mock_add_request

    # Collect outputs from generate
    outputs = []
    async for output in mock_async_llm.generate(
        prompt=prompt,
        sampling_params=sampling_params,
        request_id=request_id,
    ):
        outputs.append(output)

    assert len(outputs) == 2
    assert outputs[0].finished is False
    assert outputs[1].finished is True
def make_output(request_id: str, finished: bool) -> RequestOutput:
    """Build a minimal RequestOutput with a fixed prompt and no completions."""
    fields = dict(
        request_id=request_id,
        prompt="test",
        prompt_token_ids=[1, 2, 3],
        prompt_logprobs=None,
        outputs=[],
        finished=finished,
    )
    return RequestOutput(**fields)
@pytest.mark.asyncio
async def test_generate_with_async_generator():
    """Test generate with an async input generator.

    With the new streaming input API, completion is signaled by finishing
    the input generator (not via a resumable flag). Each input chunk
    produces intermediate outputs, and the final output has finished=True.
    """
    request_id = "test"
    sampling_params = SamplingParams(max_tokens=10)

    # Minimal AsyncLLM stand-in; only the attributes generate() reads are set.
    llm = MagicMock(spec=AsyncLLM)
    llm.vllm_config = MagicMock()
    llm.vllm_config.cache_config.kv_sharing_fast_prefill = False
    llm.model_config = MagicMock()
    llm.model_config.max_model_len = 2048
    llm.log_requests = False
    llm.errored = False
    llm._pause_cond = asyncio.Condition()
    llm._paused = False
    llm._run_output_handler = MagicMock()
    llm.abort = AsyncMock()
    # Bind the real generate method
    llm.generate = AsyncLLM.generate.__get__(llm, AsyncLLM)

    # Track inputs processed
    inputs_received = []
    queue = RequestOutputCollector(RequestOutputKind.DELTA, request_id)

    async def mock_add_request(req_id, prompt, params, *args, **kwargs):
        # When prompt is an AsyncGenerator, process streaming inputs
        if isinstance(prompt, AsyncGenerator):
            # Process inputs in background, produce outputs
            async def handle_stream():
                async for input_chunk in prompt:
                    inputs_received.append(input_chunk.prompt)
                    # Each input produces an intermediate output
                    queue.put(make_output(req_id, finished=False))
                    await asyncio.sleep(0.01)
                # Final output when stream ends
                queue.put(make_output(req_id, finished=True))

            asyncio.create_task(handle_stream())
            return queue
        return queue

    llm.add_request = mock_add_request

    async def input_generator() -> AsyncGenerator[StreamingInput, None]:
        yield StreamingInput(prompt="Hello", sampling_params=sampling_params)
        yield StreamingInput(prompt=" world", sampling_params=sampling_params)

    outputs = []
    async for output in llm.generate(input_generator(), sampling_params, request_id):
        outputs.append(output)

    # Two intermediate outputs + one final output
    assert len(outputs) == 3
    assert outputs[0].finished is False
    assert outputs[1].finished is False
    assert outputs[2].finished is True
    # Both inputs were processed
    assert inputs_received == ["Hello", " world"]
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/streaming_input/test_async_llm_streaming.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/streaming_input/test_gpu_model_runner_streaming.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Unit tests for GPUModelRunner._update_streaming_request function."""
from unittest.mock import Mock
import pytest
from vllm.multimodal.inputs import (
MultiModalFeatureSpec,
MultiModalKwargsItem,
PlaceholderRange,
)
from vllm.sampling_params import SamplingParams
from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch
from vllm.v1.worker.gpu_model_runner import GPUModelRunner
pytestmark = pytest.mark.cpu_test
@pytest.fixture
def mock_model_runner_with_input_batch():
    """Create a mock GPUModelRunner with a real InputBatch for e2e testing."""
    runner = Mock(spec=GPUModelRunner)
    # Attributes read by _update_streaming_request.
    runner.uses_mrope = False
    runner.requests = {}
    runner.max_num_reqs = 10
    runner.max_model_len = 1024
    # Create a real InputBatch for e2e testing (CPU-backed, no pinned memory).
    runner.input_batch = InputBatch(
        max_num_reqs=10,
        max_model_len=1024,
        max_num_batched_tokens=1024,
        device="cpu",
        pin_memory=False,
        vocab_size=32000,
        block_sizes=[16],
        kernel_block_sizes=[16],
        is_spec_decode=False,
        logitsprocs=None,
        is_pooling_model=False,
    )
    return runner
def test_e2e_streaming_request_update_basic_flow(mock_model_runner_with_input_batch):
    """Test that streaming session are updated correctly.

    This test validates that when a streaming session is updated with new prompt tokens:
    1. The request is removed from InputBatch before updating (avoids duplication)
    2. Request state fields are updated correctly
    3. output_token_ids is cleared (intermediate outputs are now in prompt_token_ids)
    """
    runner = mock_model_runner_with_input_batch
    req_id = "streaming_req_0"

    # Step 1: Create initial request state with some computed tokens
    initial_req_state = CachedRequestState(
        req_id=req_id,
        prompt_token_ids=[1, 2, 3],
        mm_features=[],
        sampling_params=SamplingParams(temperature=0.5),
        pooling_params=None,
        generator=None,
        block_ids=([0],),
        num_computed_tokens=3,
        output_token_ids=[10, 11],  # Generated 2 tokens
    )
    runner.requests[req_id] = initial_req_state
    # Add request to InputBatch
    runner.input_batch.add_request(initial_req_state)
    assert req_id in runner.input_batch.req_id_to_index

    # Step 2: Create new request data with extended prompt
    # The scheduler has already set prompt_token_ids to the full sequence
    # (original prompt + intermediate outputs + new prompt)
    new_req_data = Mock()
    new_req_data.prompt_token_ids = [
        1,
        2,
        3,
        10,
        4,
        5,
    ]  # Full sequence with intermediate output (10)
    new_req_data.mm_features = []
    new_req_data.prompt_embeds = None
    new_req_data.sampling_params = SamplingParams(temperature=0.8, max_tokens=50)
    new_req_data.pooling_params = None
    new_req_data.block_ids = ([0, 1],)
    new_req_data.num_computed_tokens = 4  # 3 original prompt + 1 intermediate output

    # Step 3: Update the request
    updated_req_state = GPUModelRunner._update_streaming_request(
        runner, req_id, new_req_data
    )

    # Step 4: Verify the request state was updated correctly
    assert updated_req_state.prompt_token_ids == [1, 2, 3, 10, 4, 5]
    assert updated_req_state.num_computed_tokens == 4
    assert updated_req_state.sampling_params.temperature == 0.8
    assert updated_req_state.sampling_params.max_tokens == 50
    assert updated_req_state.block_ids == ([0, 1],)
    # Verify output_token_ids were cleared
    # (intermediate outputs are now in prompt_token_ids)
    assert updated_req_state.output_token_ids == []
    # Verify the same object is returned
    assert runner.requests[req_id] is updated_req_state
    # Verify request was removed from InputBatch during update (avoids duplication)
    assert req_id not in runner.input_batch.req_id_to_index
def test_e2e_streaming_with_multimodal_features(mock_model_runner_with_input_batch):
    """Test that streaming session with multimodal features are updated correctly.

    This test validates that when a streaming session with mm features is updated:
    1. The request is removed from InputBatch before updating (avoids duplication)
    2. Multimodal features from both requests are preserved and merged correctly
    3. New prompt tokens (including intermediate outputs) are appended correctly
    4. output_token_ids is cleared (intermediate outputs are now in prompt_token_ids)
    """
    runner = mock_model_runner_with_input_batch
    req_id = "streaming_mm_req_0"

    # Step 1: Create initial request state with one multimodal feature
    mm_feature_1 = MultiModalFeatureSpec(
        data=MultiModalKwargsItem.dummy(),
        modality="audio",
        identifier="audio_1",
        mm_position=PlaceholderRange(offset=2, length=10),
    )
    initial_req_state = CachedRequestState(
        req_id=req_id,
        prompt_token_ids=[1, 2] + [0] * 10 + [3, 4],  # 2 + 10 (mm) + 2 = 14 tokens
        mm_features=[mm_feature_1],
        sampling_params=SamplingParams(),
        pooling_params=None,
        generator=None,
        block_ids=([0],),
        num_computed_tokens=14,
        output_token_ids=[100],  # Generated 1 token
    )
    runner.requests[req_id] = initial_req_state
    # Add request to InputBatch
    runner.input_batch.add_request(initial_req_state)
    assert req_id in runner.input_batch.req_id_to_index

    # Step 2: Create new request data with additional multimodal feature
    # The scheduler has already set prompt_token_ids to the full sequence
    # (original prompt + intermediate outputs + new prompt with new multimodal feature)
    mm_feature_2 = MultiModalFeatureSpec(
        data=MultiModalKwargsItem.dummy(),
        modality="audio",
        identifier="audio_2",
        mm_position=PlaceholderRange(offset=15, length=5),
    )
    new_req_data = Mock()
    # Full sequence: [1, 2] + [0]*10 + [3, 4] + [100] + [0]*5 + [5] = 21 tokens
    new_req_data.prompt_token_ids = [1, 2] + [0] * 10 + [3, 4, 100] + [0] * 5 + [5]
    new_req_data.mm_features = [mm_feature_1, mm_feature_2]
    new_req_data.prompt_embeds = None
    new_req_data.sampling_params = SamplingParams(temperature=0.7, max_tokens=30)
    new_req_data.pooling_params = None
    new_req_data.block_ids = ([0, 1],)
    new_req_data.num_computed_tokens = 14  # 14 tokens from initial request

    # Step 3: Update the request
    updated_req_state = GPUModelRunner._update_streaming_request(
        runner, req_id, new_req_data
    )

    # Step 4: Verify the request state was updated correctly
    # Verify multimodal features are preserved
    assert len(updated_req_state.mm_features) == 2
    assert updated_req_state.mm_features[0] == mm_feature_1
    assert updated_req_state.mm_features[1] == mm_feature_2
    # Verify prompt tokens include intermediate output (100) and new tokens
    # Initial: 2 + 10 (mm1) + 2 = 14 tokens
    # New: 2 + 10 (mm1) + 2 + 1 (output 100) + 5 (mm2) + 1 = 21 tokens
    assert len(updated_req_state.prompt_token_ids) == 21
    assert updated_req_state.prompt_token_ids == [1, 2] + [0] * 10 + [3, 4, 100] + [
        0
    ] * 5 + [5]
    # Verify output_token_ids were cleared
    # (intermediate outputs are now in prompt_token_ids)
    assert updated_req_state.output_token_ids == []
    # Verify other parameters were updated
    assert updated_req_state.num_computed_tokens == 14
    assert updated_req_state.sampling_params.temperature == 0.7
    assert updated_req_state.sampling_params.max_tokens == 30
    assert updated_req_state.block_ids == ([0, 1],)
    # Verify the same object is returned
    assert runner.requests[req_id] is updated_req_state
    # Verify request was removed from InputBatch during update (avoids duplication)
    assert req_id not in runner.input_batch.req_id_to_index
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/streaming_input/test_gpu_model_runner_streaming.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/v1/streaming_input/test_scheduler_streaming.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import unittest
from unittest.mock import MagicMock
import torch
from vllm.config import DeviceConfig, VllmConfig
from vllm.multimodal.inputs import (
MultiModalFeatureSpec,
MultiModalKwargsItem,
PlaceholderRange,
)
from vllm.sampling_params import SamplingParams
from vllm.v1.core.sched.scheduler import Scheduler
from vllm.v1.engine import FinishReason
from vllm.v1.kv_cache_interface import (
FullAttentionSpec,
KVCacheConfig,
KVCacheGroupSpec,
)
from vllm.v1.outputs import ModelRunnerOutput
from vllm.v1.request import Request, RequestStatus, StreamingUpdate
from vllm.v1.structured_output import StructuredOutputManager
STOP_TOKEN = 128001
class DummyRequest(Request):
    """Minimal Request for scheduler tests: fixed stop token, tunable flags."""

    def __init__(
        self,
        request_id,
        resumable=True,
        prompt_token_ids=None,
        mm_features: list[MultiModalFeatureSpec] | None = None,
        max_tokens: int | None = 16,
    ):
        # Avoid a shared mutable default: materialize the token list here.
        tokens = [] if prompt_token_ids is None else prompt_token_ids
        params = SamplingParams(stop_token_ids=[STOP_TOKEN], max_tokens=max_tokens)
        super().__init__(
            request_id=request_id,
            prompt_token_ids=tokens,
            sampling_params=params,
            pooling_params=None,
            mm_features=mm_features,
            resumable=resumable,
        )
def create_scheduler() -> Scheduler:
    """Build a CPU-device Scheduler with mocked model/cache configs.

    Uses a single full-attention KV cache group with block size 16 and a
    generous block budget so scheduling is never memory-constrained in tests.
    """
    vllm_config = VllmConfig(device_config=DeviceConfig("cpu"))
    # Mock out model/cache config so no real model or tokenizer is needed.
    vllm_config.model_config = MagicMock()
    vllm_config.model_config.skip_tokenizer_init = True
    vllm_config.model_config.is_multimodal_model = False
    vllm_config.model_config.max_model_len = 1024
    vllm_config.model_config.enable_return_routed_experts = False
    vllm_config.cache_config = MagicMock()
    vllm_config.cache_config.num_gpu_blocks = 1000
    vllm_config.cache_config.enable_prefix_caching = False
    kv_cache_config = KVCacheConfig(
        num_blocks=1000,
        kv_cache_tensors=[],
        kv_cache_groups=[
            KVCacheGroupSpec(
                ["layer"],
                FullAttentionSpec(
                    block_size=16, num_kv_heads=1, head_size=1, dtype=torch.float32
                ),
            )
        ],
    )
    return Scheduler(
        vllm_config=vllm_config,
        kv_cache_config=kv_cache_config,
        log_stats=True,
        structured_output_manager=StructuredOutputManager(vllm_config),
        block_size=16,
    )
class TestStreamingScheduler(unittest.TestCase):
    """Unit tests for streaming-session handling in the v1 Scheduler."""

    def test_add_request(self):
        """A second request with the same id is queued on the session."""
        scheduler = create_scheduler()
        request = DummyRequest(
            request_id="test_request",
            resumable=True,
        )
        scheduler.add_request(request)
        assert "test_request" in scheduler.requests
        assert request.status == RequestStatus.WAITING
        assert len(scheduler.waiting) == 1
        next_request = DummyRequest(
            request_id="test_request",
            resumable=True,
        )
        scheduler.add_request(next_request)
        assert next_request.status == RequestStatus.WAITING
        assert len(scheduler.requests["test_request"].streaming_queue) == 1

    def test_update_request_as_session_max_token(self):
        """max_tokens is reset from the incoming update, not accumulated."""
        scheduler = create_scheduler()
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
        )
        session.num_computed_tokens = len(session.prompt_token_ids)
        session.max_tokens = 10  # Initial max_tokens
        session._output_token_ids = [1] * 10  # reach max_tokens
        new_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5, 6],
        )
        new_request.sampling_params = SamplingParams(max_tokens=10)
        new_request.max_tokens = 10  # Additional max_tokens from new request
        update = StreamingUpdate.from_request(new_request)
        scheduler._update_request_as_session(session, update)
        assert session.sampling_params.max_tokens == 10
        # _update_request_as_session clears output tokens first, so
        # max_tokens = num_output_tokens (0) + update.max_tokens (10) = 10
        assert session.max_tokens == 10
        session.num_computed_tokens = len(session.prompt_token_ids)
        # Simulate generating 5 more output tokens
        session._output_token_ids = [1] * 5
        new_request2 = DummyRequest(
            request_id="session",
            prompt_token_ids=[7, 8, 9],
        )
        new_request2.sampling_params = SamplingParams(max_tokens=10)
        new_request2.max_tokens = 10
        update2 = StreamingUpdate.from_request(new_request2)
        scheduler._update_request_as_session(session, update2)
        assert session.sampling_params.max_tokens == 10
        # Again, output tokens are cleared first, so max_tokens = 0 + 10 = 10
        assert session.max_tokens == 10

    def test_update_request_as_session(self):
        """New prompt tokens are appended and the session re-enters WAITING."""
        scheduler = create_scheduler()
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
        )
        session.num_computed_tokens = len(session.prompt_token_ids)
        new_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5, 6],
        )
        new_request.sampling_params = SamplingParams(max_tokens=10)
        update = StreamingUpdate.from_request(new_request)
        scheduler._update_request_as_session(session, update)
        assert session.prompt_token_ids == [1, 2, 3, 4, 5, 6]
        assert session._all_token_ids == [1, 2, 3, 4, 5, 6]
        assert session.sampling_params.max_tokens == 10
        assert session.status == RequestStatus.WAITING

    def test_update_request_as_session_with_multimodal(self):
        """mm feature offsets from the update are shifted by the old prompt length."""
        scheduler = create_scheduler()
        mm_feature = MultiModalFeatureSpec(
            data=MultiModalKwargsItem.dummy(),
            modality="audio",
            identifier="",
            mm_position=PlaceholderRange(offset=1, length=1),
        )
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
            mm_features=[mm_feature],
        )
        session.num_computed_tokens = len(session.prompt_token_ids)
        mm_feature = MultiModalFeatureSpec(
            data=MultiModalKwargsItem.dummy(),
            modality="audio",
            identifier="",
            mm_position=PlaceholderRange(offset=2, length=1),
        )
        new_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5, 6, 7],
            mm_features=[mm_feature],
        )
        update = StreamingUpdate.from_request(new_request)
        scheduler._update_request_as_session(session, update)
        assert len(session.mm_features) == 2
        assert session.mm_features[0].mm_position.offset == 1
        # 2 + len([1, 2, 3])
        assert session.mm_features[1].mm_position.offset == 5

    def test_process_streaming_requests_with_finish_session(self):
        """Test that a non-resumable request signals stream completion.

        With the new streaming API, completion is signaled by closing/finishing
        the input generator. When a non-resumable request is added to a session
        in WAITING_FOR_STREAMING_REQ state, the session is finished immediately
        with FINISHED_ABORTED status.
        """
        scheduler = create_scheduler()
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
            resumable=True,
        )
        scheduler.add_request(session)
        session.status = RequestStatus.WAITING_FOR_STREAMING_REQ
        session.num_computed_tokens = len(session.prompt_token_ids)
        # A non-resumable request signals stream completion
        close_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[0],
            resumable=False,
            max_tokens=1,
        )
        scheduler.add_request(close_request)
        # The session should be immediately finished (stream completed)
        assert session.status == RequestStatus.FINISHED_ABORTED
        # The session should be removed from the scheduler
        assert session.request_id not in scheduler.requests

    def test_streaming_request_session_update(self):
        """Test that a resumable request updates a waiting session directly.

        When a session is in WAITING_FOR_STREAMING_REQ state and a new resumable
        request arrives, the update is applied directly via _update_request_as_session,
        not queued.
        """
        scheduler = create_scheduler()
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
            resumable=True,
        )
        scheduler.add_request(session)
        session.status = RequestStatus.WAITING_FOR_STREAMING_REQ
        session.num_computed_tokens = len(session.prompt_token_ids)
        next_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5],
            resumable=True,
        )
        scheduler.add_request(next_request)
        # With the new behavior, when session is in WAITING_FOR_STREAMING_REQ,
        # the update is applied directly (not queued), and session status
        # becomes WAITING
        assert session.status == RequestStatus.WAITING
        assert session.prompt_token_ids == [1, 2, 3, 4, 5]
        _ = scheduler.schedule()
        assert session.status == RequestStatus.RUNNING

    def test_update_request_as_session_with_output_tokens(self):
        """Computed output tokens fold into the prompt; uncomputed ones drop."""
        scheduler = create_scheduler()
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],  # 3 prompt tokens
        )
        session.append_output_token_ids([10, 11])
        """
        The last output token (11) hasn't been "scheduled" yet, so `num_computed_tokens`
        only includes: 3 prompt + 1 output (the 10) = 4
        """
        session.num_computed_tokens = 4
        new_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5],
        )
        update = StreamingUpdate.from_request(new_request)
        scheduler._update_request_as_session(session, update)
        # _update_request_as_session keeps computed output tokens (they become
        # part of the prompt) and only discards the final uncomputed sampled
        # token. Computed output token 10 is kept, uncomputed token 11 is
        # discarded.
        assert session._all_token_ids == [1, 2, 3, 10, 4, 5]
        assert session.prompt_token_ids == [1, 2, 3, 10, 4, 5]
        # Output tokens list is cleared
        assert session._output_token_ids == []
        # num_computed_tokens is unchanged (KV cache still valid for computed
        # tokens)
        assert session.num_computed_tokens == 4
        # Verify that the next schedule will only process the new prompt tokens
        # num_new_tokens = num_tokens - num_computed_tokens = 6 - 4 = 2
        num_new_tokens = session.num_tokens - session.num_computed_tokens
        assert num_new_tokens == 2

    def test_streaming_e2e_lifecycle(self):
        """
        Comprehensive integration test covering complete streaming request lifecycle
        including scheduler state management and aliasing bug prevention.

        FULL LIFECYCLE:
        ================
        CYCLE 1 (Initial Decode):
        1. Add streaming request (seq_id=0) with prompt tokens [1,2,3]
        2. Schedule() creates NewRequestData with prompt_token_ids
        3. Model runner caches this prompt_token_ids reference (simulated)
        4. Model executes and generates output token 10
        5. update_from_output() appends token 10 to request._all_token_ids
        6. Request transitions to RUNNING state

        CYCLE 2 (Continue Decode):
        7. Schedule() again - request is now in scheduled_cached_reqs (not new)
        8. Model runner uses CACHED state to calculate num_tokens
        9. Model generates output token (STOP_TOKEN)
        10. update_from_output() appends STOP_TOKEN to request._all_token_ids
        11. Request transitions to WAITING_FOR_STREAMING_REQ

        CYCLE 3 (New Streaming Request):
        12. Add new streaming request (seq_id=1) with prompt tokens [4,5]
        13. Scheduler merges into session, creates NewRequestData again
        14. Model runner caches new prompt_token_ids reference
        15. Verify cached state from Cycle 1 wasn't corrupted by mutations

        CRITICAL BUG PREVENTION:
        ========================
        Without .copy() in _create_new_request_data():
        - Cycle 1 Step 3: cached_state["prompt_token_ids"] aliases
          request._all_token_ids
        - Cycle 1 Step 5: When appending token 10, cached state mutates:
          [1,2,3] -> [1,2,3,10]
        - Cycle 2 Step 8: num_tokens = len([1,2,3,10]) + len([10])
          = 5 (WRONG! Should be 4)
        - Cycle 2: Discard logic would see seq_lens=4 < num_tokens=5
          -> INCORRECTLY DISCARDS

        With .copy() in _create_new_request_data():
        - Cycle 1 Step 3: cached_state["prompt_token_ids"] is independent copy
        - Cycle 1 Step 5: Only request._all_token_ids mutates, cached stays [1,2,3]
        - Cycle 2 Step 8: num_tokens = len([1,2,3]) + len([10]) = 4 (CORRECT)
        - Cycle 2: Discard logic works correctly
        """
        scheduler = create_scheduler()

        # ═══════════════════════════════════════════════════════════════════
        # CYCLE 1: Initial Request Scheduling and First Decode
        # ═══════════════════════════════════════════════════════════════════
        session = DummyRequest(
            request_id="session",
            prompt_token_ids=[1, 2, 3],
        )
        scheduler.add_request(session)

        # Step 2: Schedule creates NewRequestData
        scheduler_output_cycle1 = scheduler.schedule()
        # Verify request is in scheduled_new_reqs (first time scheduling)
        assert len(scheduler_output_cycle1.scheduled_new_reqs) == 1
        new_req_data_cycle1 = scheduler_output_cycle1.scheduled_new_reqs[0]
        assert new_req_data_cycle1.prompt_token_ids == [1, 2, 3]
        assert (
            scheduler_output_cycle1.num_scheduled_tokens[session.request_id] == 3
        )  # [1, 2, 3]
        assert (
            session.request_id
            not in scheduler_output_cycle1.scheduled_cached_reqs.req_ids
        )

        # Step 3: Simulate model runner caching the prompt_token_ids
        # This simulates gpu_model_runner.py:706-720 CachedRequestState creation
        # The model runner makes a copy of prompt_token_ids when creating
        # CachedRequestState
        cached_state_cycle1 = {
            "req_id": session.request_id,
            "prompt_token_ids": list(
                new_req_data_cycle1.prompt_token_ids
            ),  # Explicit copy
            "output_token_ids": [],
            "num_computed_tokens": 0,
        }
        # Store original for verification
        original_cached_prompt_cycle1 = cached_state_cycle1["prompt_token_ids"].copy()

        # Step 4-5: Model execution generates token, scheduler updates request
        output_token_1 = 10
        cached_state_cycle1["output_token_ids"].append(output_token_1)
        mro_cycle1 = ModelRunnerOutput(
            req_ids=[session.request_id],
            req_id_to_index={session.request_id: 0},
            sampled_token_ids=[[output_token_1]],
            logprobs=None,
            prompt_logprobs_dict={session.request_id: None},
            pooler_output=[],
        )
        session.num_computed_tokens = len(session.prompt_token_ids)
        eco_dict_cycle1 = scheduler.update_from_output(
            scheduler_output_cycle1, mro_cycle1
        )

        # Step 6: Verify request state after Cycle 1
        eco_cycle1 = eco_dict_cycle1[session.client_index].outputs[0]
        assert eco_cycle1.finish_reason is None  # Not stopped yet
        assert session.status == RequestStatus.RUNNING
        assert session in scheduler.running
        assert session._all_token_ids == [1, 2, 3, 10]  # Mutation happened here
        # CRITICAL ASSERTION: Cached prompt_token_ids must NOT have changed
        assert (
            cached_state_cycle1["prompt_token_ids"] == original_cached_prompt_cycle1
        ), (
            f"ALIASING BUG DETECTED in Cycle 1! "
            f"cached_state['prompt_token_ids'] was mutated from "
            f"{original_cached_prompt_cycle1} to "
            f"{cached_state_cycle1['prompt_token_ids']}. "
            f"This means _create_new_request_data() didn't call .copy()!"
        )
        assert cached_state_cycle1["prompt_token_ids"] is not session._all_token_ids, (
            "ALIASING BUG! cached_state['prompt_token_ids'] is the same object as "
            "session._all_token_ids. They must be independent copies."
        )

        # ═══════════════════════════════════════════════════════════════════
        # CYCLE 2: Continue Decoding (Using Cached State)
        # ═══════════════════════════════════════════════════════════════════
        # Step 7: Schedule again - now request uses cached state
        scheduler_output_cycle2 = scheduler.schedule()
        # Verify request is NOT in scheduled_new_reqs (already cached)
        assert not scheduler_output_cycle2.scheduled_new_reqs
        assert (
            session.request_id in scheduler_output_cycle2.scheduled_cached_reqs.req_ids
        )
        assert (
            scheduler_output_cycle2.num_scheduled_tokens[session.request_id] == 1
        )  # Only the output token [10]

        # Step 8: Calculate num_tokens like gpu_model_runner.py:1284 does
        # This is where the bug would manifest!
        num_tokens_cycle2 = len(cached_state_cycle1["prompt_token_ids"]) + len(
            cached_state_cycle1["output_token_ids"]
        )
        # CRITICAL ASSERTION: num_tokens must be correct (3 prompt + 1 output = 4)
        # Without .copy(), cached_state["prompt_token_ids"] would be [1,2,3,10]
        # and num_tokens would incorrectly be 5, causing the discard bug
        expected_num_tokens_cycle2 = 4
        assert num_tokens_cycle2 == expected_num_tokens_cycle2, (
            f"DISCARD BUG WOULD TRIGGER! num_tokens calculation is wrong. "
            f"Expected {expected_num_tokens_cycle2}, got {num_tokens_cycle2}. "
            f"cached_state['prompt_token_ids'] = "
            f"{cached_state_cycle1['prompt_token_ids']} (should be [1,2,3], not [1,2,3,"
            f"10]). Without .copy(), this would be 5 = len([1,2,3,10]) + len([10]). "
            f"Discard logic would see: seq_lens={session.num_computed_tokens} "
            f"< num_tokens={num_tokens_cycle2}, triggering incorrect discard!"
        )

        # Step 9-10: Model generates STOP_TOKEN, scheduler updates
        output_token_2 = STOP_TOKEN
        cached_state_cycle1["output_token_ids"].append(output_token_2)
        mro_cycle2 = ModelRunnerOutput(
            req_ids=[session.request_id],
            req_id_to_index={session.request_id: 0},
            sampled_token_ids=[[output_token_2]],
            logprobs=None,
            prompt_logprobs_dict={session.request_id: None},
            pooler_output=[],
        )
        eco_dict_cycle2 = scheduler.update_from_output(
            scheduler_output_cycle2, mro_cycle2
        )

        # Step 11: Verify request transitioned to WAITING_FOR_STREAMING_REQ
        eco_cycle2 = eco_dict_cycle2[session.client_index].outputs[0]
        assert eco_cycle2.finish_reason == FinishReason.STOP
        assert session.status == RequestStatus.WAITING_FOR_STREAMING_REQ
        assert session in scheduler.waiting
        assert session._all_token_ids == [1, 2, 3, 10, STOP_TOKEN]
        # CRITICAL ASSERTION: Cached prompt_token_ids STILL must not have changed
        assert cached_state_cycle1["prompt_token_ids"] == [1, 2, 3], (
            f"ALIASING BUG DETECTED in Cycle 2! "
            f"cached_state['prompt_token_ids'] = "
            f"{cached_state_cycle1['prompt_token_ids']} (should still be [1,2,3]). "
            f"Mutations from update_from_output() leaked through!"
        )

        # ═══════════════════════════════════════════════════════════════════
        # CYCLE 3: New Streaming Request (Session Continuation)
        # ═══════════════════════════════════════════════════════════════════
        # Step 12: Add new streaming request with seq_id=1
        new_request = DummyRequest(
            request_id="session",
            prompt_token_ids=[4, 5],
        )
        scheduler.add_request(new_request)
        # With the new streaming API, when session is in WAITING_FOR_STREAMING_REQ,
        # the update is applied directly via _update_request_as_session (not queued).
        # The session status becomes WAITING after the update is applied.
        assert session.status == RequestStatus.WAITING

        # Step 13: Scheduler schedules the updated session
        scheduler_output_cycle3 = scheduler.schedule()
        # Verify scheduler created NewRequestData with merged prompt_token_ids
        assert len(scheduler_output_cycle3.scheduled_new_reqs) == 1
        assert (
            scheduler_output_cycle3.scheduled_new_reqs[0].prompt_token_ids
            == session.prompt_token_ids
        )
        assert (
            scheduler_output_cycle3.num_scheduled_tokens[session.request_id] == 2
        )  # Only new tokens [4, 5]
        # Computed output tokens are kept (become part of prompt), only the
        # final uncomputed sampled token (STOP_TOKEN) is discarded
        assert session._all_token_ids == [1, 2, 3, 10, 4, 5]
        assert session.prompt_token_ids == [1, 2, 3, 10, 4, 5]  # Includes kept output
        assert session._output_token_ids == []  # Output tokens are cleared

        # Step 14: Model runner caches NEW prompt_token_ids reference
        # The model runner makes a copy of prompt_token_ids when creating
        # CachedRequestState
        new_req_data_cycle3 = scheduler_output_cycle3.scheduled_new_reqs[0]
        cached_state_cycle3 = {
            "req_id": session.request_id,
            "prompt_token_ids": list(
                new_req_data_cycle3.prompt_token_ids
            ),  # Explicit copy
            "output_token_ids": [],
            "num_computed_tokens": session.num_computed_tokens,
        }

        # Step 15: FINAL CRITICAL VERIFICATION
        # The old cached state from Cycle 1 must still be unchanged
        assert cached_state_cycle1["prompt_token_ids"] == [1, 2, 3], (
            f"PERSISTENT ALIASING BUG! Even after new scheduling cycle, "
            f"old cached_state was mutated to "
            f"{cached_state_cycle1['prompt_token_ids']}. This proves the aliasing bug "
            f"exists!"
        )
        # The new cached state must be independent
        assert cached_state_cycle3["prompt_token_ids"] is not session._all_token_ids, (
            "ALIASING BUG in Cycle 3! Cached state is aliased to _all_token_ids."
        )
        # Both cached states must be independent of each other
        assert (
            cached_state_cycle1["prompt_token_ids"]
            is not cached_state_cycle3["prompt_token_ids"]
        ), "Cached states from different cycles should be independent objects."
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/streaming_input/test_scheduler_streaming.py",
"license": "Apache License 2.0",
"lines": 490,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/worker/gpu/kv_connector.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
from typing import TYPE_CHECKING
import torch
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer import (
get_kv_transfer_group,
has_kv_transfer_group,
kv_transfer_state,
)
from vllm.distributed.kv_transfer.kv_connector.utils import copy_kv_blocks
from vllm.forward_context import (
get_forward_context,
is_forward_context_available,
set_forward_context,
)
from vllm.v1.outputs import (
EMPTY_MODEL_RUNNER_OUTPUT,
KVConnectorOutput,
ModelRunnerOutput,
)
if TYPE_CHECKING:
from vllm.v1.core.sched.output import SchedulerOutput
class KVConnector:
    """KVConnector interface used by GPUModelRunner.

    The base class is a no-op implementation, used when no KV transfer
    group is configured; ActiveKVConnector provides the real behavior.
    """

    def pre_forward(self, scheduler_output: "SchedulerOutput") -> None:
        # Hook called before the model forward pass; no-op in the base class.
        pass

    def post_forward(
        self, scheduler_output: "SchedulerOutput", wait_for_save: bool = True
    ) -> KVConnectorOutput | None:
        # Hook called after the model forward pass; the base class reports
        # no connector activity.
        return None

    def no_forward(self, scheduler_output: "SchedulerOutput") -> ModelRunnerOutput:
        # Called for steps where the model is not executed at all.
        return EMPTY_MODEL_RUNNER_OUTPUT

    def set_disabled(self, disabled: bool) -> None:
        # Toggle used to temporarily disable the connector; no-op here.
        pass
class ActiveKVConnector(KVConnector):
    """KVConnector implementation backed by the process-wide KV transfer group."""

    def __init__(
        self, vllm_config: VllmConfig, kv_caches_dict: dict[str, torch.Tensor]
    ):
        self.vllm_config = vllm_config
        self.kv_connector = get_kv_transfer_group()
        # Register kv caches with KV Connector if applicable.
        # TODO: support cross_layers_kv_cache
        # (see https://github.com/vllm-project/vllm/pull/27743)
        self.kv_connector.register_kv_caches(kv_caches_dict)
        self.kv_connector.set_host_xfer_buffer_ops(copy_kv_blocks)
        # When disabled, every hook below becomes a no-op (see set_disabled).
        self._disabled = False

    def pre_forward(self, scheduler_output: "SchedulerOutput") -> None:
        """Bind this step's connector metadata and start loading KV data."""
        if self._disabled:
            return
        if scheduler_output.preempted_req_ids:
            self.kv_connector.handle_preemptions(scheduler_output.preempted_req_ids)
        kv_connector_metadata = scheduler_output.kv_connector_metadata
        assert kv_connector_metadata is not None
        # Metadata must be bound before start_load_kv is called.
        self.kv_connector.bind_connector_metadata(kv_connector_metadata)
        # TODO: sort out KV Connectors' use of forward_context
        if is_forward_context_available():
            self.kv_connector.start_load_kv(get_forward_context())
        else:
            # No forward context yet (e.g. outside a model run): create a
            # temporary one so the connector API can be called.
            with set_forward_context(None, self.vllm_config):
                self.kv_connector.start_load_kv(get_forward_context())

    def post_forward(
        self,
        scheduler_output: "SchedulerOutput",
        wait_for_save: bool = True,
        clear_metadata: bool = True,
    ) -> KVConnectorOutput | None:
        """Collect this step's transfer results from the connector.

        :param scheduler_output: the step's scheduler output.
        :param wait_for_save: if True, wait for pending KV saves first.
        :param clear_metadata: if True, unbind the connector metadata at the
            end; pass False when it is still needed (see clear_metadata()).
        :return: the step's KVConnectorOutput, or None when disabled.
        """
        if self._disabled:
            return None
        output = KVConnectorOutput()
        if wait_for_save:
            self.kv_connector.wait_for_save()
        output.finished_sending, output.finished_recving = (
            self.kv_connector.get_finished(scheduler_output.finished_req_ids)
        )
        output.invalid_block_ids = self.kv_connector.get_block_ids_with_load_errors()
        output.kv_connector_stats = self.kv_connector.get_kv_connector_stats()
        output.kv_cache_events = self.kv_connector.get_kv_connector_kv_cache_events()
        if clear_metadata:
            self.kv_connector.clear_connector_metadata()
        return output

    def clear_metadata(self) -> None:
        """Clear the connector metadata. Call this after draft model runs."""
        if not self._disabled:
            self.kv_connector.clear_connector_metadata()

    def no_forward(self, scheduler_output: "SchedulerOutput") -> ModelRunnerOutput:
        """Run only the KV-transfer part of a step with no model execution."""
        if self._disabled:
            return EMPTY_MODEL_RUNNER_OUTPUT
        self.pre_forward(scheduler_output)
        kv_connector_output = self.post_forward(scheduler_output, wait_for_save=False)
        if kv_connector_output is None or kv_connector_output.is_empty():
            return EMPTY_MODEL_RUNNER_OUTPUT
        # Shallow-copy the shared empty output before attaching per-step data,
        # so the module-level EMPTY_MODEL_RUNNER_OUTPUT is never mutated.
        output = copy.copy(EMPTY_MODEL_RUNNER_OUTPUT)
        output.kv_connector_output = kv_connector_output
        return output

    def set_disabled(self, disabled: bool) -> None:
        # Ensure that layer-wise connector hooks aren't called when disabled.
        kv_transfer_state._KV_CONNECTOR_AGENT = None if disabled else self.kv_connector
        self._disabled = disabled
# Shared stateless singleton used when no KV transfer group is configured.
NO_OP_KV_CONNECTOR = KVConnector()
def get_kv_connector(
    vllm_config: VllmConfig, kv_caches_dict: dict[str, torch.Tensor]
) -> KVConnector:
    """Return the KV connector appropriate for the current process state.

    An ActiveKVConnector is built when a KV transfer group exists; otherwise
    the shared no-op connector singleton is returned.
    """
    if has_kv_transfer_group():
        return ActiveKVConnector(vllm_config, kv_caches_dict)
    # No-op connector.
    return NO_OP_KV_CONNECTOR
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/kv_connector.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/e2e/test_mamba_prefix_cache.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import multiprocessing as mp
import os
import traceback
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import datasets
import pytest
import torch
from tests.utils import create_new_process_for_each_test
from vllm import LLM, SamplingParams, TokensPrompt
from vllm.config import CacheConfig
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.model_executor.layers.mamba.mamba_utils import MambaStateCopyFunc
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backends.utils import CommonAttentionMetadata
from vllm.v1.core.kv_cache_manager import KVCacheBlocks, KVCacheManager
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.engine.core_client import InprocClient
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.outputs import SamplerOutput
from vllm.v1.request import Request
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
from vllm.v1.worker import mamba_utils
from vllm.v1.worker.gpu_input_batch import CachedRequestState
from vllm.v1.worker.gpu_model_runner import GPUModelRunner
from vllm.v1.worker.lora_model_runner_mixin import GPUInputBatch
from vllm.v1.worker.mamba_utils import get_mamba_groups
@dataclass
class StepAction:
    # Expected number of computed tokens at the start of this engine step.
    num_computed_tokens_start: int
    # Expected number of tokens scheduled in this engine step.
    num_scheduled_tokens: int
    # Expected null(0)/non-null(1) pattern of the request's KV blocks.
    kv_cache_block_ids: list[int]  # [] to follow last step
    # Expected (src_block_idx, dest_block_idx) of the mamba state copy
    # performed before the forward pass.
    preprocess_copy_idx: tuple[int, int]  # -1, -1 for no copy
    # Expected (src_block_idx, dest_block_idx) of the mamba state copy
    # performed after the forward pass.
    postprocess_copy_idx: tuple[int, int]  # -1, -1 for no copy
# Speculative-decoding setup shared by all test cases.
num_speculative_tokens = 3
# Number of draft tokens the fake sampler "accepts" each step
# (overwritten per test case).
num_accepted_tokens = 1
# Token ids of the full article prompt; filled in once a tokenizer exists.
prompt_token_ids: list[int] = []

MODEL = "Qwen/Qwen3-Next-80B-A3B-Instruct-FP8"
BLOCK_SIZE = 560
NUM_HIDDEN_LAYERS = 1

# Cursor over the per-step expectations of the currently running test case,
# advanced by the patched InprocClient.get_output.
cur_step_action_idx = 0
cur_step_action: StepAction | None = None
step_actions: list[StepAction] = []
def get_fake_sample_fn() -> Callable:
    """Build a replacement for ``GPUModelRunner._sample``.

    The returned function deterministically "samples" the next token(s)
    straight from the global ``prompt_token_ids``, so generation replays the
    article text regardless of the model's logits.

    Fixes vs. original: the return annotation claimed ``SamplerOutput`` even
    though a callable is returned, and a local variable typo is corrected.
    """

    def fake_sample_fn(
        self: GPUModelRunner,
        logits: torch.Tensor | None,
        spec_decode_metadata: SpecDecodeMetadata | None,
    ) -> SamplerOutput:
        assert logits is not None
        num_computed_tokens_cpu_tensor = self.input_batch.num_computed_tokens_cpu_tensor
        num_computed_tokens = num_computed_tokens_cpu_tensor[0].item()
        # During prefill the next token is the one right after the prompt;
        # during decode it is the token after the computed prefix.
        if num_computed_tokens < self.input_batch.num_prompt_tokens[0].item():
            first_token_id_index = self.input_batch.num_prompt_tokens[0].item()
        else:
            first_token_id_index = num_computed_tokens + 1
        if spec_decode_metadata is None:
            # No speculation: emit a single next token.
            return SamplerOutput(
                sampled_token_ids=torch.tensor(
                    [[prompt_token_ids[first_token_id_index]]],
                    device="cuda",
                    dtype=torch.int32,
                ),
                logprobs_tensors=None,
            )
        # Speculative step: pretend that exactly `num_accepted_tokens` draft
        # tokens were accepted (bounded by the number of sampled positions).
        accepted_tokens = prompt_token_ids[
            first_token_id_index : first_token_id_index
            + min(num_accepted_tokens, logits.shape[0])
        ]
        return SamplerOutput(
            sampled_token_ids=torch.tensor(
                [accepted_tokens], device="cuda", dtype=torch.int32
            ),
            logprobs_tensors=None,
        )

    return fake_sample_fn
def get_fake_propose_draft_token_ids_fn() -> Callable:
    """Build a replacement for ``GPUModelRunner.propose_draft_token_ids`` that
    proposes the next ``num_speculative_tokens`` tokens of
    ``prompt_token_ids`` as the draft, so acceptance is fully deterministic."""

    def fake_propose_draft_token_ids_fn(
        self: GPUModelRunner,
        scheduler_output: SchedulerOutput,
        sampled_token_ids: torch.Tensor | list[list[int]],
        sampling_metadata: SamplingMetadata,
        hidden_states: torch.Tensor,
        sample_hidden_states: torch.Tensor,
        aux_hidden_states: list[torch.Tensor] | None,
        spec_decode_metadata: SpecDecodeMetadata | None,
        common_attn_metadata: CommonAttentionMetadata,
        slot_mappings: dict[str, torch.Tensor] | list[dict[str, torch.Tensor]] | None,
    ) -> torch.Tensor:
        num_computed_tokens_cpu_tensor = self.input_batch.num_computed_tokens_cpu_tensor
        num_computed_tokens = num_computed_tokens_cpu_tensor[0].item()
        # Still in prefill: the first draft starts right after the prompt.
        if (
            self.input_batch.num_tokens_no_spec[0].item()
            <= self.input_batch.num_prompt_tokens[0].item()
        ):
            first_token_id_index = self.input_batch.num_prompt_tokens[0].item()
        else:
            first_token_id_index = (
                num_computed_tokens + 1
            )  # bonus token isn't considered as computed
        first_token_id_index += self.input_batch.num_accepted_tokens_cpu[0].item()
        proposed_draft_token_ids = [
            prompt_token_ids[
                first_token_id_index : first_token_id_index + num_speculative_tokens
            ]
        ]
        # The tokens the fake sampler will "accept" next step.
        next_token_ids = torch.tensor(
            prompt_token_ids[
                first_token_id_index - 1 : first_token_id_index
                - 1
                + num_accepted_tokens
            ],
            device="cuda",
            dtype=torch.int32,
        )
        valid_sampled_tokens_count = torch.tensor(
            [num_accepted_tokens], device="cuda", dtype=torch.int32
        )
        # NOTE(review): presumably mirrors the bookkeeping the real propose
        # path performs — confirm against GPUModelRunner.
        self._copy_valid_sampled_token_count(next_token_ids, valid_sampled_tokens_count)
        return torch.tensor(proposed_draft_token_ids, device="cuda", dtype=torch.int32)

    return fake_propose_draft_token_ids_fn
def get_fake_step_action_fn(original_step_action_fn: Callable):
    """Wrap ``InprocClient.get_output`` so every call advances the global
    step-action cursor before delegating to the original implementation."""

    def fake_get_output(self: InprocClient):
        global cur_step_action
        global cur_step_action_idx
        exhausted = cur_step_action_idx >= len(step_actions)
        if exhausted:
            # No expectations left for this test case.
            cur_step_action = None
        else:
            cur_step_action = step_actions[cur_step_action_idx]
            cur_step_action_idx += 1
        print(f"cur_step_action: {cur_step_action_idx=} {cur_step_action=}")
        return original_step_action_fn(self)

    return fake_get_output
def get_fake_allocate_slots_fn(original_allocate_slots_fn: Callable):
    """Wrap ``KVCacheManager.allocate_slots`` to verify, after each allocation,
    that the request's null/non-null block pattern matches the expectation of
    the current StepAction."""

    def fake_allocate_slots_fn(
        self: KVCacheManager,
        request: Request,
        num_new_tokens: int,
        num_new_computed_tokens: int = 0,
        new_computed_blocks: KVCacheBlocks | None = None,
        num_lookahead_tokens: int = 0,
        num_external_computed_tokens: int = 0,
        delay_cache_blocks: bool = False,
        num_encoder_tokens: int = 0,
    ):
        ret = original_allocate_slots_fn(
            self,
            request,
            num_new_tokens,
            num_new_computed_tokens,
            new_computed_blocks,
            num_lookahead_tokens,
            num_external_computed_tokens,
            delay_cache_blocks,
            num_encoder_tokens,
        )
        if cur_step_action is not None:
            cur_block_ids = self.coordinator.single_type_managers[0].req_to_blocks[
                request.request_id
            ]
            # Encode each block as 1 (real block) or 0 (null placeholder) and
            # compare against the step's expected pattern.
            not_null_block_flags = [not block.is_null for block in cur_block_ids]
            block_ids = [1 if block else 0 for block in not_null_block_flags]
            assert block_ids == cur_step_action.kv_cache_block_ids
        return ret

    return fake_allocate_slots_fn
# Snapshots of mamba states for completed, block-aligned blocks, keyed by the
# (block-aligned) number of computed tokens. Filled by fake_execute_model_fn.
mamba_kv_cache_dict = {}
def get_fake_execute_model_fn(original_execute_model_fn: Callable):
    """Wrap ``GPUModelRunner.execute_model`` to (a) check each step against the
    current StepAction and (b) snapshot the mamba state of every newly
    completed, block-aligned block into ``mamba_kv_cache_dict``."""
    # Number of computed tokens observed in the previous step, used to detect
    # when a new aligned block has been completed.
    last_num_computed_tokens = 0
    num_prompt_tokens = None

    def fake_execute_model_fn(
        self: GPUModelRunner,
        scheduler_output: SchedulerOutput,
        intermediate_tensors: IntermediateTensors | None = None,
    ):
        if cur_step_action is not None:
            num_scheduled_tokens = next(
                iter(scheduler_output.num_scheduled_tokens.values())
            )
            assert num_scheduled_tokens == cur_step_action.num_scheduled_tokens
        mamba_group_ids, mamba_spec = get_mamba_groups(self.kv_cache_config)
        mamba_group_id = mamba_group_ids[0]
        mamba_layer_name = self.kv_cache_config.kv_cache_groups[
            mamba_group_id
        ].layer_names[0]
        nonlocal last_num_computed_tokens
        nonlocal num_prompt_tokens
        if (
            len(scheduler_output.scheduled_new_reqs) > 0
            and scheduler_output.scheduled_new_reqs[0].prompt_token_ids is not None
        ):
            # record number of prompt tokens
            num_prompt_tokens = len(
                scheduler_output.scheduled_new_reqs[0].prompt_token_ids
            )
        if len(scheduler_output.scheduled_cached_reqs.req_ids) > 0:
            num_computed_tokens = (
                scheduler_output.scheduled_cached_reqs.num_computed_tokens[0]
            )
            if (
                self.num_spec_tokens
                and num_prompt_tokens is not None
                and num_computed_tokens > num_prompt_tokens
            ):
                # NOTE (tdoublep) with async scheduling, the scheduler does not have an
                # accurate measure of the number of computed tokens; we need to subtract
                # the number of reject tokens from the previous timestep.
                num_computed_tokens -= num_speculative_tokens + 1 - num_accepted_tokens
            if (
                num_computed_tokens // BLOCK_SIZE
                > last_num_computed_tokens // BLOCK_SIZE
            ):
                # generated a new aligned block in this step
                block_idx = num_computed_tokens // mamba_spec.block_size - 1
                block_id = (
                    self.input_batch.block_table.block_tables[mamba_group_id]
                    .block_table.cpu[0, block_idx]
                    .item()
                )
                if block_id != 0:
                    kv_cache = self.compilation_config.static_forward_context[
                        mamba_layer_name
                    ].kv_cache
                    # Snapshot both per-block states (conv + temporal for
                    # qwen3-next), keyed by the block-aligned token count.
                    mamba_kv_cache_dict[
                        num_computed_tokens - num_computed_tokens % BLOCK_SIZE
                    ] = (
                        kv_cache[0][0][block_id].clone(),
                        kv_cache[0][1][block_id].clone(),
                    )
            last_num_computed_tokens = num_computed_tokens
        else:
            last_num_computed_tokens = 0
        ret = original_execute_model_fn(self, scheduler_output, intermediate_tensors)
        if cur_step_action is not None:
            assert (
                cur_step_action.num_computed_tokens_start
                == self.input_batch.num_computed_tokens_cpu[0].item()
            )
        return ret

    return fake_execute_model_fn
def get_fake_process_mamba_fn(
    original_preprocess_mamba_fn: Callable,
    original_post_process_mamba_fn: Callable,
    original_copy_fn: Callable,
):
    """Wrap the mamba pre/post-processing and copy functions so that every
    batched state copy is recorded (as src/dst pointers and sizes) and then
    checked against the current StepAction's expected copy indices."""
    # (src_ptrs, dst_ptrs, sizes) of the most recent batched copy, captured by
    # fake_copy_fn and consumed by check_copy_info.
    copy_info: tuple[list[int], list[int], list[int]] | None = None

    def check_copy_info(
        action: tuple[int, int],
        kv_cache_config: KVCacheConfig,
        forward_context: dict[str, Any],
        input_batch: GPUInputBatch,
    ):
        # Verify the recorded copy matches the (src_idx, dst_idx) expectation.
        assert copy_info is not None
        if action == (-1, -1):
            # No copy expected this phase.
            assert len(copy_info[0]) == len(copy_info[1]) == len(copy_info[2]) == 0
        else:
            assert len(copy_info[0]) == len(copy_info[1]) == len(copy_info[2]) == 2
            mamba_group_ids, mamba_spec = get_mamba_groups(kv_cache_config)
            mamba_group_id = mamba_group_ids[0]
            mamba_layer_name = kv_cache_config.kv_cache_groups[
                mamba_group_id
            ].layer_names[0]
            mamba_kv_cache = forward_context[mamba_layer_name].kv_cache[0][-1]
            mamba_block_table = input_batch.block_table.block_tables[
                mamba_group_id
            ].block_table.cpu[0]
            expected_temporal_src = mamba_kv_cache[
                mamba_block_table[action[0]]
            ].data_ptr()
            expected_temporal_dest = mamba_kv_cache[
                mamba_block_table[action[1]]
            ].data_ptr()
            # -1 is qwen3-next's temporal. We skip checking conv as it is more complex.
            assert copy_info[0][-1] == expected_temporal_src
            assert copy_info[1][-1] == expected_temporal_dest

    def fake_preprocess_mamba_fn(
        scheduler_output: SchedulerOutput,
        kv_cache_config: KVCacheConfig,
        cache_config: CacheConfig,
        mamba_state_idx: dict[str, int],
        input_batch: GPUInputBatch,
        requests: dict[str, CachedRequestState],
        forward_context: dict[str, Any],
        mamba_state_copy_funcs: tuple[MambaStateCopyFunc, ...],
        copy_bufs: mamba_utils.MambaCopyBuffers,
    ):
        nonlocal copy_info
        # Reset so fake_copy_fn's assert catches a missed recording.
        copy_info = None
        ret = original_preprocess_mamba_fn(
            scheduler_output,
            kv_cache_config,
            cache_config,
            mamba_state_idx,
            input_batch,
            requests,
            forward_context,
            mamba_state_copy_funcs,
            copy_bufs,
        )
        if cur_step_action is not None:
            check_copy_info(
                cur_step_action.preprocess_copy_idx,
                kv_cache_config,
                forward_context,
                input_batch,
            )
        return ret

    def fake_post_process_mamba_fn(
        scheduler_output: SchedulerOutput,
        kv_cache_config: KVCacheConfig,
        input_batch: GPUInputBatch,
        requests: dict[str, CachedRequestState],
        mamba_state_idx: dict[str, int],
        forward_context: dict[str, Any],
        mamba_state_copy_funcs: tuple[MambaStateCopyFunc, ...],
        copy_bufs: mamba_utils.MambaCopyBuffers,
    ):
        nonlocal copy_info
        copy_info = None
        ret = original_post_process_mamba_fn(
            scheduler_output,
            kv_cache_config,
            input_batch,
            requests,
            mamba_state_idx,
            forward_context,
            mamba_state_copy_funcs,
            copy_bufs,
        )
        if cur_step_action is not None:
            check_copy_info(
                cur_step_action.postprocess_copy_idx,
                kv_cache_config,
                forward_context,
                input_batch,
            )
        return ret

    def fake_copy_fn(copy_bufs: mamba_utils.MambaCopyBuffers):
        # Record the staged copy triples before the real copy runs.
        nonlocal copy_info
        assert copy_info is None
        n = copy_bufs.offset
        src_state_list = copy_bufs.src_ptrs.cpu[:n].tolist()
        dest_state_list = copy_bufs.dst_ptrs.cpu[:n].tolist()
        num_elements_list = copy_bufs.sizes.cpu[:n].tolist()
        copy_info = (src_state_list, dest_state_list, num_elements_list)
        return original_copy_fn(copy_bufs)

    return fake_preprocess_mamba_fn, fake_post_process_mamba_fn, fake_copy_fn
def run_ref_mamba_state_in_subprocess() -> None:
    """Run the reference (no prefix-cache) generation in a spawned process.

    The worker saves reference mamba state snapshots to disk; running it in a
    separate process keeps its CUDA/distributed state out of the test process.

    Raises:
        RuntimeError: if the worker times out or exits with a non-zero code.
    """
    ctx = mp.get_context("spawn")
    proc = ctx.Process(target=_run_ref_mamba_state_worker)
    proc.start()
    proc.join(timeout=600)
    if proc.is_alive():
        # join() timed out (exitcode would be None): don't leave a live GPU
        # worker behind — kill it before failing the test.
        proc.terminate()
        proc.join()
        raise RuntimeError("Ref mamba state process timed out after 600s.")
    if proc.exitcode != 0:
        raise RuntimeError(f"Ref mamba state process exited with code {proc.exitcode}.")
def _run_ref_mamba_state_worker():
    """Subprocess entry point: generate with a plain engine (no prefix cache,
    no speculation) and save the mamba state snapshots collected by the
    patched execute_model to ``mamba_kv_cache_dict_ref.pth``."""
    try:
        os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
        num_generated_tokens = 8000
        num_prompt_tokens = 500
        sampling_params = SamplingParams(
            temperature=0.0, max_tokens=num_generated_tokens
        )
        prompt_dataset = datasets.load_dataset("heheda/a_long_article")
        full_prompt = prompt_dataset["train"][0]["text"]
        # Patch directly (no monkeypatch here): this process exits afterwards.
        fake_execute_model_fn = get_fake_execute_model_fn(GPUModelRunner.execute_model)
        GPUModelRunner.execute_model = fake_execute_model_fn
        fake_sample_fn = get_fake_sample_fn()
        GPUModelRunner._sample = fake_sample_fn
        engine = LLM(
            model=MODEL,
            block_size=BLOCK_SIZE,
            hf_overrides={"num_hidden_layers": NUM_HIDDEN_LAYERS},
            seed=42,
        )
        global prompt_token_ids
        prompt_token_ids = engine.get_tokenizer().encode(full_prompt)
        print(f"Token IDs length: {len(prompt_token_ids)}")
        _outputs = engine.generate(
            [TokensPrompt(prompt_token_ids=prompt_token_ids[:num_prompt_tokens])],
            sampling_params,
        )
        # ref_mamba_kv_cache_dict = torch.load("mamba_kv_cache_dict.pth")
        # check_mamba_state_equal(ref_mamba_kv_cache_dict, mamba_kv_cache_dict)
        # torch.save(mamba_kv_cache_dict, "mamba_kv_cache_dict.pth")
        # Move snapshots to CPU so the parent process can load them from disk.
        cpu_state_ref = {
            key: tuple(tensor.detach().cpu() for tensor in tensors)
            for key, tensors in mamba_kv_cache_dict.items()
        }
        torch.save(cpu_state_ref, "mamba_kv_cache_dict_ref.pth")
        mamba_kv_cache_dict.clear()
        del engine
        torch.cuda.empty_cache()
        cleanup_dist_env_and_memory()
    except Exception:
        # Print the traceback in the child before re-raising so the failure is
        # visible; the parent only sees the exit code.
        traceback.print_exc()
        raise
def check_mamba_state_equal(
    mamba_state_ref: dict,
    mamba_state_new: dict,
    keys_to_check: list[int],
    atol: float = 1e-2,
    rtol: float = 1e-2,
) -> bool:
    """Check that the mamba states under ``keys_to_check`` match the reference.

    Args:
        mamba_state_ref: reference states, key -> tuple of tensors.
        mamba_state_new: states under test; each tensor may be longer than its
            reference counterpart and is truncated before comparison.
        keys_to_check: keys that must be present in both dicts.
        atol: absolute tolerance (default keeps the original hard-coded 1e-2).
        rtol: relative tolerance (default keeps the original hard-coded 1e-2).

    Returns:
        True when every checked state matches within tolerance.

    Raises:
        ValueError: if >= 1% of a state's elements differ beyond tolerance.
    """
    for key in keys_to_check:
        assert key in mamba_state_new
        assert key in mamba_state_ref
        # mamba state new is a subset of mamba state ref
        for i, (ref, new) in enumerate(zip(mamba_state_ref[key], mamba_state_new[key])):
            if ref.device != new.device:
                new = new.to(ref.device)
            new = new[: ref.shape[0]]
            if not torch.allclose(ref, new, atol=atol, rtol=rtol):
                diff_mask = ~torch.isclose(ref, new, atol=atol, rtol=rtol)
                diff_idx = torch.nonzero(diff_mask)
                # Tolerate tiny numerical noise: fewer than 1% mismatching
                # elements only warns instead of failing.
                if diff_idx.shape[0] * 100 < ref.numel():
                    print(
                        f"[WARNING] found {diff_idx.shape[0] * 100 / ref.numel()}% of the elements are different"  # noqa: E501
                    )
                    continue
                raise ValueError(
                    f"Mamba state is not equal for key: {key} at index {i}"
                )
    return True
@dataclass
class TestConfig:
    # Number of tokens taken from the article as the prompt.
    num_prompt_tokens: int
    # max_tokens passed to SamplingParams.
    num_generated_tokens: int
    # Number of draft tokens the fake sampler "accepts" each step.
    num_accepted_tokens: int
    # Per-engine-step expectations (see StepAction).
    step_actions: list[StepAction]
def apply_patch(monkeypatch: pytest.MonkeyPatch):
    """Install every fake hook used by the prefix-cache test: deterministic
    sampling/drafting, step-action tracking, block-pattern verification and
    mamba copy recording."""
    monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
    fake_sample_fn = get_fake_sample_fn()
    monkeypatch.setattr(GPUModelRunner, "_sample", fake_sample_fn)
    fake_propose_draft_token_ids_fn = get_fake_propose_draft_token_ids_fn()
    monkeypatch.setattr(
        GPUModelRunner, "propose_draft_token_ids", fake_propose_draft_token_ids_fn
    )
    fake_execute_model_fn = get_fake_execute_model_fn(GPUModelRunner.execute_model)
    monkeypatch.setattr(GPUModelRunner, "execute_model", fake_execute_model_fn)
    fake_step_action_fn = get_fake_step_action_fn(InprocClient.get_output)
    monkeypatch.setattr(InprocClient, "get_output", fake_step_action_fn)
    fake_allocate_slots_fn = get_fake_allocate_slots_fn(KVCacheManager.allocate_slots)
    monkeypatch.setattr(KVCacheManager, "allocate_slots", fake_allocate_slots_fn)
    fake_preprocess_mamba_fn, fake_post_process_mamba_fn, fake_copy_fn = (
        get_fake_process_mamba_fn(
            mamba_utils.preprocess_mamba,
            mamba_utils.postprocess_mamba,
            mamba_utils.do_mamba_copy_block,
        )
    )
    monkeypatch.setattr(mamba_utils, "preprocess_mamba", fake_preprocess_mamba_fn)
    monkeypatch.setattr(mamba_utils, "postprocess_mamba", fake_post_process_mamba_fn)
    monkeypatch.setattr(mamba_utils, "do_mamba_copy_block", fake_copy_fn)
@create_new_process_for_each_test()
def test_mamba_prefix_cache(monkeypatch: pytest.MonkeyPatch):
    """End-to-end check of mamba state handling with prefix caching enabled.

    First generates reference mamba states without prefix caching (in a
    subprocess), then replays a set of prefix-cache scenarios and verifies
    scheduled token counts, block allocation patterns, state copies and the
    final mamba states against the per-step expectations and the reference
    snapshots on disk.
    """
    run_ref_mamba_state_in_subprocess()
    apply_patch(monkeypatch)
    prompt_dataset = datasets.load_dataset("heheda/a_long_article")
    full_prompt = prompt_dataset["train"][0]["text"]
    tests = {
        "accept_1": TestConfig(
            num_prompt_tokens=554,
            num_generated_tokens=20,
            num_accepted_tokens=1,
            step_actions=[
                StepAction(0, 554, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(554, 4, [], (-1, -1), (-1, -1)),
                StepAction(555, 4, [1, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(556, 4, [], (-1, -1), (-1, -1)),
                StepAction(557, 4, [], (0, 1), (-1, -1)),
                StepAction(558, 4, [], (-1, -1), (-1, -1)),
                StepAction(559, 4, [], (-1, -1), (1, 0)),
                StepAction(560, 4, [], (-1, -1), (-1, -1)),
                StepAction(561, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        # test case 2.1: no hit, accept 2 tokens
        "accept_2_1": TestConfig(
            num_prompt_tokens=554,
            num_generated_tokens=20,
            num_accepted_tokens=2,
            step_actions=[
                StepAction(0, 554, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(554, 4, [], (-1, -1), (-1, -1)),
                StepAction(556, 4, [1, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(558, 4, [], (1, 1), (2, 0)),
                StepAction(560, 4, [], (-1, -1), (-1, -1)),
                StepAction(562, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        # test case 2.2: no hit, accept 2 tokens
        "accept_2_2": TestConfig(
            num_prompt_tokens=555,
            num_generated_tokens=20,
            num_accepted_tokens=2,
            step_actions=[
                StepAction(0, 555, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(555, 4, [], (-1, -1), (-1, -1)),
                StepAction(557, 4, [1, 1, 1, 1, 1], (1, 1), (-1, -1)),
                StepAction(559, 4, [], (-1, -1), (1, 0)),
                StepAction(561, 4, [], (-1, -1), (-1, -1)),
                StepAction(563, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_3_1": TestConfig(
            num_prompt_tokens=553,
            num_generated_tokens=20,
            num_accepted_tokens=3,
            step_actions=[
                StepAction(0, 553, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(553, 4, [], (-1, -1), (-1, -1)),
                StepAction(556, 4, [1, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(559, 4, [], (2, 1), (1, 0)),
                StepAction(562, 4, [], (-1, -1), (-1, -1)),
                StepAction(565, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_3_2": TestConfig(
            num_prompt_tokens=554,
            num_generated_tokens=20,
            num_accepted_tokens=3,
            step_actions=[
                StepAction(0, 554, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(554, 4, [], (-1, -1), (-1, -1)),
                StepAction(557, 4, [1, 1, 1, 1, 1], (2, 1), (3, 0)),
                StepAction(560, 4, [], (-1, -1), (-1, -1)),
                StepAction(563, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_3_3": TestConfig(
            num_prompt_tokens=555,
            num_generated_tokens=20,
            num_accepted_tokens=3,
            step_actions=[
                StepAction(0, 555, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(555, 4, [], (-1, -1), (-1, -1)),
                StepAction(558, 4, [1, 1, 1, 1, 1], (2, 1), (2, 0)),
                StepAction(561, 4, [], (-1, -1), (-1, -1)),
                StepAction(564, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_4_1": TestConfig(
            num_prompt_tokens=553,
            num_generated_tokens=20,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 553, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(553, 4, [], (-1, -1), (-1, -1)),
                StepAction(557, 4, [1, 1, 1, 1, 1], (3, 1), (3, 0)),
                StepAction(561, 4, [], (-1, -1), (-1, -1)),
                StepAction(565, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_4_2": TestConfig(
            num_prompt_tokens=554,
            num_generated_tokens=25,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 554, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(554, 4, [], (-1, -1), (-1, -1)),
                StepAction(558, 4, [1, 1, 1, 1, 1], (3, 1), (2, 0)),
                StepAction(562, 4, [], (-1, -1), (-1, -1)),
                StepAction(566, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_4_3": TestConfig(
            num_prompt_tokens=555,
            num_generated_tokens=25,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 555, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(555, 4, [], (-1, -1), (-1, -1)),
                StepAction(559, 4, [1, 1, 1, 1, 1], (3, 1), (1, 0)),
                StepAction(563, 4, [], (-1, -1), (-1, -1)),
                StepAction(567, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "accept_4_4": TestConfig(
            num_prompt_tokens=556,
            num_generated_tokens=25,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 556, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(556, 4, [], (-1, -1), (3, 0)),
                StepAction(560, 4, [1, 1, 1, 1, 1], (0, 1), (-1, -1)),
                StepAction(564, 4, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "prompt_block_size": TestConfig(
            num_prompt_tokens=560,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(560, 4, [1, 1, 1, 1, 1], (0, 1), (-1, -1)),
            ],
        ),
        "prompt_2_block_size": TestConfig(
            num_prompt_tokens=560 * 2,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(560, 560, [1, 1, 1, 1, 1], (0, 1), (-1, -1)),
                StepAction(560 * 2, 4, [0, 1, 1, 1, 1, 1], (1, 2), (-1, -1)),
            ],
        ),
        "prompt_2_block_size_10": TestConfig(
            num_prompt_tokens=560 * 2 + 10,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560, [1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(560, 570, [1, 0, 1, 1, 1, 1], (0, 2), (-1, -1)),
                StepAction(560 * 2 + 10, 4, [0, 0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "prompt_3_block_size": TestConfig(
            num_prompt_tokens=560 * 3,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560 * 2, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(560 * 2, 560, [0, 1, 1, 1, 1, 1], (1, 2), (-1, -1)),
                StepAction(560 * 3, 4, [0, 0, 1, 1, 1, 1, 1], (2, 3), (-1, -1)),
            ],
        ),
        "prompt_3_block_size_10": TestConfig(
            num_prompt_tokens=560 * 3 + 10,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560 * 2, [0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(560 * 2, 570, [0, 1, 0, 1, 1, 1, 1], (1, 3), (-1, -1)),
                StepAction(560 * 3 + 10, 4, [0, 0, 0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
            ],
        ),
        "prompt_10_block_size": TestConfig(
            num_prompt_tokens=560 * 10,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560 * 5, [0, 0, 0, 0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(
                    560 * 5,
                    560 * 4,
                    [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1],
                    (4, 8),
                    (-1, -1),
                ),
                StepAction(
                    560 * 9,
                    560,
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                    (8, 9),
                    (-1, -1),
                ),
                StepAction(
                    560 * 10,
                    4,
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                    (9, 10),
                    (-1, -1),
                ),
            ],
        ),
        "prompt_10_block_size_10": TestConfig(
            num_prompt_tokens=560 * 10 + 10,
            num_generated_tokens=10,
            num_accepted_tokens=4,
            step_actions=[
                StepAction(0, 560 * 5, [0, 0, 0, 0, 1, 1, 1, 1], (-1, -1), (-1, -1)),
                StepAction(
                    560 * 5,
                    560 * 4,
                    [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1],
                    (4, 8),
                    (-1, -1),
                ),
                StepAction(
                    560 * 9,
                    560 + 10,
                    [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1],
                    (8, 10),
                    (-1, -1),
                ),
            ],
        ),
    }
    engine = LLM(
        model=MODEL,
        enable_prefix_caching=True,
        block_size=BLOCK_SIZE,
        mamba_cache_mode="align",
        speculative_config={
            "method": "qwen3_next_mtp",
            "num_speculative_tokens": num_speculative_tokens,
        },
        max_num_batched_tokens=3072,
        hf_overrides={"num_hidden_layers": NUM_HIDDEN_LAYERS},
        seed=42,
    )
    global prompt_token_ids
    prompt_token_ids = engine.get_tokenizer().encode(full_prompt)
    print(f"Token IDs length: {len(prompt_token_ids)}")
    for test_case_name, test_config in tests.items():
        print(f"Running test case: {test_case_name}")
        num_generated_tokens = test_config.num_generated_tokens
        num_prompt_tokens = test_config.num_prompt_tokens
        global num_accepted_tokens
        num_accepted_tokens = test_config.num_accepted_tokens
        sampling_params = SamplingParams(
            temperature=0.0, max_tokens=num_generated_tokens
        )
        global cur_step_action_idx
        cur_step_action_idx = 0
        # An empty kv_cache_block_ids means "same expectation as the previous
        # step"; materialize it by copying forward from the predecessor.
        for step_action_prev, step_action_next in zip(
            test_config.step_actions[:-1], test_config.step_actions[1:]
        ):
            if (
                step_action_next.kv_cache_block_ids is not None
                and len(step_action_next.kv_cache_block_ids) == 0
            ):
                prev_block_ids = step_action_prev.kv_cache_block_ids
                if prev_block_ids is not None:
                    step_action_next.kv_cache_block_ids = prev_block_ids.copy()
        global step_actions
        step_actions = test_config.step_actions
        _ = engine.generate(
            [TokensPrompt(prompt_token_ids=prompt_token_ids[:num_prompt_tokens])],
            sampling_params,
        )
        # Flush the prefix cache so test cases stay independent.
        assert engine.llm_engine.engine_core.engine_core.scheduler.reset_prefix_cache()
        print(f"End test case: {test_case_name}")
        # Every postprocess copy lands a block-aligned state; compare those
        # states against the reference run's snapshots.
        keys_to_check = [
            (action.postprocess_copy_idx[1] + 1) * BLOCK_SIZE
            for action in test_config.step_actions
            if action.postprocess_copy_idx and action.postprocess_copy_idx[0] != -1
        ]
        mamba_state_ref = torch.load("mamba_kv_cache_dict_ref.pth")
        check_mamba_state_equal(mamba_state_ref, mamba_kv_cache_dict, keys_to_check)
        mamba_kv_cache_dict.clear()
    del engine
    torch.cuda.empty_cache()
    cleanup_dist_env_and_memory()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/e2e/test_mamba_prefix_cache.py",
"license": "Apache License 2.0",
"lines": 747,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/worker/mamba_utils.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import dataclasses
import itertools
from collections.abc import Callable
from typing import Any
import torch
from vllm.config import CacheConfig
from vllm.model_executor.layers.mamba.mamba_utils import (
MambaStateCopyFunc,
)
from vllm.triton_utils import tl, triton
from vllm.utils.math_utils import cdiv
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.kv_cache_interface import KVCacheConfig, MambaSpec
from vllm.v1.utils import CpuGpuBuffer
from vllm.v1.worker.gpu_input_batch import CachedRequestState
from vllm.v1.worker.lora_model_runner_mixin import GPUInputBatch
@triton.jit
def batch_memcpy_kernel(src_ptrs, dst_ptrs, sizes, BLOCK_SIZE: tl.constexpr):
    """Copy ``sizes[pid]`` bytes from ``src_ptrs[pid]`` to ``dst_ptrs[pid]``.

    One program instance handles one (src, dst, size) triple; pointers are
    raw int64 addresses reinterpreted as uint8 (byte-wise copy).
    """
    pid = tl.program_id(0)
    src_ptr = tl.load(src_ptrs + pid)
    dst_ptr = tl.load(dst_ptrs + pid)
    size = tl.load(sizes + pid)
    offsets = tl.arange(0, BLOCK_SIZE)
    # Walk the buffer BLOCK_SIZE bytes at a time; the mask guards the final
    # partial block.
    for i in range(0, size, BLOCK_SIZE):
        mask = (i + offsets) < size
        curr_src_ptr = (src_ptr + i + offsets).to(tl.pointer_type(tl.uint8))
        curr_dst_ptr = (dst_ptr + i + offsets).to(tl.pointer_type(tl.uint8))
        data = tl.load(curr_src_ptr, mask=mask)
        tl.store(curr_dst_ptr, data, mask=mask)
def batch_memcpy(src_ptrs, dst_ptrs, sizes):
    """Launch one kernel instance per entry to copy each (src, dst, size) pair."""
    batch = src_ptrs.shape[0]
    assert dst_ptrs.shape[0] == batch
    assert sizes.shape[0] == batch
    # One program per copy; each program strides through its buffer in
    # 1024-byte chunks.
    BLOCK_SIZE = 1024
    batch_memcpy_kernel[(batch,)](src_ptrs, dst_ptrs, sizes, BLOCK_SIZE=BLOCK_SIZE)
def get_mamba_groups(kv_cache_config: KVCacheConfig) -> tuple[list[int], MambaSpec]:
    """Return the indices of all mamba KV-cache groups and their shared spec.

    Asserts that at least one mamba group exists and that all mamba groups
    use an identical spec.
    """
    mamba_group_ids: list[int] = []
    mamba_specs: list[MambaSpec] = []
    for group_id, group in enumerate(kv_cache_config.kv_cache_groups):
        spec = group.kv_cache_spec
        if isinstance(spec, MambaSpec):
            mamba_group_ids.append(group_id)
            mamba_specs.append(spec)
    assert len(mamba_group_ids) > 0, "no mamba layers in the model"
    first_spec = mamba_specs[0]
    assert all(first_spec == spec for spec in mamba_specs)
    return mamba_group_ids, first_spec
@dataclasses.dataclass
class MambaCopyBuffers:
    """CPU/GPU staging buffers for batched mamba state block copies.

    Producers (collect_mamba_copy_meta) append (src pointer, dst pointer,
    byte size) triples CPU-side; do_mamba_copy_block uploads the first
    ``offset`` entries and runs the batched memcpy kernel.
    """

    # Source addresses (int64 device pointers).
    src_ptrs: CpuGpuBuffer
    # Destination addresses (int64 device pointers).
    dst_ptrs: CpuGpuBuffer
    # Copy sizes in bytes.
    sizes: CpuGpuBuffer
    # Number of valid staged entries.
    offset: int = 0

    @classmethod
    def create(
        cls,
        max_num_reqs: int,
        kv_cache_config: KVCacheConfig,
        copy_funcs: tuple[MambaStateCopyFunc, ...],
        make_buffer: Callable[..., CpuGpuBuffer],
    ) -> "MambaCopyBuffers":
        """Allocate buffers sized for the worst case: every request copying
        every state of every mamba layer."""
        mamba_group_ids, _ = get_mamba_groups(kv_cache_config)
        entries_per_req = sum(
            len(kv_cache_config.kv_cache_groups[gid].layer_names)
            for gid in mamba_group_ids
        ) * len(copy_funcs)
        n = max_num_reqs * entries_per_req
        return cls(
            src_ptrs=make_buffer(n, dtype=torch.int64),
            dst_ptrs=make_buffer(n, dtype=torch.int64),
            sizes=make_buffer(n, dtype=torch.int32),
        )
def collect_mamba_copy_meta(
    copy_bufs: MambaCopyBuffers,
    kv_cache_config: KVCacheConfig,
    mamba_state_copy_funcs: tuple[MambaStateCopyFunc, ...],
    mamba_group_ids: list[int],
    src_block_idx: int,
    dest_block_idx: int,
    accept_token_bias: int,
    req_state: CachedRequestState,
    forward_context: dict[str, Any],
) -> None:
    """Stage (src, dst, size) copy entries for one request's mamba states.

    For every state tensor of every layer in every mamba group, the source
    address is computed by the layer's MambaStateCopyFunc (which receives
    ``accept_token_bias + 1``) and the destination is the start of the block
    at ``dest_block_idx`` in the request's block list. Entries are written
    CPU-side into ``copy_bufs``; nothing is staged when source and destination
    block indices coincide and there is no accepted-token bias.
    """
    if src_block_idx == dest_block_idx and accept_token_bias == 0:
        return
    src_ptrs_np = copy_bufs.src_ptrs.np
    dst_ptrs_np = copy_bufs.dst_ptrs.np
    sizes_np = copy_bufs.sizes.np
    offset = copy_bufs.offset
    for mamba_group_id in mamba_group_ids:
        block_ids = req_state.block_ids[mamba_group_id]
        dest_block_id = block_ids[dest_block_idx]
        layer_names = kv_cache_config.kv_cache_groups[mamba_group_id].layer_names
        for layer_name in layer_names:
            attention = forward_context[layer_name]
            kv_caches: list[torch.Tensor] = attention.kv_cache[0]
            for state, state_copy_func in zip(kv_caches, mamba_state_copy_funcs):
                copy_spec = state_copy_func(
                    state, block_ids, src_block_idx, accept_token_bias + 1
                )
                src_ptrs_np[offset] = copy_spec.start_addr
                dst_ptrs_np[offset] = state[dest_block_id].data_ptr()
                # Sizes are in bytes — the memcpy kernel copies uint8.
                sizes_np[offset] = copy_spec.num_elements * state.element_size()
                offset += 1
    copy_bufs.offset = offset
def do_mamba_copy_block(copy_bufs: MambaCopyBuffers):
    """Issue the batched memcpy for all entries staged in ``copy_bufs``.

    No-op when nothing has been staged since the last reset.
    """
    pending = copy_bufs.offset
    if pending:
        batch_memcpy(
            copy_bufs.src_ptrs.copy_to_gpu(pending),
            copy_bufs.dst_ptrs.copy_to_gpu(pending),
            copy_bufs.sizes.copy_to_gpu(pending),
        )
def preprocess_mamba(
    scheduler_output: SchedulerOutput,
    kv_cache_config: KVCacheConfig,
    cache_config: CacheConfig,
    mamba_state_idx: dict[str, int],
    input_batch: GPUInputBatch,
    requests: dict[str, CachedRequestState],
    forward_context: dict[str, Any],
    mamba_state_copy_funcs: tuple[MambaStateCopyFunc, ...],
    copy_bufs: MambaCopyBuffers,
):
    """
    Copy the mamba state of the previous step to the last
    (1 + num_speculative_blocks) block, i.e. the block that holds this step's
    running state.

    Also evicts stale ``mamba_state_idx`` entries for finished / preempted /
    resumed requests and records the new running-state block index for every
    scheduled request.
    """
    mamba_group_ids, mamba_spec = get_mamba_groups(kv_cache_config)
    num_speculative_blocks = mamba_spec.num_speculative_blocks
    # TODO(Chen): we need to optimize this function a lot
    assert cache_config.enable_prefix_caching
    block_size = mamba_spec.block_size
    finished_req_ids = scheduler_output.finished_req_ids
    preempted_req_ids = scheduler_output.preempted_req_ids or set()
    # We need to clear mamba_state_idx for resumed requests. When requests are
    # force-preempted (e.g., during reset_prefix_cache / KV cache flush),
    # they appear in resumed_req_ids without a corresponding entry in
    # preempted_req_ids, leaving stale mamba_state_idx entries that can
    # point to block indices beyond the new (smaller) block allocation.
    resumed_req_ids = scheduler_output.scheduled_cached_reqs.resumed_req_ids
    for req_id in itertools.chain(finished_req_ids, preempted_req_ids, resumed_req_ids):
        mamba_state_idx.pop(req_id, None)
    copy_bufs.offset = 0
    for i, req_id in enumerate(input_batch.req_ids):
        req_state = requests[req_id]
        prev_state_idx = mamba_state_idx.get(req_id)
        if prev_state_idx is None:
            # new / resumed request, no previous state
            # if num_computed_tokens is 0, prev_state_idx will be -1
            prev_state_idx = (req_state.num_computed_tokens - 1) // block_size
        num_scheduled_tokens = scheduler_output.num_scheduled_tokens[req_id]
        # Blocks needed once this step's tokens are computed, plus the
        # speculative blocks reserved at the tail.
        num_blocks: int = (
            cdiv(req_state.num_computed_tokens + num_scheduled_tokens, block_size)
            + num_speculative_blocks
        )
        # We always save the current running state at the last
        # (1 + num_speculative_blocks) block.
        # A corner case worth mention here: assume we have block_size = 4 and
        # num_speculative_tokens = 2. The request is [A, B, C] and contains 2 draft
        # tokens [draft 1, draft 2]. Then we will have:
        # Block 0: [A, B, C, draft 1]
        # Block 1: [draft 2, TOFILL, TOFILL, TOFILL]
        # Block 2: speculative block
        # Block 3: speculative block
        # And use block 1 to save the running state.
        curr_state_idx = num_blocks - 1 - num_speculative_blocks
        mamba_state_idx[req_id] = curr_state_idx
        if prev_state_idx != -1 and prev_state_idx != curr_state_idx:
            collect_mamba_copy_meta(
                copy_bufs,
                kv_cache_config,
                mamba_state_copy_funcs,
                mamba_group_ids,
                prev_state_idx,
                curr_state_idx,
                input_batch.num_accepted_tokens_cpu[i] - 1,
                req_state,
                forward_context,
            )
            # NOTE(review): presumably the accepted-token bias has been folded
            # into the staged copy, so it is reset here to avoid applying it
            # twice — confirm against the copy-func semantics.
            input_batch.num_accepted_tokens_cpu[i] = 1
    do_mamba_copy_block(copy_bufs)
def postprocess_mamba(
    scheduler_output: SchedulerOutput,
    kv_cache_config: KVCacheConfig,
    input_batch: GPUInputBatch,
    requests: dict[str, CachedRequestState],
    mamba_state_idx: dict[str, int],
    forward_context: dict[str, Any],
    mamba_state_copy_funcs: tuple[MambaStateCopyFunc, ...],
    copy_bufs: MambaCopyBuffers,
):
    """
    If a block was converted from a partial block to a full block in this
    step, copy the state from the running-state block to the new full block.
    """
    num_scheduled_tokens_dict = scheduler_output.num_scheduled_tokens
    scheduled_spec_decode_tokens_dict = scheduler_output.scheduled_spec_decode_tokens
    num_accepted_tokens_cpu = input_batch.num_accepted_tokens_cpu
    # NOTE: can be optimized as this function always returns the same result
    mamba_group_ids, mamba_spec = get_mamba_groups(kv_cache_config)
    copy_bufs.offset = 0
    for i, req_id in enumerate(input_batch.req_ids):
        req_state = requests[req_id]
        num_computed_tokens = req_state.num_computed_tokens
        num_draft_tokens = len(scheduled_spec_decode_tokens_dict.get(req_id, []))
        num_scheduled_tokens = num_scheduled_tokens_dict[req_id]
        num_accepted_tokens = num_accepted_tokens_cpu[i]
        # Token count at the position where the running state was saved
        # (draft tokens excluded).
        num_tokens_running_state = (
            num_computed_tokens + num_scheduled_tokens - num_draft_tokens
        )
        new_num_computed_tokens = num_tokens_running_state + num_accepted_tokens - 1
        # Round down to a block boundary: only fully-computed blocks can be
        # cached.
        aligned_new_computed_tokens = (
            new_num_computed_tokens // mamba_spec.block_size * mamba_spec.block_size
        )
        # TODO: how to ensure all blocks that cache_blocks called are cached here?
        if aligned_new_computed_tokens >= num_tokens_running_state:
            accept_token_bias = aligned_new_computed_tokens - num_tokens_running_state
            src_block_idx = mamba_state_idx[req_id]
            dest_block_idx = aligned_new_computed_tokens // mamba_spec.block_size - 1
            collect_mamba_copy_meta(
                copy_bufs,
                kv_cache_config,
                mamba_state_copy_funcs,
                mamba_group_ids,
                src_block_idx,
                dest_block_idx,
                accept_token_bias,
                req_state,
                forward_context,
            )
            # NOTE(review): presumably when the copy stays inside one block the
            # bias is applied in place, so it must not be applied again later —
            # confirm against the copy-func semantics.
            if src_block_idx == dest_block_idx:
                num_accepted_tokens_cpu[i] = 1
    do_mamba_copy_block(copy_bufs)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/mamba_utils.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/whisper_causal.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import copy
import functools
import logging
import math
from dataclasses import replace
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.models.mistral import MistralMLP
from vllm.model_executor.models.whisper import WhisperPosEmbedType
from vllm.v1.attention.backend import (
AttentionBackend,
AttentionMetadata,
AttentionType,
CommonAttentionMetadata,
subclass_attention_backend_with_overrides,
)
from vllm.v1.attention.backends.flash_attn import FlashAttentionBackend
try:
from vllm.v1.attention.backends.rocm_aiter_fa import AiterFlashAttentionBackend
except ImportError:
AiterFlashAttentionBackend = None
from vllm.v1.attention.backends.rocm_attn import RocmAttentionBackend
from vllm.v1.attention.backends.triton_attn import TritonAttentionBackend
from vllm.v1.attention.selector import get_attn_backend
from vllm.v1.kv_cache_interface import AttentionSpec
from .utils import make_layers
logger = logging.getLogger(__name__)
# RMSNorm with eps pinned to 1e-5, shared by all norm layers in this module.
CausalRMSNorm = partial(RMSNorm, eps=1e-5)
def _pad1d(
x: torch.Tensor,
paddings: tuple[int, int],
mode: str = "constant",
value: float = 0.0,
) -> torch.Tensor:
"""Tiny wrapper around F.pad, just to allow for
reflect padding on small input.
If this is the case, we insert extra 0 padding
to the right before the reflection happen.
"""
length = x.shape[-1]
padding_left, padding_right = paddings
assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right)
if mode == "reflect":
max_pad = max(padding_left, padding_right)
extra_pad = 0
if length <= max_pad:
extra_pad = max_pad - length + 1
x = F.pad(x, (0, extra_pad))
padded = F.pad(x, paddings, mode, value)
end = padded.shape[-1] - extra_pad
return padded[..., :end]
else:
return F.pad(x, paddings, mode, value)
class WhisperCausalConv1d(nn.Conv1d):
    """Conv1d with causal (left-side) padding.

    All required padding is placed on the left, plus just enough extra
    right-padding so the output covers ceil(n_frames) whole frames — output
    at time t therefore never depends on inputs after t.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = 0,
        bias: bool = True,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=bias,
        )
        self._stride = self.stride[0]
        # Kernel span once dilation is accounted for.
        self._effective_kernel_size = (kernel_size - 1) * self.dilation[0] + 1
        # Total padding needed for stride-aligned causal output.
        self._padding_total = self._effective_kernel_size - self._stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        length = x.shape[-1]
        # Fractional frame count for the raw input; rounded up below.
        frames = (
            length - self._effective_kernel_size + self._padding_total
        ) / self._stride + 1
        ideal_length = (math.ceil(frames) - 1) * self._stride + (
            self._effective_kernel_size - self._padding_total
        )
        # Causal: all structural padding goes left, only the frame-alignment
        # remainder goes right.
        x = _pad1d(x, (self._padding_total, ideal_length - length), mode="constant")
        return super().forward(x)
@functools.lru_cache
def create_whisper_attention_backend_with_block_pooling(
    underlying_attn_backend: AttentionBackend, block_pool_size: int
) -> type[AttentionBackend]:
    """Wrap ``underlying_attn_backend`` so each KV-cache block stores
    ``block_pool_size`` pooled positions.

    The wrapped backend scales block size up and KV-head count down by
    ``block_pool_size``, and rescales all attention metadata to match.
    Cached with ``lru_cache`` so each (backend, pool size) pair is built once.
    """
    prefix = "WhisperCausalAttentionWithBlockPooling_"
    underlying_builder = underlying_attn_backend.get_builder_cls()
    underlying_impl = underlying_attn_backend.get_impl_cls()

    class WhisperCausalAttentionWithBlockPoolingBuilder(underlying_builder):  # type: ignore
        def __init__(
            self,
            kv_cache_spec: AttentionSpec,
            layer_names: list[str],
            vllm_config: VllmConfig,
            device: torch.device,
        ):
            # Fold the pooling factor into the spec: fewer KV heads per
            # position, proportionally larger blocks.
            assert kv_cache_spec.num_kv_heads % block_pool_size == 0
            kv_cache_spec = replace(
                kv_cache_spec,
                block_size=kv_cache_spec.block_size * block_pool_size,
                num_kv_heads=kv_cache_spec.num_kv_heads // block_pool_size,
            )
            super().__init__(kv_cache_spec, layer_names, vllm_config, device)
            # Override model_config-derived values with the actual
            # encoder values from kv_cache_spec
            self.num_heads_kv = kv_cache_spec.num_kv_heads
            self.headdim = kv_cache_spec.head_size
            # num_heads_q for the encoder is the same as num_kv_heads
            # (no GQA in whisper encoder)
            self.num_heads_q = kv_cache_spec.num_kv_heads

        def build(
            self,
            common_prefix_len: int,
            common_attn_metadata: CommonAttentionMetadata,
            fast_build: bool = False,
        ) -> AttentionMetadata:
            # Scale every token-count/length field by the pooling factor;
            # deepcopy so the caller's metadata is left untouched.
            new_common_attn_metadata = copy.deepcopy(common_attn_metadata)
            new_common_attn_metadata.query_start_loc *= block_pool_size
            new_common_attn_metadata.query_start_loc_cpu *= block_pool_size
            new_common_attn_metadata.seq_lens *= block_pool_size
            new_common_attn_metadata._seq_lens_cpu *= block_pool_size
            new_common_attn_metadata._num_computed_tokens_cpu *= block_pool_size
            new_common_attn_metadata.num_actual_tokens *= block_pool_size
            new_common_attn_metadata.max_query_len *= block_pool_size
            new_common_attn_metadata.max_seq_len *= block_pool_size
            original_slot_mapping = common_attn_metadata.slot_mapping
            common_prefix_len *= block_pool_size
            # Expand each original slot into `block_pool_size` consecutive
            # slots; the clamp keeps padding slots (-1) mapped to -1.
            new_common_attn_metadata.slot_mapping = (
                (
                    original_slot_mapping.unsqueeze(1) * block_pool_size
                    + torch.arange(block_pool_size, device=original_slot_mapping.device)
                )
                .flatten()
                .clamp(min=-1)
            )
            return super().build(
                common_prefix_len, new_common_attn_metadata, fast_build
            )

    # NOTE: We need a custom impl so we can use the transformed slot_mapping
    # computed by `WhisperCausalAttentionWithBlockPoolingBuilder` instead of
    # the one from `forward_context.slot_mapping` (gpu_model_runner).
    # This follows the same pattern as CrossAttentionImpl.
    class WhisperCausalAttentionWithBlockPoolingImpl(underlying_impl):  # type: ignore[valid-type,misc]
        def forward(
            self,
            layer: torch.nn.Module,
            query: torch.Tensor,
            key: torch.Tensor,
            value: torch.Tensor,
            kv_cache: torch.Tensor,
            attn_metadata: AttentionMetadata,
            output: torch.Tensor | None = None,
            output_scale: torch.Tensor | None = None,
            output_block_scale: torch.Tensor | None = None,
        ) -> torch.Tensor:
            # Perform the KV-cache update here (with the pooled slot_mapping)
            # when the underlying backend would otherwise not do it in forward.
            if (
                not underlying_attn_backend.forward_includes_kv_cache_update
                and attn_metadata is not None
                and layer.kv_sharing_target_layer_name is None
                and key is not None
                and value is not None
            ):
                self.do_kv_cache_update(
                    layer, key, value, kv_cache, attn_metadata.slot_mapping
                )
            return super().forward(
                layer,
                query,
                key,
                value,
                kv_cache,
                attn_metadata,
                output,
                output_scale,
                output_block_scale,
            )

    # Backends verified to work with the metadata transformation above.
    _SUPPORTED_BACKENDS = tuple(
        b
        for b in (
            AiterFlashAttentionBackend,
            FlashAttentionBackend,
            RocmAttentionBackend,
            TritonAttentionBackend,
        )
        if b is not None
    )
    if not issubclass(underlying_attn_backend, _SUPPORTED_BACKENDS):
        raise NotImplementedError(
            f"{underlying_attn_backend} is not yet supported."
            "Contributions to support more backends are much "
            "appreciated."
        )
    if not issubclass(underlying_attn_backend, FlashAttentionBackend):
        logger.info(
            "Using %s for Whisper causal attention with block pooling. "
            "This backend was recently enabled for this model. "
            "If you encounter any accuracy or performance issues, "
            "please open an issue at "
            "https://github.com/vllm-project/vllm/issues "
            "with the [ROCm] tag so it can be triaged by the "
            "appropriate team.",
            underlying_attn_backend.get_name(),
        )
    attn_backend = subclass_attention_backend_with_overrides(
        name_prefix=prefix,
        attention_backend_cls=underlying_attn_backend,
        overrides={
            "get_builder_cls": lambda: WhisperCausalAttentionWithBlockPoolingBuilder,
            "get_impl_cls": lambda: WhisperCausalAttentionWithBlockPoolingImpl,
            "get_kv_cache_shape": lambda num_blocks,
            block_size,
            num_kv_heads,
            head_size,
            cache_dtype_str: underlying_attn_backend.get_kv_cache_shape(
                num_blocks,
                # we stretch each block by `block_pool_size`
                block_size * block_pool_size,
                num_kv_heads // block_pool_size,
                head_size,
                cache_dtype_str,
            ),
            "forward_includes_kv_cache_update": True,
        },
    )
    return attn_backend
class WhisperCausalAttentionWithBlockPooling(Attention):
    """Attention layer with block pooling.

    Selects the platform attention backend and wraps it with
    ``create_whisper_attention_backend_with_block_pooling`` so that
    ``block_pool_size`` consecutive positions share one KV-cache slot.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int | None = None,
        alibi_slopes: list[float] | None = None,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        logits_soft_cap: float | None = None,
        per_layer_sliding_window: int | None = None,
        prefix: str = "",
        attn_type: str = AttentionType.DECODER,
        kv_sharing_target_layer_name: str | None = None,
        block_pool_size: int = 1,
        attn_backend: type[AttentionBackend] | None = None,
        **extra_impl_args,
    ) -> None:
        self.block_pool_size = block_pool_size
        dtype = torch.get_default_dtype()
        if cache_config is not None:
            kv_cache_dtype = cache_config.cache_dtype
            block_size = cache_config.block_size
        else:
            # Defaults used when no cache config is provided.
            kv_cache_dtype = "auto"
            block_size = 16
        underlying_attn_backend = get_attn_backend(
            head_size,
            dtype,
            kv_cache_dtype,
            block_size,
            attn_type=attn_type,
        )
        # NOTE(review): the `attn_backend` parameter is accepted but always
        # overwritten here — confirm whether a caller-supplied backend should
        # be honored instead.
        attn_backend = create_whisper_attention_backend_with_block_pooling(
            underlying_attn_backend, block_pool_size
        )
        super().__init__(
            num_heads=num_heads,
            head_size=head_size,
            scale=scale,
            num_kv_heads=num_kv_heads,
            alibi_slopes=alibi_slopes,
            cache_config=cache_config,
            quant_config=quant_config,
            logits_soft_cap=logits_soft_cap,
            per_layer_sliding_window=per_layer_sliding_window,
            prefix=prefix,
            attn_type=attn_type,
            kv_sharing_target_layer_name=kv_sharing_target_layer_name,
            attn_backend=attn_backend,
            **extra_impl_args,
        )

    def get_kv_cache_spec(self, vllm_config: VllmConfig):
        """Report the spec with the un-pooled KV-head count; the wrapped
        builder asserts divisibility and divides it back out."""
        kv_cache_spec = super().get_kv_cache_spec(vllm_config)
        assert isinstance(kv_cache_spec, AttentionSpec)
        kv_cache_spec = replace(
            kv_cache_spec,
            num_kv_heads=self.block_pool_size * kv_cache_spec.num_kv_heads,
        )
        return kv_cache_spec
class WhisperCausalAttention(nn.Module):
    """Causal self-attention for the Whisper encoder: fused QKV projection,
    RoPE positions, and block-pooled KV caching."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        head_dim: int,
        max_position_embeddings: int,
        bias: bool = True,
        attn_type: AttentionType = AttentionType.DECODER,
        per_layer_sliding_window: int | None = None,
        block_pool_size: int = 1,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.embed_dim = embed_dim
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        # NOTE(review): the assert above already requires divisibility by
        # tp_size, which makes the replication (else) branch below unreachable
        # for any positive head count — confirm which behavior is intended.
        if self.total_num_heads >= tp_size:
            # Number of heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_heads % tp_size == 0
        else:
            # Number of heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_heads == 0
        self.num_kv_heads = max(1, self.total_num_heads // tp_size)
        self.head_dim = head_dim
        # Per-rank projection widths.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.attn_type = attn_type
        self.scaling = self.head_dim**-0.5
        self._init_qkv(embed_dim, bias, quant_config, prefix=prefix)
        self.out_proj = RowParallelLinear(
            input_size=self.total_num_heads * self.head_dim,
            output_size=embed_dim,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.out_proj",
        )
        assert block_pool_size > 1, (
            f"Causal attention only supports block_pool_size>1, not {block_pool_size}."
        )
        self.attn = WhisperCausalAttentionWithBlockPooling(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            attn_type=AttentionType.DECODER,
            per_layer_sliding_window=per_layer_sliding_window,
            block_pool_size=block_pool_size,
        )
        assert per_layer_sliding_window is not None, (
            "rope can only used in combination with a sliding window"
        )
        self._init_rotary_emb(max_position_embeddings)

    def _init_rotary_emb(self, max_position_embeddings: int) -> None:
        # Non-neox (GPT-J style) RoPE with theta = 1e6.
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=max_position_embeddings,
            is_neox_style=False,
            rope_parameters={"rope_theta": 1e6},
        )

    def _init_qkv(
        self,
        embed_dim: int,
        bias: bool = True,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        # Fused QKV projection; Q and KV use the same total head count
        # (no GQA in the whisper encoder).
        self.qkv_proj = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        positions: torch.Tensor | None = None,
    ):
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        # RoPE requires positions; callers must always pass them.
        assert positions is not None
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.out_proj(attn_output)
        return output
class WhisperCausalEncoderLayer(nn.Module):
    """Pre-norm transformer layer for the causal Whisper encoder:
    RMSNorm -> causal self-attention -> residual, then
    RMSNorm -> MLP -> residual.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        cfg = vllm_config.model_config.hf_config
        pool_size = cfg.block_pool_size
        assert pool_size > 1
        self.embed_dim = cfg.d_model
        self.head_dim = self.embed_dim // cfg.encoder_attention_heads
        self.self_attn = WhisperCausalAttention(
            embed_dim=self.embed_dim,
            num_heads=cfg.encoder_attention_heads,
            head_dim=cfg.encoder_head_dim,
            max_position_embeddings=cfg.max_position_embeddings,
            block_pool_size=pool_size,
            per_layer_sliding_window=getattr(cfg, "sliding_window", None),
            cache_config=vllm_config.cache_config,
            quant_config=vllm_config.quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.self_attn_layer_norm = CausalRMSNorm(self.embed_dim)
        self.mlp = MistralMLP(
            hidden_size=cfg.d_model,
            intermediate_size=cfg.encoder_ffn_dim,
            hidden_act="silu",
            quant_config=vllm_config.quant_config,
            bias=True,
            gate_up_proj_bias=False,
            prefix=f"{prefix}.mlp",
        )
        self.final_layer_norm = CausalRMSNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        positions: torch.Tensor | None = None,
    ):
        # Self-attention sub-block (pre-norm residual).
        normed = self.self_attn_layer_norm(hidden_states)
        hidden_states = hidden_states + self.self_attn(
            hidden_states=normed, positions=positions
        )
        # Feed-forward sub-block (pre-norm residual).
        normed = self.final_layer_norm(hidden_states)
        return hidden_states + self.mlp(normed)
class WhisperCausalEncoder(nn.Module):
    """Causal Whisper encoder: two causal conv layers downsample the mel
    features, followed by a stack of causal transformer layers and a final
    RMSNorm."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        embed_dim = config.d_model
        # This variant only supports RoPE positions and causal attention.
        assert WhisperPosEmbedType(config.pos_embed) == WhisperPosEmbedType.ROPE
        assert config.is_causal
        self.num_mel_bins = config.num_mel_bins
        self.max_source_positions = config.max_source_positions
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
        self.conv1 = WhisperCausalConv1d(self.num_mel_bins, embed_dim, kernel_size=3)
        self.conv2 = WhisperCausalConv1d(embed_dim, embed_dim, stride=2, kernel_size=3)
        # Overall temporal downsampling factor of the conv stem.
        self.total_stride = self.conv1.stride[0] * self.conv2.stride[0]
        # NOTE(review): the lambda appends ".layers" to the per-layer prefix
        # received from make_layers, which typically already ends in
        # ".layers.<i>" — this looks like a duplicated suffix; verify against
        # the checkpoint's weight names.
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.encoder_layers,
            lambda prefix: WhisperCausalEncoderLayer(
                vllm_config=vllm_config, prefix=f"{prefix}.layers"
            ),
            prefix=f"{prefix}.layers",
        )
        self.layer_norm = CausalRMSNorm(config.d_model)

    def forward_conv(
        self, input_features: torch.Tensor | list[torch.Tensor]
    ) -> torch.Tensor:
        """Run the conv stem per sample and concatenate along the batch dim."""
        hidden_states = []
        for features in input_features:
            embeds = nn.functional.gelu(self.conv1(features))
            embeds = nn.functional.gelu(self.conv2(embeds))
            # (C, T) -> (T, C) token layout for the transformer stack.
            embeds = embeds.transpose(-1, -2).to(embeds.dtype)
            hidden_states.append(embeds)
        hidden_states = torch.cat(hidden_states)
        return hidden_states

    def forward(
        self, hidden_states: torch.Tensor, positions: torch.Tensor
    ) -> torch.Tensor:
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states, positions)
        hidden_states = self.layer_norm(hidden_states)
        return hidden_states
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/whisper_causal.py",
"license": "Apache License 2.0",
"lines": 487,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/models/language/pooling/test_bge_m3.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import httpx
import openai
import pytest
import pytest_asyncio
import torch
from ....utils import RemoteOpenAIServer
from .embed_utils import run_client_embeddings
MODEL_NAME = "BAAI/bge-m3"
MAX_MODEL_LEN = 512
# Example from https://huggingface.co/BAAI/bge-m3
sentences_1 = ["What is BGE M3?", "Defination of BM25"]
sentences_2 = [
"BGE M3 is an embedding model supporting dense retrieval, "
"lexical matching and multi-vector interaction.",
"BM25 is a bag-of-words retrieval function that ranks a set "
"of documents based on the query terms appearing in each document",
]
similarity_reference = [[0.6265, 0.3477], [0.3499, 0.678]]
lexical_score_reference = [0.19554901123046875, 0.0]
colbert_score_reference = [0.7797, 0.4620]
@pytest.fixture(scope="module")
def server():
    """Launch one OpenAI-compatible vLLM server for the whole test module."""
    cli_args = [
        "--max-model-len",
        str(MAX_MODEL_LEN),
        "--hf-overrides",
        '{"architectures": ["BgeM3EmbeddingModel"]}',
    ]
    with RemoteOpenAIServer(MODEL_NAME, cli_args) as srv:
        yield srv
@pytest_asyncio.fixture
async def client(server):
    """Async OpenAI client bound to the module-scoped server."""
    async with server.get_async_client() as async_client:
        yield async_client
@pytest.mark.asyncio
async def test_bge_m3_api_server_embedding(client: openai.AsyncOpenAI):
    """Dense-embedding cosine-style similarity must match the published
    BAAI/bge-m3 reference matrix."""
    vecs_1 = torch.tensor(
        await run_client_embeddings(client, MODEL_NAME, sentences_1)
    )
    vecs_2 = torch.tensor(
        await run_client_embeddings(client, MODEL_NAME, sentences_2)
    )
    similarity = vecs_1 @ vecs_2.T
    expected = torch.tensor(similarity_reference)
    assert torch.allclose(similarity, expected, rtol=0.01)
async def tokenize(client: openai.AsyncOpenAI, sentences: list[str]) -> list[list[int]]:
    """Tokenize each sentence via the server's /tokenize endpoint, issuing
    all requests up front and awaiting them in order."""
    pending = [
        client.post(
            "../tokenize",
            body={"model": MODEL_NAME, "prompt": sentence},
            cast_to=httpx.Response,
        )
        for sentence in sentences
    ]
    return [(await response).json()["tokens"] for response in pending]
async def sparse_embeddings(
    client: openai.AsyncOpenAI, sentences: list[str]
) -> list[dict[int, float]]:
    """Fetch per-token lexical weights and fold them into one
    token-id -> max-weight dict per sentence (BOS token 0 is dropped)."""
    token_lists = await tokenize(client, sentences)
    response = await client.post(
        "../pooling",
        body={"model": MODEL_NAME, "input": sentences, "task": "token_classify"},
        cast_to=httpx.Response,
    )
    per_sentence_weights = [item["data"] for item in response.json()["data"]]
    results: list[dict[int, float]] = []
    for tokens, weights in zip(token_lists, per_sentence_weights):
        if tokens[0] == 0:
            # Drop the leading BOS token.
            tokens = tokens[1:]
        merged: dict[int, float] = {}
        for token, weight in zip(tokens, weights):
            # Keep the maximum weight for repeated tokens.
            merged[token] = max(weight, merged.get(token, 0.0))
        results.append(merged)
    return results
# Based on https://github.com/FlagOpen/FlagEmbedding/blob/6fd176266f2382878bcc69cd656cff425d52f49b/FlagEmbedding/inference/embedder/encoder_only/m3.py#L129
def compute_lexical_matching_score(
    lw1: dict[int, float], lw2: dict[int, float]
) -> float:
    """Sparse dot product of two lexical-weight dicts over shared tokens."""
    return sum(
        (weight * lw2[token] for token, weight in lw1.items() if token in lw2),
        0.0,
    )
@pytest.mark.asyncio
async def test_bge_m3_api_server_sparse_embedding(client: openai.AsyncOpenAI):
    """Lexical-matching scores must match the published bge-m3 references."""
    emb_1 = await sparse_embeddings(client, sentences_1)
    emb_2 = await sparse_embeddings(client, sentences_2)
    cross_score = compute_lexical_matching_score(emb_1[0], emb_2[0])
    assert cross_score == pytest.approx(lexical_score_reference[0], rel=0.01)
    within_score = compute_lexical_matching_score(emb_1[0], emb_1[1])
    assert within_score == pytest.approx(lexical_score_reference[1], rel=0.01)
@pytest.mark.asyncio
async def test_bge_m3_api_server_sparse_embedding_corner_case(
    client: openai.AsyncOpenAI,
):
    """A single short input yields exactly one weight dict with the expected
    token and weight."""
    embeddings = await sparse_embeddings(client, ["Hi"])
    assert len(embeddings) == 1
    weights = embeddings[0]
    assert 2673 in weights
    assert weights[2673] == pytest.approx(0.26710861921310425, rel=0.01)
# https://github.com/FlagOpen/FlagEmbedding/blob/6fd176266f2382878bcc69cd656cff425d52f49b/FlagEmbedding/inference/embedder/encoder_only/m3.py#L163
def colbert_score(q_reps: torch.Tensor, p_reps: torch.Tensor) -> torch.Tensor:
    """Late-interaction (ColBERT) score: for each query token take the max
    similarity over all passage tokens, then average over query tokens."""
    similarity = q_reps @ p_reps.transpose(-1, -2)
    best_per_query_token = similarity.max(dim=-1).values
    return best_per_query_token.sum() / q_reps.size(0)
@pytest.mark.asyncio
async def test_bge_m3_api_server_multi_vector(client: openai.AsyncOpenAI):
    """ColBERT-style multi-vector scores must match the published bge-m3
    references."""

    async def fetch_token_embeddings(sentences):
        # One tensor of per-token embeddings per input sentence.
        response = await client.post(
            "../pooling",
            body={"model": MODEL_NAME, "input": sentences, "task": "token_embed"},
            cast_to=httpx.Response,
        )
        return [torch.tensor(item["data"]) for item in response.json()["data"]]

    embeddings_1 = await fetch_token_embeddings(sentences_1)
    embeddings_2 = await fetch_token_embeddings(sentences_2)
    score_0 = colbert_score(embeddings_1[0], embeddings_2[0])
    assert score_0 == pytest.approx(colbert_score_reference[0], rel=0.01)
    score_1 = colbert_score(embeddings_1[0], embeddings_2[1])
    assert score_1 == pytest.approx(colbert_score_reference[1], rel=0.01)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/models/language/pooling/test_bge_m3.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/renderers/test_hf.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import load_chat_template
from vllm.entrypoints.openai.chat_completion.protocol import ChatCompletionRequest
from vllm.renderers.hf import (
_get_hf_base_chat_template_params,
_try_extract_ast,
resolve_chat_template,
resolve_chat_template_content_format,
resolve_chat_template_kwargs,
safe_apply_chat_template,
)
from vllm.tokenizers import get_tokenizer
from ..models.registry import HF_EXAMPLE_MODELS
from ..utils import VLLM_PATH
EXAMPLES_DIR = VLLM_PATH / "examples"
chatml_jinja_path = VLLM_PATH / "examples/template_chatml.jinja"
assert chatml_jinja_path.exists()
# Define models, templates, and their corresponding expected outputs
MODEL_TEMPLATE_GENERATION_OUTPUT = [
(
"facebook/opt-125m",
chatml_jinja_path,
True,
False,
"""<|im_start|>user
Hello<|im_end|>
<|im_start|>assistant
Hi there!<|im_end|>
<|im_start|>user
What is the capital of<|im_end|>
<|im_start|>assistant
""",
),
(
"facebook/opt-125m",
chatml_jinja_path,
False,
False,
"""<|im_start|>user
Hello<|im_end|>
<|im_start|>assistant
Hi there!<|im_end|>
<|im_start|>user
What is the capital of""",
),
(
"facebook/opt-125m",
chatml_jinja_path,
False,
True,
"""<|im_start|>user
Hello<|im_end|>
<|im_start|>assistant
Hi there!<|im_end|>
<|im_start|>user
What is the capital of<|im_end|>
<|im_start|>assistant
The capital of""",
),
]
TEST_MESSAGES = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
{"role": "user", "content": "What is the capital of"},
]
ASSISTANT_MESSAGE_TO_CONTINUE = {"role": "assistant", "content": "The capital of"}
def test_load_chat_template():
    """load_chat_template on a real template path returns the file contents."""
    # Testing chatml template
    template_content = load_chat_template(chat_template=chatml_jinja_path)
    # Test assertions
    assert template_content is not None
    # Hard coded value for template_chatml.jinja
    assert (
        template_content
        == """{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content']}}{% if (loop.last and add_generation_prompt) or not loop.last %}{{ '<|im_end|>' + '\\n'}}{% endif %}{% endfor %}
{% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}"""  # noqa: E501
    )
def test_no_load_chat_template_filelike():
    """A path-like template string that does not exist must be rejected."""
    missing_path = "../../examples/does_not_exist"
    with pytest.raises(ValueError, match="looks like a file path"):
        load_chat_template(chat_template=missing_path)
def test_no_load_chat_template_literallike():
    """An inline Jinja literal is passed through unchanged."""
    literal_template = "{{ messages }}"
    assert load_chat_template(chat_template=literal_template) == literal_template
@pytest.mark.parametrize(
    "model",
    [
        "Qwen/Qwen2-VL-2B-Instruct",  # chat_template is of type str
        "NousResearch/Hermes-3-Llama-3.1-8B",  # chat_template is of type dict
    ],
)
@pytest.mark.parametrize("use_tools", [True, False])
def test_resolve_chat_template(sample_json_schema, model, use_tools):
    """resolve_chat_template detects the tokenizer's own chat template (with
    or without tools) and always returns it as a string — including for models
    whose HF chat_template attribute is a dict."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")
    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.require_embed_inputs,
        enable_prompt_embeds=model_info.require_embed_inputs,
        enable_mm_embeds=model_info.require_embed_inputs,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype,
    )
    # Build the tokenizer
    tokenizer = get_tokenizer(
        model,
        trust_remote_code=model_config.trust_remote_code,
    )
    # Minimal tool schema; exercises the tools-aware template path.
    tools = (
        [
            {
                "type": "function",
                "function": {
                    "name": "dummy_function_name",
                    "description": "This is a dummy function",
                    "parameters": sample_json_schema,
                },
            }
        ]
        if use_tools
        else None
    )
    # Test detecting the tokenizer's chat_template
    chat_template = resolve_chat_template(
        tokenizer,
        chat_template=None,
        tools=tools,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)
@pytest.mark.parametrize(
    "model, expected_kwargs",
    [
        (
            "Qwen/Qwen2-VL-2B-Instruct",
            {
                "add_vision_id",
                "add_generation_prompt",
                "continue_final_message",
                "tools",
            },
        ),
        (
            "Qwen/Qwen3-8B",
            {
                "enable_thinking",
                "add_generation_prompt",
                "continue_final_message",
                "tools",
            },
        ),
    ],
)
def test_resolve_chat_template_kwargs(sample_json_schema, model, expected_kwargs):
    """Check that only kwargs accepted by the tokenizer or referenced by the
    chat template survive `resolve_chat_template_kwargs` filtering."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")
    tools = [
        {
            "type": "function",
            "function": {
                "name": "dummy_function_name",
                "description": "This is a dummy function",
                "parameters": sample_json_schema,
            },
        }
    ]
    chat_template_kwargs = {
        # both unused (names deliberately unknown to template and tokenizer)
        "unsed_kwargs_1": 123,
        "unsed_kwargs_2": "abc",
        # should not appear (reserved names; trigger the ValueError below)
        "chat_template": "{% Hello world! %}",
        "tokenize": True,
        # used by tokenizer
        "continue_final_message": True,
        "tools": tools,
        # both used by Qwen2-VL and Qwen3
        "add_generation_prompt": True,
        # only used by Qwen2-VL
        "add_vision_id": True,
        # only used by Qwen3
        "enable_thinking": True,
    }
    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.require_embed_inputs,
        enable_prompt_embeds=model_info.require_embed_inputs,
        enable_mm_embeds=model_info.require_embed_inputs,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype,
    )
    # Build the tokenizer
    tokenizer = get_tokenizer(
        model,
        trust_remote_code=model_config.trust_remote_code,
    )
    # Test detecting the tokenizer's chat_template
    chat_template = resolve_chat_template(
        tokenizer,
        chat_template=None,
        tools=tools,
        model_config=model_config,
    )
    with pytest.raises(
        ValueError, match="Found unexpected chat template kwargs from request"
    ):
        # should raise error if `chat_template_kwargs` contains
        # `chat_template` or `tokenize`
        resolve_chat_template_kwargs(
            tokenizer,
            chat_template=chat_template,
            chat_template_kwargs=chat_template_kwargs,
        )
    resolved_chat_template_kwargs = resolve_chat_template_kwargs(
        tokenizer,
        chat_template=chat_template,
        chat_template_kwargs=chat_template_kwargs,
        raise_on_unexpected=False,
    )
    assert set(resolved_chat_template_kwargs.keys()) == expected_kwargs
    # Additional test: Verify HF base parameters work with **kwargs tokenizers
    # This validates the fix for tokenizers like Kimi K2 that use **kwargs
    # to receive standard HuggingFace parameters instead of declaring them explicitly
    hf_base_params = _get_hf_base_chat_template_params()
    # Verify common HF parameters are in the base class
    assert {"add_generation_prompt", "tools", "continue_final_message"}.issubset(
        hf_base_params
    ), f"Expected HF base params not found in {hf_base_params}"
    # Test with a mock tokenizer that uses **kwargs (like Kimi K2)
    class MockTokenizerWithKwargs:
        def apply_chat_template(self, conversation, **kwargs):
            return "mocked_output"
    mock_tokenizer = MockTokenizerWithKwargs()
    mock_kwargs = {
        "add_generation_prompt": True,
        "tools": tools,
        "continue_final_message": False,
        "unknown_param": "should_be_filtered",
    }
    resolved_mock = resolve_chat_template_kwargs(
        mock_tokenizer, chat_template, mock_kwargs, raise_on_unexpected=False
    )
    # HF base params should pass through even with **kwargs tokenizer
    assert "add_generation_prompt" in resolved_mock
    assert "tools" in resolved_mock
    assert "continue_final_message" in resolved_mock
    # Unknown params should be filtered out
    assert "unknown_param" not in resolved_mock
# NOTE: Qwen2-Audio default chat template is specially defined inside
# processor class instead of using `tokenizer_config.json`
@pytest.mark.parametrize(
    ("model", "expected_format"),
    [
        ("microsoft/Phi-3.5-vision-instruct", "string"),
        ("Qwen/Qwen2-VL-2B-Instruct", "openai"),
        ("Qwen/Qwen2.5-VL-3B-Instruct", "openai"),
        ("fixie-ai/ultravox-v0_5-llama-3_2-1b", "string"),
        ("Qwen/Qwen2-Audio-7B-Instruct", "openai"),
        ("meta-llama/Llama-Guard-3-1B", "openai"),
    ],
)
def test_resolve_content_format_hf_defined(model, expected_format):
    """Check content-format auto-detection for chat templates that the
    HF repos themselves define."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")
    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.require_embed_inputs,
        enable_prompt_embeds=model_info.require_embed_inputs,
        enable_mm_embeds=model_info.require_embed_inputs,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype,
    )
    tokenizer = get_tokenizer(
        model,
        trust_remote_code=model_config.trust_remote_code,
    )
    # Test detecting the tokenizer's chat_template
    chat_template = resolve_chat_template(
        tokenizer,
        chat_template=None,
        tools=None,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)
    # Dump template text and AST to ease debugging on mismatch
    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))
    resolved_format = resolve_chat_template_content_format(
        None,  # Test detecting the tokenizer's chat_template
        None,
        "auto",
        tokenizer,
        model_config=model_config,
    )
    assert resolved_format == expected_format
@pytest.mark.parametrize(
    ("model", "expected_format"),
    [
        ("Salesforce/blip2-opt-2.7b", "string"),
        ("facebook/chameleon-7b", "string"),
        ("deepseek-ai/deepseek-vl2-tiny", "string"),
        ("adept/fuyu-8b", "string"),
        ("google/paligemma-3b-mix-224", "string"),
        ("Qwen/Qwen-VL", "string"),
        ("Qwen/Qwen-VL-Chat", "string"),
    ],
)
def test_resolve_content_format_fallbacks(model, expected_format):
    """Check content-format auto-detection for models whose chat template
    comes from vLLM's predefined fallback files."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")
    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.require_embed_inputs,
        enable_prompt_embeds=model_info.require_embed_inputs,
        enable_mm_embeds=model_info.require_embed_inputs,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype,
    )
    # NOTE: unlike the HF-defined test above, the tokenizer name comes from
    # model_config (which may differ from `model` via model_info.tokenizer)
    tokenizer = get_tokenizer(
        model_config.tokenizer,
        trust_remote_code=model_config.trust_remote_code,
    )
    # Test detecting the tokenizer's chat_template
    chat_template = resolve_chat_template(
        tokenizer,
        chat_template=None,
        tools=None,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)
    # Dump template text and AST to ease debugging on mismatch
    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))
    resolved_format = resolve_chat_template_content_format(
        None,  # Test detecting the tokenizer's chat_template
        None,
        "auto",
        tokenizer,
        model_config=model_config,
    )
    assert resolved_format == expected_format
@pytest.mark.parametrize(
    ("template_path", "expected_format"),
    [
        ("template_alpaca.jinja", "string"),
        ("template_baichuan.jinja", "string"),
        ("template_chatglm.jinja", "string"),
        ("template_chatglm2.jinja", "string"),
        ("template_chatml.jinja", "string"),
        ("template_falcon_180b.jinja", "string"),
        ("template_falcon.jinja", "string"),
        ("template_inkbot.jinja", "string"),
        ("template_teleflm.jinja", "string"),
        ("pooling/embed/template/dse_qwen2_vl.jinja", "openai"),
        ("pooling/embed/template/vlm2vec_phi3v.jinja", "openai"),
        ("pooling/embed/template/vlm2vec_qwen2vl.jinja", "openai"),
        ("tool_chat_template_granite_20b_fc.jinja", "string"),
        ("tool_chat_template_hermes.jinja", "string"),
        ("tool_chat_template_internlm2_tool.jinja", "string"),
        ("tool_chat_template_llama3.1_json.jinja", "openai"),
        ("tool_chat_template_llama3.2_json.jinja", "openai"),
        ("tool_chat_template_mistral_parallel.jinja", "string"),
        ("tool_chat_template_mistral.jinja", "string"),
    ],
)
def test_resolve_content_format_examples(template_path, expected_format):
    """Check content-format auto-detection against the bundled example templates."""
    # Any HF model works here; it only supplies a tokenizer object.
    dummy_model = "Qwen/Qwen2-VL-2B-Instruct"
    model_config = ModelConfig(
        dummy_model,
        tokenizer=dummy_model,
        trust_remote_code=True,
    )
    dummy_tokenizer = get_tokenizer(
        dummy_model,
        trust_remote_code=model_config.trust_remote_code,
    )
    # Clear the tokenizer's own template so only the example file matters.
    dummy_tokenizer.chat_template = None
    chat_template = load_chat_template(EXAMPLES_DIR / template_path)
    assert isinstance(chat_template, str)
    # Dump template text and AST to ease debugging on mismatch
    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))
    detected = resolve_chat_template_content_format(
        chat_template,
        None,
        "auto",
        dummy_tokenizer,
        model_config=model_config,
    )
    assert detected == expected_format
@pytest.mark.parametrize(
    "model,template,add_generation_prompt,continue_final_message,expected_output",
    MODEL_TEMPLATE_GENERATION_OUTPUT,
)
def test_get_gen_prompt(
    model, template, add_generation_prompt, continue_final_message, expected_output
):
    """End-to-end check that applying a chat template produces the exact
    expected prompt string for each (model, template) combination."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")
    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        trust_remote_code=model_info.trust_remote_code,
        revision=model_info.revision,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.require_embed_inputs,
        enable_prompt_embeds=model_info.require_embed_inputs,
        enable_mm_embeds=model_info.require_embed_inputs,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype,
    )
    # Initialize the tokenizer
    tokenizer = get_tokenizer(
        tokenizer_name=model_config.tokenizer,
        trust_remote_code=model_config.trust_remote_code,
    )
    template_content = load_chat_template(chat_template=template)
    # Create a mock request object using keyword arguments
    mock_request = ChatCompletionRequest(
        model=model,
        messages=TEST_MESSAGES + [ASSISTANT_MESSAGE_TO_CONTINUE]
        if continue_final_message
        else TEST_MESSAGES,
        add_generation_prompt=add_generation_prompt,
        continue_final_message=continue_final_message,
    )
    # Call the function and get the result (tokenize=False -> prompt string)
    result = safe_apply_chat_template(
        model_config,
        tokenizer,
        mock_request.messages,
        tools=None,
        chat_template=mock_request.chat_template or template_content,
        add_generation_prompt=mock_request.add_generation_prompt,
        continue_final_message=mock_request.continue_final_message,
        tokenize=False,
    )
    # Test assertion
    assert result == expected_output, (
        f"The generated prompt does not match the expected output for "
        f"model {model} and template {template}"
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/renderers/test_hf.py",
"license": "Apache License 2.0",
"lines": 474,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/renderers/test_mistral.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import time
from dataclasses import dataclass
from typing import Any
from unittest.mock import Mock
import pytest
from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy
from vllm.renderers import ChatParams
from vllm.renderers.mistral import MistralRenderer, safe_apply_chat_template
from vllm.tokenizers.mistral import MistralTokenizer
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.3"
@dataclass
class MockHFConfig:
    """Minimal HF-config stand-in; only `model_type` is ever consulted."""

    model_type: str = "any"
@dataclass
class MockModelConfig:
    """Minimal `ModelConfig` stand-in carrying only the attributes the
    Mistral renderer reads."""

    # NOTE: names without annotations (runner_type, tokenizer_revision,
    # tokenizer_mode, hf_config) are plain class attributes, not dataclass
    # fields, so they are shared and excluded from __init__.
    runner_type = "generate"
    model: str = MODEL_NAME
    tokenizer: str = MODEL_NAME
    trust_remote_code: bool = False
    max_model_len: int = 100
    tokenizer_revision = None
    tokenizer_mode = "mistral"
    hf_config = MockHFConfig()
    encoder_config: dict[str, Any] | None = None
    enable_prompt_embeds: bool = True
    skip_tokenizer_init: bool = False
    is_encoder_decoder: bool = False
    is_multimodal_model: bool = False
@dataclass
class MockVllmConfig:
    """Minimal `VllmConfig` stand-in exposing only `model_config`."""

    model_config: MockModelConfig
@pytest.mark.asyncio
async def test_async_mistral_tokenizer_does_not_block_event_loop():
    """A slow (2s) tokenizer call must not stall the asyncio event loop,
    i.e. rendering must be offloaded rather than run inline."""
    expected_tokens = [1, 2, 3]
    # Mock the blocking version to sleep
    def mocked_apply_chat_template(*_args, **_kwargs):
        time.sleep(2)
        return expected_tokens
    mock_model_config = MockModelConfig(skip_tokenizer_init=True)
    mock_tokenizer = Mock(spec=MistralTokenizer)
    mock_tokenizer.apply_chat_template = mocked_apply_chat_template
    mock_renderer = MistralRenderer(
        MockVllmConfig(mock_model_config),
        tokenizer=mock_tokenizer,
    )
    task = mock_renderer.render_messages_async([], ChatParams())
    # Ensure the event loop is not blocked: repeatedly yield to the loop and
    # measure how long each zero-sleep takes to come back.
    blocked_count = 0
    for _i in range(20):  # Check over ~2 seconds
        start = time.perf_counter()
        await asyncio.sleep(0)
        elapsed = time.perf_counter() - start
        # an overly generous elapsed time for slow machines
        if elapsed >= 0.5:
            blocked_count += 1
        await asyncio.sleep(0.1)
    # Ensure task completes
    _, prompt = await task
    assert prompt["prompt_token_ids"] == expected_tokens, (
        "Mocked blocking tokenizer was not called"
    )
    assert blocked_count == 0, "Event loop blocked during tokenization"
def test_apply_mistral_chat_template_thinking_chunk():
    """Thinking chunks must be rendered inside [THINK]...[/THINK] markers
    by the Mistral chat template."""
    messages = [
        {
            "role": "system",
            "content": [
                {"type": "text", "text": "You are a helpful assistant."},
                {
                    "type": "thinking",
                    "closed": True,
                    "thinking": "Only return the answer when you are confident.",
                },
            ],
        },
        {"role": "user", "content": "What is 2+2?"},
        {
            "role": "assistant",
            "content": [
                {"type": "text", "text": "Let me think about it."},
                {"type": "thinking", "closed": True, "thinking": "2+2 = 4"},
                {
                    "type": "text",
                    "text": "The answer is 4.",
                },
            ],
        },
        {"role": "user", "content": "Thanks, what is 3+3?"},
    ]
    mistral_tokenizer = MistralTokenizer.from_pretrained(
        "mistralai/Magistral-Small-2509"
    )
    tokens_ids = safe_apply_chat_template(
        mistral_tokenizer, messages, chat_template=None, tools=None
    )
    # Decode while KEEPing special tokens so the markers are visible.
    string_tokens = mistral_tokenizer.mistral.decode(
        tokens_ids, special_token_policy=SpecialTokenPolicy.KEEP
    )
    expected_tokens = (
        r"<s>[SYSTEM_PROMPT]You are a helpful assistant.[THINK]Only return the"
        r" answer when you are confident.[/THINK][/SYSTEM_PROMPT]"
        r"[INST]What is 2+2?[/INST]"
        r"Let me think about it.[THINK]2+2 = 4[/THINK]The answer is 4.</s>"
        r"[INST]Thanks, what is 3+3?[/INST]"
    )
    assert string_tokens == expected_tokens
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/renderers/test_mistral.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/renderers/deepseek_v32.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
from vllm.config import VllmConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ConversationMessage,
parse_chat_messages,
parse_chat_messages_async,
)
from vllm.logger import init_logger
from vllm.tokenizers import cached_get_tokenizer
from vllm.tokenizers.deepseek_v32 import DeepseekV32Tokenizer
from .base import BaseRenderer
from .inputs import DictPrompt
from .inputs.preprocess import parse_dec_only_prompt
from .params import ChatParams
logger = init_logger(__name__)
class DeepseekV32Renderer(BaseRenderer[DeepseekV32Tokenizer]):
    """Chat renderer backed by `DeepseekV32Tokenizer`."""

    @classmethod
    def from_config(  # type: ignore[override]
        cls,
        config: VllmConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> "DeepseekV32Renderer":
        """Build the renderer, skipping tokenizer creation when configured."""
        if config.model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_get_tokenizer(
                tokenizer_cls=DeepseekV32Tokenizer,
                **tokenizer_kwargs,
            )
        return cls(config, tokenizer)

    def _assemble_prompt(
        self,
        tokenizer: DeepseekV32Tokenizer,
        conversation: list[ConversationMessage],
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
        mm_data,
        mm_uuids,
    ) -> DictPrompt:
        """Apply the chat template and attach any multi-modal payloads."""
        prompt_raw = tokenizer.apply_chat_template(
            conversation=conversation,
            messages=messages,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt = parse_dec_only_prompt(prompt_raw)
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return prompt

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Synchronously render chat messages into a decoder-only prompt."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            self.model_config,
            content_format="string",
        )
        prompt = self._assemble_prompt(
            tokenizer, conversation, messages, params, mm_data, mm_uuids
        )
        return conversation, prompt

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Async variant of `render_messages`."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            self.model_config,
            content_format="string",
        )
        prompt = self._assemble_prompt(
            tokenizer, conversation, messages, params, mm_data, mm_uuids
        )
        return conversation, prompt
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/deepseek_v32.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/grok2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
from vllm.config import VllmConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ConversationMessage,
parse_chat_messages,
parse_chat_messages_async,
)
from vllm.logger import init_logger
from vllm.tokenizers import cached_get_tokenizer
from vllm.tokenizers.grok2 import Grok2Tokenizer
from .base import BaseRenderer
from .inputs import DictPrompt
from .inputs.preprocess import parse_dec_only_prompt
from .params import ChatParams
logger = init_logger(__name__)
class Grok2Renderer(BaseRenderer[Grok2Tokenizer]):
    """Chat renderer backed by `Grok2Tokenizer`."""

    @classmethod
    def from_config(  # type: ignore[override]
        cls,
        config: VllmConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> "Grok2Renderer":
        """Build the renderer; leave the tokenizer unset when
        `skip_tokenizer_init` is configured."""
        model_config = config.model_config
        if model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_get_tokenizer(
                tokenizer_cls=Grok2Tokenizer,
                **tokenizer_kwargs,
            )
        return cls(config, tokenizer)

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Synchronously render chat messages into a decoder-only prompt,
        attaching multi-modal data/UUIDs when present."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            self.model_config,
            content_format="string",
        )
        prompt_raw = tokenizer.apply_chat_template(
            conversation=conversation,
            messages=messages,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt = parse_dec_only_prompt(prompt_raw)
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return conversation, prompt

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Async variant of `render_messages`."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            self.model_config,
            content_format="string",
        )
        prompt_raw = tokenizer.apply_chat_template(
            conversation=conversation,
            messages=messages,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt = parse_dec_only_prompt(prompt_raw)
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return conversation, prompt
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/grok2.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/hf.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import inspect
import itertools
from collections import defaultdict, deque
from collections.abc import Set
from functools import lru_cache
from typing import TYPE_CHECKING, Any, cast
import jinja2
import jinja2.ext
import jinja2.meta
import jinja2.nodes
import jinja2.parser
import jinja2.sandbox
from vllm.config import ModelConfig, VllmConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ChatTemplateContentFormat,
ChatTemplateContentFormatOption,
ChatTemplateResolutionError,
ConversationMessage,
load_chat_template,
parse_chat_messages,
parse_chat_messages_async,
)
from vllm.logger import init_logger
from vllm.tokenizers import cached_get_tokenizer
from vllm.tokenizers.hf import CachedHfTokenizer, HfTokenizer
from vllm.transformers_utils.chat_templates import get_chat_template_fallback_path
from vllm.transformers_utils.processor import cached_get_processor
from vllm.utils.func_utils import supports_kw
from .base import BaseRenderer
from .inputs import DictPrompt
from .inputs.preprocess import parse_dec_only_prompt
from .params import ChatParams
if TYPE_CHECKING:
from vllm.multimodal.inputs import MultiModalDataDict, MultiModalUUIDDict
else:
MultiModalDataDict = dict[str, Any]
MultiModalUUIDDict = dict[str, Any]
logger = init_logger(__name__)
_PROCESSOR_CHAT_TEMPLATES = dict[tuple[str, bool], str | None]()
"""
Used in `_try_get_processor_chat_template` to avoid calling
`cached_get_processor` again if the processor fails to be loaded.
This is needed because `lru_cache` does not cache when an exception happens.
"""
def _try_get_processor_chat_template(
    tokenizer: HfTokenizer,
    *,
    trust_remote_code: bool,
) -> str | None:
    """Best-effort lookup of the chat template defined on the AutoProcessor.

    Returns None when the processor cannot be loaded or defines no template.
    Results (including failures) are memoized in `_PROCESSOR_CHAT_TEMPLATES`;
    `lru_cache` is not used because it does not cache raised exceptions.
    """
    cache_key = (tokenizer.name_or_path, trust_remote_code)
    if cache_key in _PROCESSOR_CHAT_TEMPLATES:
        return _PROCESSOR_CHAT_TEMPLATES[cache_key]
    from transformers import (
        PreTrainedTokenizer,
        PreTrainedTokenizerFast,
        ProcessorMixin,
    )
    try:
        processor = cached_get_processor(
            tokenizer.name_or_path,
            processor_cls=(
                PreTrainedTokenizer,
                PreTrainedTokenizerFast,
                ProcessorMixin,
            ),
            trust_remote_code=trust_remote_code,
        )
        # Only a real processor (not a bare tokenizer) can supply a template.
        if (
            isinstance(processor, ProcessorMixin)
            and hasattr(processor, "chat_template")
            and (chat_template := processor.chat_template) is not None
        ):
            _PROCESSOR_CHAT_TEMPLATES[cache_key] = chat_template
            return chat_template
    except Exception:
        logger.debug(
            "Failed to load AutoProcessor chat template for %s",
            tokenizer.name_or_path,
            exc_info=True,
        )
    # Negative result is cached too, so we don't retry a broken processor.
    _PROCESSOR_CHAT_TEMPLATES[cache_key] = None
    return None
def resolve_chat_template(
    tokenizer: HfTokenizer,
    chat_template: str | None,
    tools: list[dict[str, Any]] | None,
    *,
    model_config: "ModelConfig",
) -> str | None:
    """Pick the chat template to use, trying sources in priority order.

    Priority: explicit `chat_template` argument > AutoProcessor template
    (skipped when tools are given) > AutoTokenizer template > predefined
    per-model-type fallback. Returns None when no source yields one.
    """
    # 1st priority: The given chat template
    if chat_template is not None:
        return chat_template
    # 2nd priority: AutoProcessor chat template, unless tool calling is enabled
    if tools is None:
        chat_template = _try_get_processor_chat_template(
            tokenizer,
            trust_remote_code=model_config.trust_remote_code,
        )
        if chat_template is not None:
            return chat_template
    # 3rd priority: AutoTokenizer chat template
    try:
        return tokenizer.get_chat_template(chat_template, tools=tools)
    except Exception:
        logger.debug(
            "Failed to load AutoTokenizer chat template for %s",
            tokenizer.name_or_path,
            exc_info=True,
        )
    # 4th priority: Predefined fallbacks
    path = get_chat_template_fallback_path(
        model_type=model_config.hf_config.model_type,
        tokenizer_name_or_path=tokenizer.name_or_path,
    )
    if path is not None:
        logger.info_once(
            "Loading chat template fallback for %s as there isn't one "
            "defined on HF Hub.",
            tokenizer.name_or_path,
        )
        chat_template = load_chat_template(path)
    else:
        logger.debug_once(
            "There is no chat template fallback for %s", tokenizer.name_or_path
        )
    # `chat_template` is still None here unless a fallback file was loaded.
    return chat_template
def _is_var_access(node: jinja2.nodes.Node, varname: str) -> bool:
    """Return True if `node` is a read of the variable named `varname`."""
    return (
        isinstance(node, jinja2.nodes.Name)
        and node.ctx == "load"
        and node.name == varname
    )
def _is_attr_access(node: jinja2.nodes.Node, varname: str, key: str) -> bool:
    """Return True if `node` reads `varname[key]` or `varname.key`."""
    if isinstance(node, jinja2.nodes.Getattr):
        return node.attr == key and _is_var_access(node.node, varname)
    if isinstance(node, jinja2.nodes.Getitem):
        subscript = node.arg
        return (
            isinstance(subscript, jinja2.nodes.Const)
            and subscript.value == key
            and _is_var_access(node.node, varname)
        )
    return False
def _is_var_or_elems_access(
    node: jinja2.nodes.Node,
    varname: str,
    key: str | None = None,
) -> bool:
    """Like `_is_var_access`/`_is_attr_access`, but sees through filters,
    tests, and slice subscripts applied on top of the access."""
    # Unwrap `x | filter`, `x is test`, and `x[a:b]` down to the raw access.
    if isinstance(node, jinja2.nodes.Filter):
        inner = node.node
        return inner is not None and _is_var_or_elems_access(inner, varname, key)
    if isinstance(node, jinja2.nodes.Test):
        return _is_var_or_elems_access(node.node, varname, key)
    if isinstance(node, jinja2.nodes.Getitem) and isinstance(
        node.arg, jinja2.nodes.Slice
    ):
        return _is_var_or_elems_access(node.node, varname, key)
    if not key:
        return _is_var_access(node, varname)
    return _is_attr_access(node, varname, key)
def _iter_nodes_assign_var_or_elems(root: jinja2.nodes.Node, varname: str):
    """Yield (node, name) for `varname` and for every variable that is
    (transitively) assigned from it or from its elements."""
    # Global variable that is implicitly defined at the root
    yield root, varname
    # Iterative BFS over the aliases of `varname`
    related_varnames = deque([varname])
    while related_varnames:
        related_varname = related_varnames.popleft()
        for assign_ast in root.find_all(jinja2.nodes.Assign):
            lhs = assign_ast.target
            rhs = assign_ast.node
            if _is_var_or_elems_access(rhs, related_varname):
                assert isinstance(lhs, jinja2.nodes.Name)
                yield assign_ast, lhs.name
                # Avoid infinite looping for self-assignment
                if lhs.name != related_varname:
                    related_varnames.append(lhs.name)
# NOTE: The proper way to handle this is to build a CFG so that we can handle
# the scope in which each variable is defined, but that is too complicated
def _iter_nodes_assign_messages_item(root: jinja2.nodes.Node):
    """Yield (For node, loop variable name) for every loop that iterates
    over `messages` or any alias of it."""
    messages_varnames = [
        varname for _, varname in _iter_nodes_assign_var_or_elems(root, "messages")
    ]
    # Search for {%- for message in messages -%} loops
    for loop_ast in root.find_all(jinja2.nodes.For):
        if any(
            _is_var_or_elems_access(loop_ast.iter, varname)
            for varname in messages_varnames
        ):
            target = loop_ast.target
            assert isinstance(target, jinja2.nodes.Name)
            yield loop_ast, target.name
def _iter_nodes_assign_content_item(root: jinja2.nodes.Node):
    """Yield (For node, loop variable name) for every loop that iterates
    over a message's `content` attribute/key."""
    message_varnames = [
        varname for _, varname in _iter_nodes_assign_messages_item(root)
    ]
    # Search for {%- for content in message['content'] -%} loops
    for loop_ast in root.find_all(jinja2.nodes.For):
        if any(
            _is_var_or_elems_access(loop_ast.iter, varname, "content")
            for varname in message_varnames
        ):
            target = loop_ast.target
            assert isinstance(target, jinja2.nodes.Name)
            yield loop_ast, target.name
def _try_extract_ast(chat_template: str) -> jinja2.nodes.Template | None:
    """Compile `chat_template` with HF's Jinja environment and return its
    parsed AST, or None when compilation/parsing fails."""
    import transformers.utils.chat_template_utils as hf_chat_utils

    try:
        compiled = hf_chat_utils._compile_jinja_template(chat_template)
        return compiled.environment.parse(chat_template)
    except Exception:
        logger.exception("Error when compiling Jinja template")
        return None
@lru_cache(maxsize=32)
def _detect_content_format(
    chat_template: str,
    *,
    default: ChatTemplateContentFormat,
) -> ChatTemplateContentFormat:
    """Classify a template as 'string' vs 'openai' content format by
    scanning its AST for loops over message content items."""
    template_ast = _try_extract_ast(chat_template)
    if template_ast is None:
        return default
    try:
        next(_iter_nodes_assign_content_item(template_ast))
    except StopIteration:
        # No loop over message['content'] -> plain-string content
        return "string"
    except Exception:
        logger.exception("Error when parsing AST of Jinja template")
        return default
    return "openai"
def _resolve_chat_template_content_format(
    chat_template: str | None,
    tools: list[dict[str, Any]] | None,
    tokenizer: HfTokenizer,
    *,
    model_config: "ModelConfig",
) -> ChatTemplateContentFormat:
    """Resolve the effective chat template, then auto-detect its format."""
    resolved = resolve_chat_template(
        tokenizer,
        chat_template=chat_template,
        tools=tools,
        model_config=model_config,
    )
    if isinstance(resolved, str):
        jinja_text: str | None = resolved
    else:
        # Fall back to interpreting the caller's template as a literal.
        jinja_text = load_chat_template(chat_template, is_literal=True)
    if jinja_text is None:
        return "string"
    return _detect_content_format(jinja_text, default="string")
@lru_cache(maxsize=32)
def _log_chat_template_content_format(
    chat_template: str | None,  # For caching purposes
    given_format: ChatTemplateContentFormatOption,
    detected_format: ChatTemplateContentFormatOption,
):
    """Log the detected chat template content format, once per combination.

    The cache is bounded (matching `_detect_content_format`) so a server
    seeing many distinct per-request templates does not grow it without
    limit; the only cost of eviction is an occasional repeated log line.
    """
    logger.info(
        "Detected the chat template content format to be '%s'. "
        "You can set `--chat-template-content-format` to override this.",
        detected_format,
    )
    # Warn when the user's explicit choice disagrees with detection.
    if given_format != "auto" and given_format != detected_format:
        logger.warning(
            "You specified `--chat-template-content-format %s` "
            "which is different from the detected format '%s'. "
            "If our automatic detection is incorrect, please consider "
            "opening a GitHub issue so that we can improve it: "
            "https://github.com/vllm-project/vllm/issues/new/choose",
            given_format,
            detected_format,
        )
def resolve_chat_template_content_format(
    chat_template: str | None,
    tools: list[dict[str, Any]] | None,
    given_format: ChatTemplateContentFormatOption,
    tokenizer: HfTokenizer,
    *,
    model_config: "ModelConfig",
) -> ChatTemplateContentFormat:
    """Return `given_format` unless it is "auto"; in that case detect the
    format from the resolved chat template and log the decision."""
    if given_format != "auto":
        return given_format
    detected = _resolve_chat_template_content_format(
        chat_template,
        tools,
        tokenizer,
        model_config=model_config,
    )
    _log_chat_template_content_format(
        chat_template,
        given_format=given_format,
        detected_format=detected,
    )
    return detected
# adapted from https://github.com/huggingface/transformers/blob/v4.56.2/src/transformers/utils/chat_template_utils.py#L398-L412
# only preserve the parse function used to resolve chat template kwargs
class AssistantTracker(jinja2.ext.Extension):
    """Jinja extension that recognizes HF's
    `{% generation %}...{% endgeneration %}` tags, so templates using them
    still parse when we extract their undeclared variables."""

    tags = {"generation"}
    def parse(self, parser: jinja2.parser.Parser) -> jinja2.nodes.Node:
        """Consume a `generation` block and wrap its body in a CallBlock."""
        lineno = next(parser.stream).lineno
        body = parser.parse_statements(("name:endgeneration",), drop_needle=True)
        # NOTE: `_generation_support` is never defined here — the templates
        # are only parsed (not rendered) for kwarg resolution, so the call
        # target is never invoked.
        call = self.call_method("_generation_support")
        call_block = jinja2.nodes.CallBlock(call, [], [], body)
        return call_block.set_lineno(lineno)
def _resolve_chat_template_kwargs(chat_template: str) -> Set[str]:
    """Return the undeclared (caller-supplied) variable names that
    `chat_template` references."""
    sandbox_env = jinja2.sandbox.ImmutableSandboxedEnvironment(
        trim_blocks=True,
        lstrip_blocks=True,
        extensions=[AssistantTracker, jinja2.ext.loopcontrols],
    )
    template_ast = sandbox_env.parse(chat_template)
    return jinja2.meta.find_undeclared_variables(template_ast)


_cached_resolve_chat_template_kwargs = lru_cache(_resolve_chat_template_kwargs)
@lru_cache
def _get_hf_base_chat_template_params() -> frozenset[str]:
    """Names of the standard `apply_chat_template` parameters declared on
    HF's base `PreTrainedTokenizer`.

    Reading the base-class signature (the single source of truth) keeps us
    compatible with tokenizers that receive these standard parameters
    through **kwargs instead of declaring them explicitly.
    """
    from transformers import PreTrainedTokenizer

    signature = inspect.signature(PreTrainedTokenizer.apply_chat_template)
    # Exclude the **kwargs / *args placeholders themselves.
    variadic_kinds = (
        inspect.Parameter.VAR_KEYWORD,
        inspect.Parameter.VAR_POSITIONAL,
    )
    return frozenset(
        name
        for name, param in signature.parameters.items()
        if param.kind not in variadic_kinds
    )
def resolve_chat_template_kwargs(
    tokenizer: HfTokenizer,
    chat_template: str,
    chat_template_kwargs: dict[str, Any],
    raise_on_unexpected: bool = True,
) -> dict[str, Any]:
    """Filter request kwargs down to those the tokenizer or template accept.

    Raises ValueError (when `raise_on_unexpected`) if the caller tries to
    override `chat_template` or `tokenize`, which vLLM controls itself.
    """
    # The chat template is already resolved at this stage, so these names
    # must not be overridden per-request.
    reserved = {"chat_template", "tokenize"}
    overlap = reserved & chat_template_kwargs.keys()
    if overlap and raise_on_unexpected:
        raise ValueError(
            "Found unexpected chat template kwargs from request: "
            f"{overlap}"
        )
    # Kwargs the tokenizer's apply_chat_template declares explicitly.
    fn_params = {
        name
        for name in chat_template_kwargs
        if supports_kw(tokenizer.apply_chat_template, name, allow_var_kwargs=False)
    }
    # Kwargs the template itself references.
    template_vars = _cached_resolve_chat_template_kwargs(chat_template)
    # Allow standard HF parameters even if tokenizer uses **kwargs to receive them.
    hf_base_params = _get_hf_base_chat_template_params()
    allowed = (fn_params | template_vars | hf_base_params) - reserved
    return {k: v for k, v in chat_template_kwargs.items() if k in allowed}
def safe_apply_chat_template(
    model_config: "ModelConfig",
    tokenizer: HfTokenizer,
    conversation: list[ConversationMessage],
    *,
    tools: list[dict[str, Any]] | None = None,
    chat_template: str | None = None,
    tokenize: bool = True,
    **kwargs,
) -> str | list[int]:
    """Resolve a chat template and apply it, normalizing failures.

    Returns token IDs when `tokenize` is True, else the prompt string.

    Raises:
        ChatTemplateResolutionError: if no chat template can be resolved.
        ValueError: if the kwargs are unexpected, or if the underlying
            `transformers` call fails for any reason.
    """
    chat_template = resolve_chat_template(
        tokenizer,
        chat_template=chat_template,
        tools=tools,
        model_config=model_config,
    )
    if chat_template is None:
        raise ChatTemplateResolutionError(
            "As of transformers v4.44, default chat template is no longer "
            "allowed, so you must provide a chat template if the tokenizer "
            "does not define one."
        )
    # Drop kwargs neither the tokenizer nor the template would accept.
    resolved_kwargs = resolve_chat_template_kwargs(
        tokenizer=tokenizer,
        chat_template=chat_template,
        chat_template_kwargs=kwargs,
    )
    try:
        return tokenizer.apply_chat_template(
            conversation=conversation,  # type: ignore[arg-type]
            tools=tools,  # type: ignore[arg-type]
            chat_template=chat_template,
            tokenize=tokenize,
            **resolved_kwargs,
        )
    # External library exceptions can sometimes occur despite the framework's
    # internal exception management capabilities.
    except Exception as e:
        # Log and report any library-related exceptions for further
        # investigation.
        logger.exception(
            "An error occurred in `transformers` while applying chat template"
        )
        raise ValueError(str(e)) from e
def rebuild_mm_uuids_from_mm_data(
    mm_uuids: "MultiModalUUIDDict",
    mm_data: "MultiModalDataDict",
) -> "MultiModalUUIDDict":
    """Rebuild mm_uuids after vision_chunk processing.

    When videos are split into chunks, the original UUIDs need to be updated
    to reflect the new UUIDs generated for each chunk.

    Args:
        mm_uuids: Original UUIDs dictionary
        mm_data: Processed multimodal data with vision_chunk items

    Returns:
        Updated UUIDs dictionary with chunk UUIDs
    """
    chunks = mm_data.get("vision_chunk")
    if chunks is None:
        # No chunking happened; the original mapping is still valid.
        return mm_uuids

    assert all(isinstance(chunk, dict) for chunk in chunks), (
        "Expected all vision_chunk items to be dicts"
    )

    chunk_uuids = []
    for chunk in chunks:
        chunk_uuid = chunk.get("uuid")
        if chunk_uuid is not None:
            chunk_uuids.append(chunk_uuid)

    if not chunk_uuids:
        return mm_uuids

    # Copy before mutating so the caller's dict is left untouched.
    updated = dict(mm_uuids)
    updated["vision_chunk"] = chunk_uuids
    return updated
def build_video_prompts_from_mm_data(
    mm_data: "MultiModalDataDict",
) -> list[str]:
    """Build video prompts from vision_chunk data.

    Collects prompts from video chunks and groups them by video_idx.

    Args:
        mm_data: Processed multimodal data with vision_chunk items

    Returns:
        List of video prompts, one per video.
    """
    chunks = mm_data.get("vision_chunk")
    if chunks is None:
        return []

    # Accumulate chunk prompts per video, keyed by the video's index.
    prompts_by_video: dict[int, list[str]] = {}
    for chunk in chunks:
        # vision_chunk items are always dicts (VisionChunkImage/VisionChunkVideo)
        assert isinstance(chunk, dict)
        if chunk.get("type") != "video_chunk":
            continue
        idx = chunk.get("video_idx", 0)
        prompts_by_video.setdefault(idx, []).append(chunk.get("prompt", ""))

    # One joined prompt per video, in ascending video_idx order.
    return ["".join(prompts_by_video[idx]) for idx in sorted(prompts_by_video)]
def replace_vision_chunk_video_placeholder(
    prompt_raw: str | list[int],
    mm_data: "MultiModalDataDict",
    video_placeholder: str | None,
) -> str | list[int]:
    """Splice runtime video-chunk prompts into a string prompt.

    Each occurrence of ``video_placeholder`` is replaced, in order, by the
    prompt assembled for the corresponding video. Token-ID prompts, a
    missing placeholder, or a count mismatch leave the prompt unchanged.
    """
    if not video_placeholder or not isinstance(prompt_raw, str):
        return prompt_raw

    video_prompts = build_video_prompts_from_mm_data(mm_data)
    segments = prompt_raw.split(video_placeholder)
    if len(segments) != len(video_prompts) + 1:
        # Placeholder count and video count disagree; keep the prompt as-is.
        logger.warning(
            "Number of video placeholders (%d) does not match "
            "number of videos (%d) in the request.",
            len(segments) - 1,
            len(video_prompts),
        )
        return prompt_raw

    # Interleave: seg0, vid0, seg1, vid1, ..., segN.
    pieces: list[str] = []
    for segment, video_prompt in zip(segments, video_prompts):
        pieces.append(segment)
        pieces.append(video_prompt)
    pieces.append(segments[-1])
    return "".join(pieces)
class HfRenderer(BaseRenderer[HfTokenizer]):
    """Renderer that formats chat messages via a HuggingFace tokenizer's
    chat template and packages the result (plus any multimodal payload)
    into a decoder-only prompt dict.

    BUGFIX: ``render_messages_async`` previously skipped
    ``rebuild_mm_uuids_from_mm_data`` while the sync path applied it, so
    chunked-video UUIDs were stale on the async serving path. Both paths
    now share ``_postprocess_vision_chunks`` and behave identically.
    """

    @classmethod
    def from_config(  # type: ignore[override]
        cls,
        config: VllmConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> "HfRenderer":
        """Build the renderer, loading the cached HF tokenizer unless
        ``skip_tokenizer_init`` is set on the model config."""
        model_config = config.model_config
        if model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cast(
                HfTokenizer,
                cached_get_tokenizer(
                    tokenizer_cls=CachedHfTokenizer,  # type: ignore[type-abstract]
                    **tokenizer_kwargs,
                ),
            )
        return cls(config, tokenizer)

    def __init__(
        self,
        config: VllmConfig,
        tokenizer: HfTokenizer | None,
    ) -> None:
        super().__init__(config, tokenizer)
        # NOTE: use_unified_vision_chunk is currently specific to Kimi-K2.5
        # model which uses unified vision chunks for both images and videos.
        self.use_unified_vision_chunk = getattr(
            config.model_config.hf_config, "use_unified_vision_chunk", False
        )

    def _resolve_content_format(self, params: ChatParams, tokenizer: HfTokenizer):
        """Resolve the chat-template content format for this request."""
        return resolve_chat_template_content_format(
            chat_template=params.chat_template,
            tools=params.chat_template_kwargs.get("tools"),
            given_format=params.chat_template_content_format,
            tokenizer=tokenizer,
            model_config=self.model_config,
        )

    def _postprocess_vision_chunks(self, prompt_raw, mm_data, mm_uuids):
        """Apply unified vision-chunk post-processing (Kimi-K2.5 style).

        Rebuilds the multimodal UUIDs to reflect per-chunk UUIDs and splices
        the runtime video-chunk prompts into the rendered prompt. No-op when
        the model does not use unified vision chunks or when there is no
        multimodal payload.

        Returns the (possibly updated) ``(prompt_raw, mm_uuids)`` pair.
        """
        if not (
            self.use_unified_vision_chunk
            and mm_uuids is not None
            and mm_data is not None
        ):
            return prompt_raw, mm_uuids
        mm_uuids = rebuild_mm_uuids_from_mm_data(mm_uuids, mm_data)
        # get video placeholder, replace it with runtime video-chunk prompts
        video_placeholder = getattr(
            self.model_config.hf_config, "video_placeholder", None
        )
        prompt_raw = replace_vision_chunk_video_placeholder(
            prompt_raw, mm_data, video_placeholder
        )
        return prompt_raw, mm_uuids

    @staticmethod
    def _build_prompt(prompt_raw, mm_data, mm_uuids) -> DictPrompt:
        """Wrap the rendered prompt together with its multimodal payload."""
        prompt = parse_dec_only_prompt(prompt_raw)
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return prompt

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Parse, template, and package the chat messages (sync path)."""
        model_config = self.model_config
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            model_config,
            content_format=self._resolve_content_format(params, tokenizer),
        )
        prompt_raw = safe_apply_chat_template(
            model_config,
            tokenizer,
            conversation,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt_raw, mm_uuids = self._postprocess_vision_chunks(
            prompt_raw, mm_data, mm_uuids
        )
        return conversation, self._build_prompt(prompt_raw, mm_data, mm_uuids)

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Async variant of :meth:`render_messages`; mirrors it exactly,
        including the vision-chunk UUID rebuild."""
        model_config = self.model_config
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            model_config,
            content_format=self._resolve_content_format(params, tokenizer),
        )
        prompt_raw = safe_apply_chat_template(
            model_config,
            tokenizer,
            conversation,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt_raw, mm_uuids = self._postprocess_vision_chunks(
            prompt_raw, mm_data, mm_uuids
        )
        return conversation, self._build_prompt(prompt_raw, mm_data, mm_uuids)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/hf.py",
"license": "Apache License 2.0",
"lines": 601,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/mistral.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from concurrent.futures import ThreadPoolExecutor
from typing import Any
from vllm.config import VllmConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ConversationMessage,
parse_chat_messages,
parse_chat_messages_async,
)
from vllm.logger import init_logger
from vllm.tokenizers import cached_get_tokenizer
from vllm.tokenizers.mistral import MistralTokenizer
from vllm.utils.async_utils import make_async
from .base import BaseRenderer
from .inputs import DictPrompt
from .inputs.preprocess import parse_dec_only_prompt
from .params import ChatParams
logger = init_logger(__name__)
def safe_apply_chat_template(
    tokenizer: MistralTokenizer,
    messages: list[ChatCompletionMessageParam],
    **kwargs,
) -> str | list[int]:
    """Apply the Mistral chat template, normalizing failures to ValueError.

    mistral-common signals malformed input via ``assert`` statements and its
    own exception type; both are converted to ``ValueError`` so the
    preprocessing step can catch them uniformly. Any other exception is
    logged before being converted as well.
    """
    from mistral_common.exceptions import MistralCommonException

    try:
        return tokenizer.apply_chat_template(messages, **kwargs)
    except (AssertionError, MistralCommonException) as e:
        # Expected validation failures from mistral-common.
        raise ValueError(str(e)) from e
    except Exception as e:
        # Unexpected library failure; log it for further investigation
        # before surfacing a uniform ValueError.
        logger.exception(
            "An error occurred in `mistral_common` while applying chat template"
        )
        raise ValueError(str(e)) from e
class MistralRenderer(BaseRenderer[MistralTokenizer]):
    """Renderer backed by ``mistral_common``'s chat templating.

    Template application can block, so the async path runs it on a
    dedicated single-worker thread pool.
    """

    @classmethod
    def from_config(  # type: ignore[override]
        cls,
        config: VllmConfig,
        tokenizer_kwargs: dict[str, Any],
    ) -> "MistralRenderer":
        """Build the renderer, loading the Mistral tokenizer unless
        ``skip_tokenizer_init`` is set."""
        if config.model_config.skip_tokenizer_init:
            tokenizer = None
        else:
            tokenizer = cached_get_tokenizer(
                tokenizer_cls=MistralTokenizer,
                **tokenizer_kwargs,
            )
        return cls(config, tokenizer)

    def __init__(
        self,
        config: VllmConfig,
        tokenizer: MistralTokenizer | None,
    ) -> None:
        super().__init__(config, tokenizer)
        # Single worker keeps template application off the event loop while
        # serializing calls into mistral-common.
        self._apply_chat_template_executor = ThreadPoolExecutor(max_workers=1)
        self._apply_chat_template_async = make_async(
            safe_apply_chat_template, executor=self._apply_chat_template_executor
        )

    @staticmethod
    def _attach_mm(prompt: DictPrompt, mm_data, mm_uuids) -> DictPrompt:
        """Attach the multimodal payload (if any) to the prompt dict."""
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return prompt

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Render the chat synchronously via the Mistral tokenizer."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            self.model_config,
            content_format="string",
        )
        rendered = safe_apply_chat_template(
            tokenizer,
            messages,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt = self._attach_mm(parse_dec_only_prompt(rendered), mm_data, mm_uuids)
        return conversation, prompt

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Async variant; template application runs in the executor."""
        tokenizer = self.get_tokenizer()
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            self.model_config,
            content_format="string",
        )
        rendered = await self._apply_chat_template_async(
            tokenizer,
            messages,
            **params.get_apply_chat_template_kwargs(),
        )
        prompt = self._attach_mm(parse_dec_only_prompt(rendered), mm_data, mm_uuids)
        return conversation, prompt
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/mistral.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/registry.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any
from vllm.logger import init_logger
from vllm.tokenizers.registry import tokenizer_args_from_config
from vllm.utils.import_utils import resolve_obj_by_qualname
from .base import BaseRenderer
if TYPE_CHECKING:
from vllm.config import VllmConfig
logger = init_logger(__name__)
# Built-in renderers: mode -> (module basename under vllm.renderers, class
# name). Resolved to fully-qualified paths when RENDERER_REGISTRY is built.
_VLLM_RENDERERS = {
    "deepseek_v32": ("deepseek_v32", "DeepseekV32Renderer"),
    "hf": ("hf", "HfRenderer"),
    "grok2": ("grok2", "Grok2Renderer"),
    "mistral": ("mistral", "MistralRenderer"),
    "terratorch": ("terratorch", "TerratorchRenderer"),
}
@dataclass
class RendererRegistry:
    """Maps renderer mode strings to the module/class implementing them.

    Classes are stored as (module, class name) pairs and imported lazily,
    so registering a renderer does not import it.
    """

    # Renderer mode -> (renderer module, renderer class)
    renderers: dict[str, tuple[str, str]] = field(default_factory=dict)

    def register(self, renderer_mode: str, module: str, class_name: str) -> None:
        """Register (or overwrite) the renderer class for ``renderer_mode``."""
        if renderer_mode in self.renderers:
            # Overwriting is allowed but loud, since it usually indicates
            # two plugins competing for the same mode.
            logger.warning(
                "%s.%s is already registered for renderer_mode=%r. "
                "It is overwritten by the new one.",
                module,
                class_name,
                renderer_mode,
            )
        self.renderers[renderer_mode] = (module, class_name)

    def load_renderer_cls(self, renderer_mode: str) -> type[BaseRenderer]:
        """Import and return the renderer class for ``renderer_mode``.

        Raises:
            ValueError: if no renderer is registered for the mode.
        """
        entry = self.renderers.get(renderer_mode)
        if entry is None:
            raise ValueError(f"No renderer registered for {renderer_mode=!r}.")
        module, class_name = entry
        logger.debug_once(f"Loading {class_name} for {renderer_mode=!r}")
        return resolve_obj_by_qualname(f"{module}.{class_name}")

    def load_renderer(
        self,
        renderer_mode: str,
        config: "VllmConfig",
        tokenizer_kwargs: dict[str, Any],
    ) -> BaseRenderer:
        """Instantiate the renderer for ``renderer_mode`` from ``config``."""
        resolved_cls = self.load_renderer_cls(renderer_mode)
        return resolved_cls.from_config(config, tokenizer_kwargs)
# Pre-populate the registry with the built-in renderer modes, expanding the
# relative module names in _VLLM_RENDERERS to fully-qualified import paths.
RENDERER_REGISTRY = RendererRegistry(
    {
        mode: (f"vllm.renderers.{mod_relname}", cls_name)
        for mode, (mod_relname, cls_name) in _VLLM_RENDERERS.items()
    }
)
"""The global `RendererRegistry` instance."""
def renderer_from_config(config: "VllmConfig", **kwargs):
    """Build the renderer appropriate for ``config``'s tokenizer settings.

    The renderer mode normally follows the tokenizer mode; terratorch
    models are the exception and get their dedicated renderer.
    """
    model_config = config.model_config
    # Positional tokenizer args are intentionally unused here; only the
    # keyword args (plus the tokenizer name) are forwarded.
    tokenizer_mode, tokenizer_name, _args, kwargs = tokenizer_args_from_config(
        model_config, **kwargs
    )

    is_terratorch = (
        model_config.tokenizer_mode == "auto"
        and model_config.model_impl == "terratorch"
    )
    renderer_mode = "terratorch" if is_terratorch else tokenizer_mode

    return RENDERER_REGISTRY.load_renderer(
        renderer_mode,
        config,
        tokenizer_kwargs={**kwargs, "tokenizer_name": tokenizer_name},
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/registry.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/renderers/terratorch.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from typing import Any
from vllm.config import VllmConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ConversationMessage,
parse_chat_messages,
parse_chat_messages_async,
)
from vllm.logger import init_logger
from .base import BaseRenderer
from .inputs import DictPrompt
from .inputs.preprocess import parse_dec_only_prompt
from .params import ChatParams
logger = init_logger(__name__)
class TerratorchRenderer(BaseRenderer):
    """Renderer for terratorch models, which consume no text tokens.

    The model works purely on multimodal inputs, so the prompt is a single
    dummy token ID and the tokenizer must be skipped entirely.
    """

    @classmethod
    def from_config(
        cls,
        config: VllmConfig,  # type: ignore[override]
        tokenizer_kwargs: dict[str, Any],
    ) -> "TerratorchRenderer":
        """Build the renderer; rejects configs that initialize a tokenizer."""
        if not config.model_config.skip_tokenizer_init:
            raise ValueError("Terratorch renderer requires `skip_tokenizer_init=True`")
        return cls(config, None)

    @staticmethod
    def _make_prompt(mm_data, mm_uuids) -> DictPrompt:
        """Build the dummy-token prompt carrying the multimodal payload."""
        prompt = parse_dec_only_prompt([1])  # Dummy token IDs
        if mm_data is not None:
            prompt["multi_modal_data"] = mm_data
        if mm_uuids is not None:
            prompt["multi_modal_uuids"] = mm_uuids
        return prompt

    def render_messages(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Parse messages and return the dummy-token prompt (sync)."""
        conversation, mm_data, mm_uuids = parse_chat_messages(
            messages,
            self.model_config,
            content_format="string",
        )
        return conversation, self._make_prompt(mm_data, mm_uuids)

    async def render_messages_async(
        self,
        messages: list[ChatCompletionMessageParam],
        params: ChatParams,
    ) -> tuple[list[ConversationMessage], DictPrompt]:
        """Parse messages and return the dummy-token prompt (async)."""
        conversation, mm_data, mm_uuids = await parse_chat_messages_async(
            messages,
            self.model_config,
            content_format="string",
        )
        return conversation, self._make_prompt(mm_data, mm_uuids)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/renderers/terratorch.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/mori_prepare_finalize.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import mori
import torch
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig
from vllm.platforms import current_platform
logger = init_logger(__name__)
class MoriPrepareAndFinalize(mk.FusedMoEPrepareAndFinalize):
    """
    Prepare/Finalize using MoRI kernels.
    """
    def __init__(
        self,
        mori_op: mori.ops.EpDispatchCombineOp,
        max_tokens_per_rank: int,
        num_dispatchers: int,
        use_fp8_dispatch: bool = False,
    ):
        # mori_op: the expert-parallel dispatch/combine op used in
        # prepare()/finalize().
        # use_fp8_dispatch: if True, activations are quantized to fp8
        # (via aiter) before dispatch — presumably to cut communication
        # volume; confirm against the MoRI kernel docs.
        super().__init__()
        self.mori_op = mori_op
        self.num_dispatchers_ = num_dispatchers
        self.max_tokens_per_rank = max_tokens_per_rank
        self.use_fp8_dispatch = use_fp8_dispatch
    @property
    def activation_format(self) -> mk.FusedMoEActivationFormat:
        # Activations use the standard (non-batched) layout.
        return mk.FusedMoEActivationFormat.Standard
    def output_is_reduced(self) -> bool:
        # finalize() writes a combined result into `output`; callers need
        # no further reduction.
        return True
    def num_dispatchers(self):
        return self.num_dispatchers_
    def max_num_tokens_per_rank(self) -> int | None:
        return self.max_tokens_per_rank
    def topk_indices_dtype(self) -> torch.dtype | None:
        # Top-k expert IDs are int32 (the dtype passed to mori_op.dispatch).
        return torch.int32
    def supports_async(self) -> bool:
        return False
    def prepare(
        self,
        a1: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        num_experts: int,
        expert_map: torch.Tensor | None,
        apply_router_weight_on_input: bool,
        quant_config: FusedMoEQuantConfig,
        defer_input_quant: bool = False,
    ) -> mk.PrepareResultType:
        """
        Returns a tuple of:
        - quantized + dispatched a.
        - Optional quantized + dispatched a1_scales.
        - Optional ExpertTokensMetadata containing gpu/cpu tensors
        as big as the number of local experts with the information about the
        number of tokens assigned to each local expert.
        - Optional dispatched expert topk IDs
        - Optional dispatched expert topk weight
        """
        if defer_input_quant:
            raise NotImplementedError(
                f"{self.__class__.__name__} does not support defer_input_quant=True. "
                "Please select an MoE kernel that accepts quantized inputs."
            )
        assert not apply_router_weight_on_input, (
            "mori does not support apply_router_weight_on_input=True now."
        )
        # Optionally fp8-quantize activations before communication; the
        # resulting scales ride along through dispatch.
        scale = None
        if self.use_fp8_dispatch:
            from aiter import QuantType, get_hip_quant
            if quant_config.is_block_quantized:
                # 1x128 block-wise quantization.
                quant_func = get_hip_quant(QuantType.per_1x128)
                a1, scale = quant_func(a1, quant_dtype=current_platform.fp8_dtype())
            elif quant_config.is_per_act_token:
                # Per-token quantization.
                quant_func = get_hip_quant(QuantType.per_Token)
                a1, scale = quant_func(a1, quant_dtype=current_platform.fp8_dtype())
        (
            dispatch_a1,
            dispatch_weights,
            dispatch_scale,
            dispatch_ids,
            dispatch_recv_token_num,
        ) = self.mori_op.dispatch(a1, topk_weights, scale, topk_ids)
        # Only the GPU-side per-local-expert token counts are available here;
        # no CPU copy is produced on this path.
        expert_tokens_meta = mk.ExpertTokensMetadata(
            expert_num_tokens=dispatch_recv_token_num, expert_num_tokens_cpu=None
        )
        return (
            dispatch_a1,
            dispatch_scale,
            expert_tokens_meta,
            dispatch_ids,
            dispatch_weights,
        )
    def finalize(
        self,
        output: torch.Tensor,
        fused_expert_output: torch.Tensor,
        topk_weights: torch.Tensor,
        topk_ids: torch.Tensor,
        apply_router_weight_on_input: bool,
        weight_and_reduce_impl: mk.TopKWeightAndReduce,
    ) -> None:
        """Combine expert outputs and write the result into ``output`` in place."""
        num_token = output.shape[0]
        # combine() returns a tuple; element 0 holds the combined activations.
        # Rows past num_token are trimmed — presumably padding up to
        # max_tokens_per_rank; confirm against the MoRI combine contract.
        result = self.mori_op.combine(
            fused_expert_output,
            None,
            topk_ids,
        )[0]
        output.copy_(result[:num_token])
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/mori_prepare_finalize.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/sample/prompt_logprob.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import numpy as np
import torch
from vllm.sampling_params import SamplingParams
from vllm.triton_utils import tl, triton
from vllm.v1.outputs import LogprobsTensors
from vllm.v1.worker.gpu.input_batch import InputBatch
from vllm.v1.worker.gpu.sample.logprob import compute_topk_logprobs
class PromptLogprobsWorker:
    """Computes logprobs of prompt tokens for requests that ask for them.

    Supports chunked prefill: per-chunk partial results are buffered in
    ``in_progress_prompt_logprobs`` and merged once the final prompt chunk
    has been processed.
    """
    def __init__(self, max_num_reqs: int):
        # Upper bound on concurrently tracked request slots.
        self.max_num_reqs = max_num_reqs
        # Per request-state slot: True if that request wants prompt logprobs.
        self.uses_prompt_logprobs = np.zeros(self.max_num_reqs, dtype=bool)
        # req_id -> list of in-progress LogprobsTensors
        self.in_progress_prompt_logprobs: dict[str, list[LogprobsTensors]] = {}
    def add_request(self, req_id: str, req_idx: int, sampling_params: SamplingParams):
        """Start tracking a request; allocate a merge buffer if needed."""
        # For now, only support prompt logprobs for the prompt tokens (not top-k).
        uses_prompt_logprobs = sampling_params.prompt_logprobs is not None
        self.uses_prompt_logprobs[req_idx] = uses_prompt_logprobs
        if uses_prompt_logprobs:
            self.in_progress_prompt_logprobs[req_id] = []
    def remove_request(self, req_id: str) -> None:
        """Drop any buffered partial logprobs for a finished/aborted request."""
        self.in_progress_prompt_logprobs.pop(req_id, None)
    def compute_prompt_logprobs(
        self,
        logits_fn: Callable[[torch.Tensor], torch.Tensor],
        hidden_states: torch.Tensor,
        input_batch: InputBatch,
        # [max_num_reqs, max_model_len]
        all_token_ids: torch.Tensor,
        # [max_num_reqs]
        num_computed_tokens: torch.Tensor,
        # [max_num_reqs]
        prompt_lens: np.ndarray,
        # [max_num_reqs]
        prefill_lens: np.ndarray,
        # [max_num_reqs]
        num_computed_prefill_tokens: np.ndarray,
    ) -> dict[str, LogprobsTensors]:
        """Compute prompt logprobs for requests in this batch that need them.

        Returns req_id -> complete LogprobsTensors. Requests whose prompts
        are only partially processed this step (chunked prefill) are
        buffered and omitted until their final chunk arrives.
        """
        idx_mapping_np = input_batch.idx_mapping_np
        needs_prompt_logprobs = self.uses_prompt_logprobs[idx_mapping_np]
        if not np.any(needs_prompt_logprobs):
            # Common case: No request asks for prompt logprobs.
            return {}
        prompt_lens = prompt_lens[idx_mapping_np]
        # NOTE(woosuk): -1 because the last prompt token's hidden state is not
        # needed for prompt logprobs.
        computed_prefill = num_computed_prefill_tokens[idx_mapping_np]
        includes_prompt = computed_prefill < prompt_lens - 1
        # NOTE(woosuk): If the request was resumed after preemption, its prompt
        # logprobs must have been computed before preemption. Skip.
        resumed_after_prompt = prompt_lens < prefill_lens[idx_mapping_np]
        needs_prompt_logprobs &= includes_prompt & ~resumed_after_prompt
        if not np.any(needs_prompt_logprobs):
            return {}
        # Get the prompt logprobs token_ids.
        prompt_logprobs_token_ids = get_prompt_logprobs_token_ids(
            input_batch.num_tokens,
            input_batch.query_start_loc,
            input_batch.idx_mapping,
            num_computed_tokens,
            all_token_ids,
        )
        # Compute the prompt logprobs.
        prompt_logprobs, prompt_ranks = compute_prompt_logprobs_with_chunking(
            prompt_logprobs_token_ids,
            hidden_states[: input_batch.num_tokens],
            logits_fn,
        )
        # A prompt is "chunked" if prompt tokens remain after this step.
        pos_after_step = computed_prefill + input_batch.num_scheduled_tokens
        is_prompt_chunked = pos_after_step < prompt_lens
        query_start_loc_np = input_batch.query_start_loc_np
        prompt_token_ids = prompt_logprobs_token_ids.unsqueeze(-1)
        prompt_logprobs_dict: dict[str, LogprobsTensors] = {}
        for i, req_id in enumerate(input_batch.req_ids):
            if not needs_prompt_logprobs[i]:
                continue
            # This request's row range within the flattened batch.
            start_idx = query_start_loc_np[i]
            end_idx = query_start_loc_np[i + 1]
            assert start_idx < end_idx, (
                f"start_idx ({start_idx}) >= end_idx ({end_idx})"
            )
            if not is_prompt_chunked[i]:
                # Exclude the final position: its target is the sampled
                # token, not a prompt token (see NOTE above).
                end_idx -= 1
            logprobs = LogprobsTensors(
                logprob_token_ids=prompt_token_ids[start_idx:end_idx],
                logprobs=prompt_logprobs[start_idx:end_idx],
                selected_token_ranks=prompt_ranks[start_idx:end_idx],
            )
            prompt_logprobs_list = self.in_progress_prompt_logprobs[req_id]
            if is_prompt_chunked[i]:
                # Prompt is chunked. Do not return the logprobs yet.
                prompt_logprobs_list.append(logprobs)
                continue
            if prompt_logprobs_list:
                # Merge the in-progress logprobs.
                prompt_logprobs_list.append(logprobs)
                logprobs = LogprobsTensors(
                    logprob_token_ids=torch.cat(
                        [x.logprob_token_ids for x in prompt_logprobs_list]
                    ),
                    logprobs=torch.cat([x.logprobs for x in prompt_logprobs_list]),
                    selected_token_ranks=torch.cat(
                        [x.selected_token_ranks for x in prompt_logprobs_list]
                    ),
                )
                prompt_logprobs_list.clear()
            prompt_logprobs_dict[req_id] = logprobs
        return prompt_logprobs_dict
@triton.jit
def _prompt_logprobs_token_ids_kernel(
    # out: target token ID for each scheduled position in the batch
    prompt_logprobs_token_ids_ptr,
    # in: [num_reqs + 1] prefix sums of per-request query lengths
    query_start_loc_ptr,
    # in: [num_reqs] batch index -> request-state index
    idx_mapping_ptr,
    # in: per request-state, number of tokens already computed
    num_computed_tokens_ptr,
    # in: [max_num_reqs, max_model_len] all token IDs (row stride below)
    all_token_ids_ptr,
    all_token_ids_stride,
    BLOCK_SIZE: tl.constexpr,
):
    # One program instance handles one request in the batch.
    batch_idx = tl.program_id(0)
    req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
    query_start = tl.load(query_start_loc_ptr + batch_idx)
    query_end = tl.load(query_start_loc_ptr + batch_idx + 1)
    query_len = query_end - query_start
    num_computed_tokens = tl.load(num_computed_tokens_ptr + req_state_idx)
    # Copy the request's target token IDs in BLOCK_SIZE-wide tiles.
    for i in range(0, query_len, BLOCK_SIZE):
        block = i + tl.arange(0, BLOCK_SIZE)
        mask = block < query_len
        # NOTE(woosuk): We should shift the pos by one
        # because the logprob is computed for the next token.
        target_pos = num_computed_tokens + 1 + block
        token_ids = tl.load(
            all_token_ids_ptr + req_state_idx * all_token_ids_stride + target_pos,
            mask=mask,
        )
        tl.store(
            prompt_logprobs_token_ids_ptr + query_start + block, token_ids, mask=mask
        )
def get_prompt_logprobs_token_ids(
    num_tokens: int,
    query_start_loc: torch.Tensor,
    idx_mapping: torch.Tensor,
    num_computed_tokens: torch.Tensor,
    all_token_ids: torch.Tensor,
) -> torch.Tensor:
    """Gather, per scheduled position, the token ID whose logprob is being
    computed (i.e. the *next* token — see the kernel's shift-by-one note).

    Returns an int64 tensor of shape [num_tokens] on the batch's device.
    """
    out = torch.empty(num_tokens, dtype=torch.int64, device=idx_mapping.device)
    grid = (idx_mapping.shape[0],)  # one program per request in the batch
    _prompt_logprobs_token_ids_kernel[grid](
        out,
        query_start_loc,
        idx_mapping,
        num_computed_tokens,
        all_token_ids,
        all_token_ids.stride(0),
        BLOCK_SIZE=1024,
    )
    return out
def compute_prompt_logprobs_with_chunking(
    prompt_token_ids: torch.Tensor,
    prompt_hidden_states: torch.Tensor,
    logits_fn: Callable[[torch.Tensor], torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute per-token prompt logprobs and ranks in fixed-size chunks.

    Materializing logits for the full prompt at once can take too much
    memory, so hidden states are pushed through ``logits_fn`` CHUNK_SIZE
    tokens at a time and the results concatenated.
    """
    CHUNK_SIZE = 1024
    token_ids = prompt_token_ids.to(torch.int64)

    logprob_chunks: list[torch.Tensor] = []
    rank_chunks: list[torch.Tensor] = []
    for start in range(0, token_ids.shape[0], CHUNK_SIZE):
        stop = start + CHUNK_SIZE
        # NOTE(woosuk): logits_fn can be slow because it involves all-gather.
        chunk_logits = logits_fn(prompt_hidden_states[start:stop])
        chunk_result = compute_topk_logprobs(
            chunk_logits,
            0,  # num_logprobs
            token_ids[start:stop],
        )
        logprob_chunks.append(chunk_result.logprobs)
        rank_chunks.append(chunk_result.selected_token_ranks)

    # Skip the concatenation (and its copy) in the single-chunk case.
    if len(logprob_chunks) == 1:
        return logprob_chunks[0], rank_chunks[0]
    return torch.cat(logprob_chunks, dim=0), torch.cat(rank_chunks, dim=0)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/sample/prompt_logprob.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:benchmarks/kernels/benchmark_fused_topk.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import itertools
import torch
from vllm.model_executor.layers.fused_moe.router.fused_topk_router import fused_topk
from vllm.triton_utils import triton
from vllm.utils.argparse_utils import FlexibleArgumentParser
# Benchmark sweep: token counts {1, 4, 16, 64}, expert counts, and top-k values.
num_tokens_range = [2**i for i in range(0, 8, 2)]
num_experts_range = [16, 32, 64, 128, 256, 512]
topk_range = [3, 4]
# Cartesian product of the three axes; one benchmark point per tuple.
configs = list(itertools.product(num_tokens_range, num_experts_range, topk_range))
def torch_topk(
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    scoring_func: str = "softmax",
):
    """Reference top-k routing in plain PyTorch.

    Scores the gating logits in fp32 with softmax (default) or sigmoid,
    selects the top-k experts per token, and optionally renormalizes the
    selected weights to sum to one.
    """
    logits = gating_output.float()
    if scoring_func == "softmax":
        scores = torch.softmax(logits, dim=-1)
    else:
        scores = torch.sigmoid(logits)

    topk_weights, topk_ids = torch.topk(scores, k=topk, dim=-1)
    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights, topk_ids
def get_benchmark(scoring_func):
    """Build a triton ``perf_report`` benchmark comparing the plain-torch
    top-k against vLLM's fused kernel for the given scoring function."""

    @triton.testing.perf_report(
        triton.testing.Benchmark(
            x_names=["num_tokens", "num_experts", "topk"],
            x_vals=[list(_) for _ in configs],
            line_arg="provider",
            line_vals=["torch", "vllm"],
            line_names=["Torch", "vLLM"],
            styles=[("blue", "-"), ("red", "-")],
            ylabel="us",
            plot_name=f"fused-topk-perf-{scoring_func}",
            args={},
        )
    )
    def benchmark(num_tokens, num_experts, topk, provider):
        dtype = torch.bfloat16
        hidden_size = 1024
        renormalize = True

        hidden_states = torch.randn(
            (num_tokens, hidden_size), dtype=dtype, device="cuda"
        )
        gating_output = torch.randn(
            (num_tokens, num_experts), dtype=dtype, device="cuda"
        )

        # Both providers share the routing arguments; only the fused kernel
        # additionally consumes the hidden states.
        routing_kwargs = dict(
            gating_output=gating_output,
            topk=topk,
            renormalize=renormalize,
            scoring_func=scoring_func,
        )
        if provider == "torch":
            def run():
                return torch_topk(**routing_kwargs)
        else:
            def run():
                return fused_topk(hidden_states=hidden_states, **routing_kwargs)

        quantiles = [0.5, 0.2, 0.8]
        ms, min_ms, max_ms = triton.testing.do_bench(run, quantiles=quantiles)
        # Convert ms -> us; report (median, worst, best).
        return 1000 * ms, 1000 * max_ms, 1000 * min_ms

    return benchmark
if __name__ == "__main__":
    parser = FlexibleArgumentParser(description="Benchmark the MoE topk kernel.")
    # Scoring applied to the gating logits: "softmax" (default) or "sigmoid".
    parser.add_argument("--scoring-func", type=str, default="softmax")
    # Directory where triton's perf_report writes its results.
    parser.add_argument("--save-path", type=str, default="./configs/fused_topk/")
    args = parser.parse_args()
    # Get the benchmark function
    benchmark = get_benchmark(args.scoring_func)
    # Run performance benchmark
    benchmark.run(print_data=True, save_path=args.save_path)
| {
"repo_id": "vllm-project/vllm",
"file_path": "benchmarks/kernels/benchmark_fused_topk.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_fused_topk.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the MoE fused topk kernel
Run `pytest tests/kernels/moe/test_fused_topk.py`.
"""
import pytest
import torch
from vllm.model_executor.layers.fused_moe.router.fused_topk_bias_router import (
fused_topk_bias,
)
from vllm.model_executor.layers.fused_moe.router.fused_topk_router import fused_topk
from vllm.platforms import current_platform
def torch_topk(
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    e_score_correction_bias: torch.Tensor = None,
    scoring_func: str = "softmax",
):
    """Reference top-k expert selection in plain PyTorch.

    When ``e_score_correction_bias`` is given, it shifts only the *selection*
    scores; the returned weights are gathered from the unbiased scores
    (DeepSeek-style e-score correction).
    """
    if scoring_func == "softmax":
        scores = torch.softmax(gating_output.float(), dim=-1)
    else:
        assert scoring_func == "sigmoid"
        scores = torch.sigmoid(gating_output.float())

    if e_score_correction_bias is None:
        topk_weights, topk_ids = torch.topk(scores, k=topk, dim=-1)
    else:
        # Bias influences which experts are picked, not the reported weights.
        num_experts = gating_output.shape[-1]
        biased = scores.view(-1, num_experts) + e_score_correction_bias.unsqueeze(0)
        topk_ids = torch.topk(biased, k=topk, dim=-1).indices
        topk_weights = scores.gather(1, topk_ids)

    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights, topk_ids
@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platform."
)
@pytest.mark.parametrize("num_tokens", [1, 33, 56])
@pytest.mark.parametrize("hidden_size", [1024, 2048])
@pytest.mark.parametrize("num_experts", [6, 16])
@pytest.mark.parametrize("topk", [3, 4])
@pytest.mark.parametrize("renormalize", [True, False])
@pytest.mark.parametrize("scoring_func", ["softmax", "sigmoid"])
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.half, torch.float32])
def test_fused_topk(
    num_tokens: int,
    hidden_size: int,
    num_experts: int,
    topk: int,
    renormalize: bool,
    scoring_func: str,
    dtype: torch.dtype,
):
    """fused_topk must match the torch reference on weights and expert IDs."""
    torch.manual_seed(0)
    hidden_states = torch.randn((num_tokens, hidden_size), dtype=dtype, device="cuda")
    gating_output = torch.randn((num_tokens, num_experts), dtype=dtype, device="cuda")

    ref_weights, ref_ids = torch_topk(
        gating_output=gating_output,
        topk=topk,
        renormalize=renormalize,
        scoring_func=scoring_func,
    )
    got_weights, got_ids, _ = fused_topk(
        hidden_states=hidden_states,
        gating_output=gating_output,
        topk=topk,
        renormalize=renormalize,
        scoring_func=scoring_func,
    )

    # Weights within fp tolerance; expert IDs must match exactly.
    torch.testing.assert_close(
        ref_weights.to(torch.float32), got_weights, atol=1e-2, rtol=1e-2
    )
    torch.testing.assert_close(ref_ids.to(torch.int32), got_ids, atol=0, rtol=0)
@pytest.mark.skipif(
    not current_platform.is_cuda(), reason="This test is skipped on non-CUDA platform."
)
@pytest.mark.parametrize("num_tokens", [1, 33, 56])
@pytest.mark.parametrize("hidden_size", [1024, 2048])
@pytest.mark.parametrize("num_experts", [6, 16])
@pytest.mark.parametrize("topk", [3, 4])
@pytest.mark.parametrize("renormalize", [True, False])
@pytest.mark.parametrize("scoring_func", ["softmax", "sigmoid"])
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.half, torch.float32])
def test_fused_topk_bias(
    num_tokens: int,
    hidden_size: int,
    num_experts: int,
    topk: int,
    renormalize: bool,
    scoring_func: str,
    dtype: torch.dtype,
):
    """fused_topk_bias must match the bias-corrected torch reference."""
    torch.manual_seed(0)
    hidden_states = torch.randn((num_tokens, hidden_size), dtype=dtype, device="cuda")
    gating_output = torch.randn((num_tokens, num_experts), dtype=dtype, device="cuda")
    e_score_correction_bias = torch.randn(
        (num_experts,), dtype=torch.float32, device="cuda"
    )

    ref_weights, ref_ids = torch_topk(
        gating_output=gating_output,
        topk=topk,
        renormalize=renormalize,
        e_score_correction_bias=e_score_correction_bias,
        scoring_func=scoring_func,
    )
    got_weights, got_ids = fused_topk_bias(
        hidden_states=hidden_states,
        gating_output=gating_output,
        e_score_correction_bias=e_score_correction_bias,
        topk=topk,
        renormalize=renormalize,
        scoring_func=scoring_func,
    )

    # Weights within fp tolerance; expert IDs must match exactly.
    torch.testing.assert_close(
        ref_weights.to(torch.float32), got_weights, atol=1e-2, rtol=1e-2
    )
    torch.testing.assert_close(ref_ids.to(torch.int32), got_ids, atol=0, rtol=0)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_fused_topk.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/sparse_attn_indexer.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Custom Sparse Attention Indexer layers."""
import torch
from vllm._aiter_ops import rocm_aiter_ops
from vllm.forward_context import get_forward_context
from vllm.logger import init_logger
from vllm.model_executor.custom_op import CustomOp
from vllm.platforms import current_platform
from vllm.utils.deep_gemm import (
fp8_mqa_logits,
fp8_mqa_logits_torch,
fp8_paged_mqa_logits,
fp8_paged_mqa_logits_torch,
is_deep_gemm_supported,
)
from vllm.utils.torch_utils import direct_register_custom_op
from vllm.v1.attention.backends.mla.indexer import (
DeepseekV32IndexerMetadata,
)
from vllm.v1.attention.ops.common import pack_seq_triton, unpack_seq_triton
from vllm.v1.worker.workspace import current_workspace_manager
if current_platform.is_cuda_alike():
from vllm import _custom_ops as ops
elif current_platform.is_xpu():
from vllm._xpu_ops import xpu_ops as ops
logger = init_logger(__name__)
def sparse_attn_indexer(
    hidden_states: torch.Tensor,
    k_cache_prefix: str,
    kv_cache: torch.Tensor,
    q_fp8: torch.Tensor,
    k: torch.Tensor,
    weights: torch.Tensor,
    quant_block_size: int,
    scale_fmt: str | None,
    topk_tokens: int,
    head_dim: int,
    max_model_len: int,
    total_seq_lens: int,
    topk_indices_buffer: torch.Tensor,
) -> torch.Tensor:
    """Select top-k attended token indices per query token.

    Quantizes ``k`` into the indexer KV cache, computes fp8 MQA logits for
    the prefill and decode portions of the batch, and writes the selected
    token indices into ``topk_indices_buffer`` in place (unselected slots
    stay ``-1``). The mutated buffer is also returned. Registered below as
    the custom op ``vllm.sparse_attn_indexer``.
    """
    # careful! this will be None in dummy run
    attn_metadata = get_forward_context().attn_metadata
    fp8_dtype = current_platform.fp8_dtype()
    # assert isinstance(attn_metadata, dict)
    if not isinstance(attn_metadata, dict):
        # Reserve workspace for indexer during profiling run
        current_workspace_manager().get_simultaneous(
            ((total_seq_lens, head_dim), torch.float8_e4m3fn),
            ((total_seq_lens, 4), torch.uint8),
        )
        # No metadata -> dummy/profiling run; return the buffer unchanged.
        return sparse_attn_indexer_fake(
            hidden_states,
            k_cache_prefix,
            kv_cache,
            q_fp8,
            k,
            weights,
            quant_block_size,
            scale_fmt,
            topk_tokens,
            head_dim,
            max_model_len,
            total_seq_lens,
            topk_indices_buffer,
        )
    # Metadata is keyed by the indexer's cache prefix.
    attn_metadata = attn_metadata[k_cache_prefix]
    assert isinstance(attn_metadata, DeepseekV32IndexerMetadata)
    slot_mapping = attn_metadata.slot_mapping
    has_decode = attn_metadata.num_decodes > 0
    has_prefill = attn_metadata.num_prefills > 0
    num_decode_tokens = attn_metadata.num_decode_tokens
    # Quantize the new keys and scatter them into the indexer KV cache.
    ops.indexer_k_quant_and_cache(
        k,
        kv_cache,
        slot_mapping,
        quant_block_size,
        scale_fmt,
    )
    # -1 marks "not selected" for every live query row before top-k runs.
    topk_indices_buffer[: hidden_states.shape[0]] = -1
    if has_prefill:
        prefill_metadata = attn_metadata.prefill
        # Get the full shared workspace buffers once (will allocate on first use)
        workspace_manager = current_workspace_manager()
        k_fp8_full, k_scale_full = workspace_manager.get_simultaneous(
            ((total_seq_lens, head_dim), fp8_dtype),
            ((total_seq_lens, 4), torch.uint8),
        )
        # Process prefill in chunks; each chunk gathers its keys/scales from
        # the paged cache into contiguous workspace slices.
        for chunk in prefill_metadata.chunks:
            k_fp8 = k_fp8_full[: chunk.total_seq_lens]
            k_scale = k_scale_full[: chunk.total_seq_lens]
            ops.cp_gather_indexer_k_quant_cache(
                kv_cache,
                k_fp8,
                k_scale,
                chunk.block_table,
                chunk.cu_seq_lens,
            )
            # DeepGEMM kernel when available, else a PyTorch fallback.
            if is_deep_gemm_supported():
                logits = fp8_mqa_logits(
                    q_fp8[chunk.token_start : chunk.token_end],
                    (k_fp8, k_scale.view(torch.float32).flatten()),
                    weights[chunk.token_start : chunk.token_end],
                    chunk.cu_seqlen_ks,
                    chunk.cu_seqlen_ke,
                    clean_logits=False,
                )
            else:
                logits = fp8_mqa_logits_torch(
                    q_fp8[chunk.token_start : chunk.token_end],
                    (k_fp8, k_scale.view(torch.float32).flatten()),
                    weights[chunk.token_start : chunk.token_end],
                    chunk.cu_seqlen_ks,
                    chunk.cu_seqlen_ke,
                )
            num_rows = logits.shape[0]
            # View into the output buffer; the kernel writes indices in place.
            topk_indices = topk_indices_buffer[
                chunk.token_start : chunk.token_end, :topk_tokens
            ]
            torch.ops._C.top_k_per_row_prefill(
                logits,
                chunk.cu_seqlen_ks,
                chunk.cu_seqlen_ke,
                topk_indices,
                num_rows,
                logits.stride(0),
                logits.stride(1),
                topk_tokens,
            )
            # Compute lengths from row spans
            # lengths = (chunk.cu_seqlen_ke - chunk.cu_seqlen_ks).to(torch.int32)
            # torch.ops._C.large_context_topk(
            #     logits,
            #     topk_indices,
            #     lengths,
            #     chunk.cu_seqlen_ks,  # row_starts
            # )
    if has_decode:
        decode_metadata = attn_metadata.decode
        # kv_cache size requirement [num_block, block_size, n_head, head_dim],
        # we only have [num_block, block_size, head_dim],
        kv_cache = kv_cache.unsqueeze(-2)
        decode_lens = decode_metadata.decode_lens
        if decode_metadata.requires_padding:
            # pad in edge case where we have short chunked prefill length <
            # decode_threshold since we unstrictly split
            # prefill and decode by decode_threshold
            # (currently set to 1 + speculative tokens)
            padded_q_fp8_decode_tokens = pack_seq_triton(
                q_fp8[:num_decode_tokens], decode_lens
            )
        else:
            # Uniform decode lengths: a plain reshape to (batch, next_n, ...)
            padded_q_fp8_decode_tokens = q_fp8[:num_decode_tokens].reshape(
                decode_lens.shape[0], -1, *q_fp8.shape[1:]
            )
        # TODO: move and optimize below logic with triton kernels
        batch_size = padded_q_fp8_decode_tokens.shape[0]
        next_n = padded_q_fp8_decode_tokens.shape[1]
        assert batch_size == decode_metadata.seq_lens.shape[0]
        num_padded_tokens = batch_size * next_n
        if is_deep_gemm_supported():
            logits = fp8_paged_mqa_logits(
                padded_q_fp8_decode_tokens,
                kv_cache,
                weights[:num_padded_tokens],
                decode_metadata.seq_lens,
                decode_metadata.block_table,
                decode_metadata.schedule_metadata,
                max_model_len=max_model_len,
                clean_logits=False,
            )
        else:
            logits = fp8_paged_mqa_logits_torch(
                padded_q_fp8_decode_tokens,
                kv_cache,
                weights[:num_padded_tokens],
                decode_metadata.seq_lens,
                decode_metadata.block_table,
                max_model_len=max_model_len,
            )
        num_rows = logits.shape[0]
        topk_indices = topk_indices_buffer[:num_padded_tokens, :topk_tokens]
        if decode_metadata.use_large_context_topk:
            if next_n == 1:
                lengths = decode_metadata.seq_lens
            else:
                # (bs,) -> (bs, 1) + (next_n,) -> (bs, next_n) -> (bs * next_n,)
                lengths = (
                    decode_metadata.seq_lens.unsqueeze(1)
                    - next_n
                    + 1
                    + decode_metadata.offsets
                ).flatten()
            torch.ops._C.large_context_topk(
                logits,
                topk_indices,
                lengths,
                None,
            )
        else:
            torch.ops._C.top_k_per_row_decode(
                logits,
                next_n,
                decode_metadata.seq_lens,
                topk_indices,
                num_rows,
                logits.stride(0),
                logits.stride(1),
                topk_tokens,
            )
        if decode_metadata.requires_padding:
            # if padded, we need to unpack
            # the topk indices removing padded tokens
            topk_indices = unpack_seq_triton(
                topk_indices.reshape(batch_size, -1, topk_indices.shape[-1]),
                decode_lens,
            )
            topk_indices_buffer[:num_decode_tokens, : topk_indices.shape[-1]] = (
                topk_indices
            )
    return topk_indices_buffer
def sparse_attn_indexer_fake(
    hidden_states: torch.Tensor,
    k_cache_prefix: str,
    kv_cache: torch.Tensor,
    q_fp8: torch.Tensor,
    k: torch.Tensor,
    weights: torch.Tensor,
    quant_block_size: int,
    scale_fmt: str | None,
    topk_tokens: int,
    head_dim: int,
    max_model_len: int,
    total_seq_lens: int,
    topk_indices_buffer: torch.Tensor | None,
) -> torch.Tensor:
    """Fake (meta) implementation of ``sparse_attn_indexer``.

    Returns the index buffer untouched. The parameter list must mirror the
    real op for registration; all other arguments are intentionally unused.
    """
    return topk_indices_buffer
# Register the real/fake pair as the torch custom op
# ``torch.ops.vllm.sparse_attn_indexer``. ``topk_indices_buffer`` is declared
# as mutated so the op is not treated as purely functional.
direct_register_custom_op(
    op_name="sparse_attn_indexer",
    op_func=sparse_attn_indexer,
    mutates_args=["topk_indices_buffer"],
    fake_impl=sparse_attn_indexer_fake,
    dispatch_key=current_platform.dispatch_key,
)
@CustomOp.register("sparse_attn_indexer")
class SparseAttnIndexer(CustomOp):
    """Sparse Attention Indexer Custom Op Layer. This layer is extracted as a
    separate custom op since it involves heavy custom kernels like `mqa_logits`,
    `paged_mqa_logits` and `top_k_per_row`, etc. Those kernels maybe requires
    specific memory layout or implementation for different hardware backends to
    achieve optimal performance.
    For now, the default native path will use CUDA backend path. Other platform
    may requires add the corresponding Custom Op name `sparse_attn_indexer` to
    `custom_ops` in `CompilationConfig` to enable the platform specific path.
    """
    def __init__(
        self,
        k_cache,
        quant_block_size: int,
        scale_fmt: str,
        topk_tokens: int,
        head_dim: int,
        max_model_len: int,
        max_total_seq_len: int,
        topk_indices_buffer: torch.Tensor,
    ):
        """Store configuration used by the forward custom op.

        ``k_cache`` provides both the cache prefix used to look up attention
        metadata and the underlying KV cache tensor (``k_cache.kv_cache[0]``).
        """
        super().__init__()
        self.k_cache = k_cache
        self.quant_block_size = quant_block_size
        self.scale_fmt = scale_fmt
        self.topk_tokens = topk_tokens
        self.head_dim = head_dim
        self.max_model_len = max_model_len
        self.max_total_seq_len = max_total_seq_len
        self.topk_indices_buffer = topk_indices_buffer
        if current_platform.is_cuda() and not is_deep_gemm_supported():
            logger.warning_once(
                "DeepGEMM is not supported or available. SparseAttnIndexer will use a "
                "less efficient PyTorch implementation. "
                "Please make sure you have the required hardware and software setup "
                "for DeepGEMM to achieve optimal performance."
            )
    def forward_native(
        self,
        hidden_states: torch.Tensor,
        q_fp8: torch.Tensor,
        k: torch.Tensor,
        weights: torch.Tensor,
    ):
        # "Native" here just dispatches to the platform-specific path.
        if current_platform.is_cuda():
            return self.forward_cuda(hidden_states, q_fp8, k, weights)
        elif current_platform.is_rocm():
            return self.forward_hip(hidden_states, q_fp8, k, weights)
        else:
            raise NotImplementedError(
                "SparseAttnIndexer native forward is only implemented for "
                "CUDA and ROCm platform."
            )
    def forward_cuda(
        self,
        hidden_states: torch.Tensor,
        q_fp8: torch.Tensor,
        k: torch.Tensor,
        weights: torch.Tensor,
    ):
        # Invoke the registered custom op (see sparse_attn_indexer above).
        return torch.ops.vllm.sparse_attn_indexer(
            hidden_states,
            self.k_cache.prefix,
            self.k_cache.kv_cache[0],
            q_fp8,
            k,
            weights,
            self.quant_block_size,
            self.scale_fmt,
            self.topk_tokens,
            self.head_dim,
            self.max_model_len,
            self.max_total_seq_len,
            self.topk_indices_buffer,
        )
    def forward_hip(
        self,
        hidden_states: torch.Tensor,
        q_fp8: torch.Tensor,
        k: torch.Tensor,
        weights: torch.Tensor,
    ):
        # ROCm path requires the Aiter-backed op; there is no fallback here.
        if rocm_aiter_ops.is_enabled():
            return torch.ops.vllm.rocm_aiter_sparse_attn_indexer(
                hidden_states,
                self.k_cache.prefix,
                self.k_cache.kv_cache[0],
                q_fp8,
                k,
                weights,
                self.quant_block_size,
                self.scale_fmt,
                self.topk_tokens,
                self.head_dim,
                self.max_model_len,
                self.max_total_seq_len,
                self.topk_indices_buffer,
            )
        else:
            raise RuntimeError(
                "Sparse attention indexer ROCm custom op requires ROCm "
                "Aiter ops to be enabled."
            )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/sparse_attn_indexer.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/eagle2_5_vl.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from NVIDIA Eagle2.5-VL model
# https://huggingface.co/nvidia/Eagle2.5-8B
from collections.abc import Iterable
from typing import Annotated, Literal, TypeAlias
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm.config import VllmConfig
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.model_executor.models.siglip import SiglipVisionModel
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.processing import PromptUpdateDetails
from vllm.sequence import IntermediateTensors
from vllm.tokenizers import TokenizerLike
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
)
from .internvl import (
IMG_CONTEXT,
IMG_END,
IMG_START,
BaseInternVLDummyInputsBuilder,
BaseInternVLMultiModalProcessor,
BaseInternVLProcessingInfo,
BaseInternVLProcessor,
)
from .utils import AutoWeightsLoader, init_vllm_registered_model, maybe_prefix
class Eagle2_5_VLImagePixelInputs(TensorSchema):
    """
    Dimensions:
        - bn: Batch size * number of images
        - bnp: Batch size * number of images * (1 + num_patches)
        - c: Number of channels (3)
        - h: Height of each image patch
        - w: Width of each image patch
    """
    type: Literal["pixel_values"]
    # All image patches flattened into one batch dimension: (bnp, 3, h, w).
    pixel_values_flat: Annotated[torch.Tensor, TensorShape("bnp", 3, "h", "w")]
    # Patch count per image, used to split features back per image: (bn,).
    num_patches: Annotated[torch.Tensor, TensorShape("bn")]
class Eagle2_5_VLImageEmbeddingInputs(TensorSchema):
    """
    Dimensions:
        - n: Number of images
        - f: Total image feature size
        - h: Hidden size (must match the hidden size of language model backbone)
    """
    type: Literal["image_embeds"]
    # Precomputed image embeddings, bypassing the vision tower.
    data: Annotated[torch.Tensor | list[torch.Tensor], TensorShape("n", "f", "h")]
# Union of the two accepted image-input schemas: raw pixel patches or
# precomputed embeddings.
Eagle2_5_VLImageInputs: TypeAlias = (
    Eagle2_5_VLImagePixelInputs | Eagle2_5_VLImageEmbeddingInputs
)
class Eagle2_5_VLProcessor(BaseInternVLProcessor):
    """
    Custom processor for Eagle2.5-VL model.
    Extends BaseInternVLProcessor with Eagle-specific token handling.
    """
    def __init__(
        self,
        config: PretrainedConfig,
        tokenizer: TokenizerLike,
        *,
        min_dynamic_patch: int | None = None,
        max_dynamic_patch: int | None = None,
        dynamic_image_size: bool | None = None,
    ) -> None:
        # Skip super().__init__() to avoid config manipulation
        # Directly initialize all required attributes
        self.config = config
        self.tokenizer = tokenizer
        # Image size with force_image_size override
        image_size: int = config.vision_config.image_size
        if hasattr(config, "force_image_size") and config.force_image_size:
            image_size = config.force_image_size
        patch_size: int = config.vision_config.patch_size
        downsample_ratio: float = getattr(config, "downsample_ratio", 0.5)
        # Compute num_image_token
        # Number of LLM tokens one image tile produces after pixel-shuffle
        # downsampling: (patches per side)^2 scaled by downsample_ratio^2.
        self.num_image_token = int(
            (image_size // patch_size) ** 2 * (downsample_ratio**2)
        )
        self.image_size = image_size
        # Dynamic patch settings with defaults
        self.min_dynamic_patch = (
            min_dynamic_patch
            if min_dynamic_patch is not None
            else getattr(config, "min_dynamic_patch", 1)
        )
        self.max_dynamic_patch = (
            max_dynamic_patch
            if max_dynamic_patch is not None
            else getattr(config, "max_dynamic_patch", 12)
        )
        self.dynamic_image_size = (
            dynamic_image_size
            if dynamic_image_size is not None
            else getattr(config, "dynamic_image_size", True)
        )
        self.use_thumbnail: bool = getattr(config, "use_thumbnail", True)
    @property
    def image_token_id(self) -> int:
        """Get the image token ID from config or tokenizer."""
        if hasattr(self.config, "image_token_index"):
            return self.config.image_token_index
        # Fallback to tokenizer vocab - use <IMG_CONTEXT> (ID: 151667)
        vocab = self.tokenizer.get_vocab()
        if IMG_CONTEXT in vocab:
            return vocab[IMG_CONTEXT]
        raise ValueError(f"Cannot find image token '{IMG_CONTEXT}' in vocabulary")
    def get_image_repl(
        self,
        feature_size: int,
        num_patches: int | None,
    ) -> PromptUpdateDetails[str]:
        """Get image replacement string for prompt.

        The replacement is ``feature_size`` context tokens wrapped in the
        image start/end markers; only the context tokens are selected as the
        embedding positions.
        """
        repl_features = IMG_CONTEXT * feature_size
        repl_full = IMG_START + repl_features + IMG_END
        return PromptUpdateDetails.select_text(repl_full, IMG_CONTEXT)
class Eagle2_5_VLProcessingInfo(BaseInternVLProcessingInfo):
    """Processing info for Eagle2.5-VL model."""
    def get_hf_processor(self, **kwargs) -> Eagle2_5_VLProcessor:
        """Build the Eagle-specific processor from the model's HF config."""
        return self.ctx.init_processor(
            Eagle2_5_VLProcessor,
            config=self.ctx.get_hf_config(),
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )
class Eagle2_5_VLDummyInputsBuilder(
    BaseInternVLDummyInputsBuilder[Eagle2_5_VLProcessingInfo]
):
    """Dummy inputs builder for Eagle2.5-VL model."""
    # No Eagle-specific overrides; the InternVL base behavior is sufficient.
    pass
class Eagle2_5_VLMultiModalProcessor(
    BaseInternVLMultiModalProcessor[Eagle2_5_VLProcessingInfo]
):
    """Multi-modal processor for Eagle2.5-VL model."""
    # No Eagle-specific overrides; the InternVL base behavior is sufficient.
    pass
@MULTIMODAL_REGISTRY.register_processor(
    Eagle2_5_VLMultiModalProcessor,
    info=Eagle2_5_VLProcessingInfo,
    dummy_inputs=Eagle2_5_VLDummyInputsBuilder,
)
class Eagle2_5_VLForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA
):
    """
    Eagle2.5-VL model for conditional generation.
    Architecture:
    - Vision Encoder: SigLIP
    - Language Model: Qwen2
    - Projection: MLP with pixel shuffle downsampling
    """
    # Allows the vision encoder to run with data-parallel tensor sharding.
    supports_encoder_tp_data = True
    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder for the given modality (image only)."""
        if modality.startswith("image"):
            return "<image>"
        raise ValueError("Only image modality is supported")
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        # Image configuration
        image_size = (
            getattr(config, "force_image_size", None) or config.vision_config.image_size
        )
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.downsample_ratio = getattr(config, "downsample_ratio", 0.5)
        # Tokens per image tile after pixel-shuffle downsampling; must agree
        # with Eagle2_5_VLProcessor.num_image_token.
        self.num_image_token = int(
            (image_size // patch_size) ** 2 * (self.downsample_ratio**2)
        )
        self.select_layer = getattr(config, "select_layer", -1)
        with self._mark_tower_model(vllm_config, "image"):
            # Vision encoder (SigLIP)
            self.vision_model = self._init_vision_model(
                config,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "vision_model"),
            )
            # MLP projection
            self.mlp1 = self._init_mlp1(config)
        with self._mark_language_model(vllm_config):
            # Language model (Qwen2)
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
            )
        # Filled in lazily from kwargs in _parse_and_validate_image_input.
        self.img_context_token_id = None
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )
    def _init_vision_model(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None,
        prefix: str,
    ):
        """Initialize SigLIP vision model."""
        vision_config = config.vision_config
        # Determine number of hidden layers based on select_layer
        vision_feature_layer = self.select_layer
        if vision_feature_layer < 0:
            # Negative index counts back from the last layer.
            num_hidden_layers = (
                vision_config.num_hidden_layers + vision_feature_layer + 1
            )
        else:
            num_hidden_layers = vision_feature_layer + 1
        # Disable the pooling head - Eagle2.5 needs all patch tokens,
        # not a single pooled output
        vision_config.vision_use_head = False
        return SiglipVisionModel(
            vision_config,
            quant_config=quant_config,
            num_hidden_layers_override=num_hidden_layers,
            prefix=prefix,
        )
    def _init_mlp1(self, config: PretrainedConfig) -> nn.Module:
        """Initialize MLP projection layer.

        Input width is the ViT hidden size multiplied by the pixel-shuffle
        channel expansion factor ``(1 / downsample_ratio) ** 2``.
        """
        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.text_config.hidden_size
        return nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(
                vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size
            ),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size),
        )
    def pixel_shuffle(self, x: torch.Tensor, scale_factor: float = 0.5) -> torch.Tensor:
        """
        Pixel shuffle operation for downsampling vision features.
        Args:
            x: Input tensor of shape (n, w, h, c)
            scale_factor: Downsampling factor
        Returns:
            Downsampled tensor
        """
        n, w, h, c = x.size()
        # N, W, H, C --> N, W, H * scale, C // scale
        x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
        # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
        x = x.permute(0, 2, 1, 3).contiguous()
        # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
        x = x.view(
            n,
            int(h * scale_factor),
            int(w * scale_factor),
            int(c / (scale_factor * scale_factor)),
        )
        x = x.permute(0, 2, 1, 3).contiguous()
        return x
    def extract_feature(self, pixel_values: torch.Tensor) -> torch.Tensor:
        """
        Extract visual features from pixel values.
        Args:
            pixel_values: Input pixel values of shape (batch, channels, height, width)
        Returns:
            Visual embeddings
        """
        vit_embeds = self.vision_model(pixel_values=pixel_values)
        # Recover the square patch grid; assumes a square image so that the
        # token count is a perfect square.
        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.mlp1(vit_embeds)
        return vit_embeds
    def _parse_and_validate_image_input(
        self, **kwargs: object
    ) -> Eagle2_5_VLImageInputs | None:
        """Parse and validate image inputs."""
        pixel_values_flat = kwargs.pop("pixel_values_flat", None)
        image_num_patches = kwargs.pop("image_num_patches", None)
        image_embeds = kwargs.pop("image_embeds", None)
        if pixel_values_flat is None and image_embeds is None:
            return None
        # Precomputed embeddings take precedence over raw pixels.
        if image_embeds is not None:
            return Eagle2_5_VLImageEmbeddingInputs(
                type="image_embeds",
                data=image_embeds,
            )
        image_token_id = kwargs.get("image_token_id")
        if image_token_id is not None:
            if isinstance(image_token_id, torch.Tensor):
                image_token_id = image_token_id.flatten().unique().item()
            assert isinstance(image_token_id, int)
            self.img_context_token_id = image_token_id
        if pixel_values_flat is not None:
            image_size = getattr(self.config, "force_image_size", None)
            if image_size is None:
                image_size = self.config.vision_config.image_size
            expected_h = expected_w = image_size
            resolve_bindings = {"h": expected_h, "w": expected_w}
            return Eagle2_5_VLImagePixelInputs(
                type="pixel_values",
                pixel_values_flat=pixel_values_flat,
                num_patches=image_num_patches,
                resolve_bindings=resolve_bindings,
            )
        raise AssertionError("This line should be unreachable.")
    def _process_image_input(
        self,
        image_input: Eagle2_5_VLImageInputs,
    ) -> tuple[torch.Tensor, ...]:
        """Process image input to get embeddings."""
        if image_input["type"] == "image_embeds":
            return image_input["data"]
        assert self.vision_model is not None
        image_embeds = self.extract_feature(image_input["pixel_values_flat"])
        num_patches = image_input["num_patches"]
        # Only one image in the current batch
        if len(num_patches) == 1:
            return (image_embeds.view(-1, self.config.text_config.hidden_size),)
        # Split embeddings by image
        feature_size = image_embeds.shape[1]
        image_embeds = image_embeds.view(-1, self.config.text_config.hidden_size)
        # NOTE: the loop variable deliberately shadows the tensor name here;
        # each element is the per-image patch count.
        image_feature_sizes = [
            num_patches * feature_size for num_patches in num_patches
        ]
        return image_embeds.split(image_feature_sizes)
    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        """Embed multimodal inputs."""
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []
        image_embeddings = self._process_image_input(image_input)
        return tuple(image_embeddings)
    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        """Embed input IDs with optional multimodal embeddings."""
        if multimodal_embeddings is None or is_multimodal is None:
            return super().embed_input_ids(input_ids)
        return super().embed_input_ids(
            input_ids,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
            handle_oov_mm_token=handle_oov_mm_token,
        )
    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> IntermediateTensors:
        """Forward pass through the model."""
        # With pipeline parallelism, downstream stages consume intermediate
        # tensors instead of embeddings.
        if intermediate_tensors is not None:
            inputs_embeds = None
        forward_kwargs = {
            "input_ids": input_ids,
            "positions": positions,
            "intermediate_tensors": intermediate_tensors,
            "inputs_embeds": inputs_embeds,
        }
        hidden_states = self.language_model.model(**forward_kwargs)
        return hidden_states
    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Compute logits from hidden states."""
        return self.language_model.compute_logits(hidden_states)
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load model weights."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
    def get_mm_mapping(self) -> MultiModelKeys:
        """Get the module prefix mapping for multimodal models."""
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="mlp1",
            tower_model="vision_model",
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/eagle2_5_vl.py",
"license": "Apache License 2.0",
"lines": 390,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/v1/spec_decode/test_acceptance_length.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
EAGLE3 Acceptance Length Regression Tests.
These tests verify that acceptance lengths for EAGLE3 speculative decoding
do not regress across vLLM commits. Each test runs inference on the MT-Bench
dataset and asserts that the mean acceptance length is within tolerance of
the expected baseline.
"""
from dataclasses import dataclass, field
from types import SimpleNamespace
import pytest
import torch
from tests.conftest import VllmRunner
from tests.utils import large_gpu_mark
from vllm import SamplingParams
from vllm.benchmarks.datasets import get_samples
from vllm.inputs import TokensPrompt
from vllm.platforms import current_platform
from vllm.v1.attention.backends.registry import AttentionBackendEnum
from vllm.v1.attention.selector import AttentionSelectorConfig
from vllm.v1.metrics.reader import Counter, Vector
@dataclass
class Eagle3ModelConfig:
    """One EAGLE3 verifier/drafter pairing with its regression baseline."""
    # Hugging Face id of the target (verifier) model.
    verifier: str
    # Hugging Face id of the EAGLE3 draft model.
    drafter: str
    # Baseline mean acceptance length (1 + accepted tokens per draft).
    expected_acceptance_length: float
    # Baseline per-position contributions (accepted@pos / num_drafts).
    expected_acceptance_lengths_per_pos: list[float] = field(default_factory=list)
    # Pytest id used for this parametrization.
    id: str = ""
    # Backends that are incompatible with this model (will be skipped)
    excluded_backends: set[AttentionBackendEnum] = field(default_factory=set)
    # Pytest marks for this configuration
    marks: list = field(default_factory=list)
    # Custom relative tolerance (defaults to DEFAULT_RTOL if None)
    rtol: float | None = None
# Model configurations for EAGLE3 acceptance length tests.
# Expected acceptance lengths are determined by running baseline benchmarks
# using examples/offline_inference/spec_decode.py with the MT-Bench dataset.
EAGLE3_MODEL_CONFIGS = [
    Eagle3ModelConfig(
        verifier="meta-llama/Llama-3.1-8B-Instruct",
        drafter="RedHatAI/Llama-3.1-8B-Instruct-speculator.eagle3",
        expected_acceptance_length=2.60,
        expected_acceptance_lengths_per_pos=[0.7296, 0.5208, 0.3545],
        id="llama3-8b-eagle3",
    ),
    Eagle3ModelConfig(
        verifier="Qwen/Qwen3-8B",
        drafter="RedHatAI/Qwen3-8B-speculator.eagle3",
        expected_acceptance_length=2.26,
        expected_acceptance_lengths_per_pos=[0.6541, 0.3993, 0.2020],
        id="qwen3-8b-eagle3",
    ),
    Eagle3ModelConfig(
        verifier="openai/gpt-oss-20b",
        drafter="RedHatAI/gpt-oss-20b-speculator.eagle3",
        expected_acceptance_length=2.56,
        expected_acceptance_lengths_per_pos=[0.7165, 0.5120, 0.3337],
        id="gpt-oss-20b-eagle3",
        # FLASHINFER incompatible: gpt-oss-20b uses sink attention which
        # FLASHINFER does not support ("sink setting not supported")
        excluded_backends={AttentionBackendEnum.FLASHINFER},
    ),
    Eagle3ModelConfig(
        verifier="Qwen/Qwen3-VL-30B-A3B-Instruct-FP8",
        drafter="nm-testing/Speculator-Qwen3-30B-MOE-VL-Eagle3",
        expected_acceptance_length=1.35,
        expected_acceptance_lengths_per_pos=[0.2900, 0.0620, 0.0115],
        id="qwen3-30b-moe-vl-eagle3",
        marks=[
            pytest.mark.slow_test,
        ],
        rtol=0.15,  # Higher tolerance due to small absolute values at position 2
    ),
]
# Default test parameters
DEFAULT_NUM_SPEC_TOKENS = 3  # draft tokens proposed per verification step
DEFAULT_NUM_PROMPTS = 80  # MT-Bench prompts sampled per run
DEFAULT_OUTPUT_LEN = 256  # generated tokens per prompt
DEFAULT_MAX_MODEL_LEN = 16384
DEFAULT_RTOL = 0.05  # tolerance on the overall acceptance length
# TP sizes to test
TP_SIZES = [1, 2, 4]
# Backends excluded from testing due to significantly different behavior
EXCLUDED_BACKENDS = {AttentionBackendEnum.FLEX_ATTENTION}
def get_available_attention_backends() -> list[str]:
    """Return attention backend names usable on this platform.

    Falls back to a single default backend when the platform cannot
    enumerate valid backends; otherwise queries the platform and filters
    out globally excluded backends.
    """
    # Only trust get_valid_backends when the platform class really defines
    # it (a bare __getattr__ fallback would hand back None).
    platform_cls = current_platform.__class__
    if getattr(platform_cls, "get_valid_backends", None) is None:
        if current_platform.is_rocm():
            # ROCm uses Triton as its default attention backend since
            # Flash Attention is not supported.
            return ["TRITON_ATTN"]
        return ["FLASH_ATTN"]

    capability = current_platform.get_device_capability()
    if capability is None:
        return ["FLASH_ATTN"]

    selector_config = AttentionSelectorConfig(
        head_size=128,
        dtype=torch.bfloat16,
        kv_cache_dtype=None,
        block_size=None,
        use_mla=False,
        has_sink=False,
        use_sparse=False,
        use_mm_prefix=False,
    )
    valid, _ = current_platform.get_valid_backends(
        device_capability=capability,
        attn_selector_config=selector_config,
    )
    return [b.name for b, _ in valid if b not in EXCLUDED_BACKENDS]
def get_attention_backend_params() -> list[str]:
    """Parametrization helper: backend names to run the tests against."""
    return get_available_attention_backends()
def get_tp_size_params() -> list[pytest.param]:
    """Parametrize only the TP sizes that fit on the available GPUs."""
    available = torch.cuda.device_count() if torch.cuda.is_available() else 1
    params = []
    for tp in TP_SIZES:
        if tp <= available:
            params.append(pytest.param(tp, id=f"tp{tp}"))
    return params
def get_mt_bench_prompts(
    tokenizer, num_prompts: int = DEFAULT_NUM_PROMPTS
) -> list[list[int]]:
    """Sample MT-Bench prompts and tokenize them into token-id lists.

    Uses a fixed seed (42) so every run sees the same prompt set as the
    baseline benchmarks.
    """
    # Mimic the CLI namespace expected by vllm.benchmarks.datasets.get_samples.
    args = SimpleNamespace(
        dataset_name="hf",
        dataset_path="philschmid/mt-bench",
        num_prompts=num_prompts,
        seed=42,
        no_oversample=False,
        endpoint_type="openai-chat",
        input_len=None,
        output_len=DEFAULT_OUTPUT_LEN,
        sharegpt_output_len=DEFAULT_OUTPUT_LEN,
        hf_name=None,
        hf_split="train",
        hf_subset=None,
        hf_output_len=DEFAULT_OUTPUT_LEN,
        no_stream=True,
        disable_shuffle=False,
        skip_chat_template=False,
    )
    samples = get_samples(args, tokenizer)
    # NOTE(review): presumably the chat template is already applied by
    # get_samples (skip_chat_template=False), hence add_special_tokens=False.
    prompt_ids = [
        tokenizer.encode(sample.prompt, add_special_tokens=False) for sample in samples
    ]
    return prompt_ids
def extract_acceptance_metrics(metrics, num_spec_tokens: int) -> dict:
    """Aggregate spec-decode counters into acceptance-length statistics.

    Returns a dict with the mean acceptance length
    (1 + accepted_tokens / num_drafts), the per-position contribution
    (accepted@pos / num_drafts), and the raw counters.
    """
    drafts = 0
    accepted = 0
    per_pos_counts = [0] * num_spec_tokens
    for metric in metrics:
        name = metric.name
        if name == "vllm:spec_decode_num_drafts":
            assert isinstance(metric, Counter)
            drafts += metric.value
        elif name == "vllm:spec_decode_num_accepted_tokens":
            assert isinstance(metric, Counter)
            accepted += metric.value
        elif name == "vllm:spec_decode_num_accepted_tokens_per_pos":
            assert isinstance(metric, Vector)
            limit = min(len(metric.values), num_spec_tokens)
            for pos in range(limit):
                per_pos_counts[pos] += metric.values[pos]
    if drafts > 0:
        acceptance_length = 1 + accepted / drafts
        per_pos_lengths = [count / drafts for count in per_pos_counts]
    else:
        # No drafts recorded: acceptance length degenerates to 1.
        acceptance_length = 1
        per_pos_lengths = [0.0 for _ in per_pos_counts]
    return {
        "acceptance_length": acceptance_length,
        "acceptance_lengths_per_pos": per_pos_lengths,
        "num_drafts": drafts,
        "num_accepted_tokens": accepted,
    }
@large_gpu_mark(min_gb=40)
@pytest.mark.skipif(
    not current_platform.is_cuda(),
    reason="This test is only supported on CUDA platform.",
)
@pytest.mark.parametrize(
    "model_config",
    [
        pytest.param(config, id=config.id, marks=config.marks)
        for config in EAGLE3_MODEL_CONFIGS
    ],
)
@pytest.mark.parametrize("num_spec_tokens", [DEFAULT_NUM_SPEC_TOKENS])
@pytest.mark.parametrize("tp_size", get_tp_size_params())
@pytest.mark.parametrize("attention_backend", get_attention_backend_params())
def test_eagle3_acceptance_length(
    model_config: Eagle3ModelConfig,
    num_spec_tokens: int,
    tp_size: int,
    attention_backend: str,
    monkeypatch: pytest.MonkeyPatch,
):
    """Regression test for EAGLE3 speculative decoding acceptance length.

    Generates over MT-Bench prompts at temperature 0 and asserts that the
    overall mean acceptance length, and optionally each position's
    contribution, stay within tolerance of the recorded expected values.
    """
    # Skip if this backend is incompatible with the model
    backend_enum = AttentionBackendEnum[attention_backend]
    if backend_enum in model_config.excluded_backends:
        pytest.skip(f"{attention_backend} is incompatible with {model_config.id}")
    with monkeypatch.context() as m:
        m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")
        with VllmRunner(
            model_name=model_config.verifier,
            speculative_config={
                "method": "eagle3",
                "model": model_config.drafter,
                "num_speculative_tokens": num_spec_tokens,
            },
            attention_config={"backend": attention_backend},
            tensor_parallel_size=tp_size,
            gpu_memory_utilization=0.7,
            # Stats must be enabled so acceptance metrics are recorded.
            disable_log_stats=False,
            max_model_len=DEFAULT_MAX_MODEL_LEN,
        ) as vllm_runner:
            tokenizer = vllm_runner.llm.get_tokenizer()
            prompt_ids = get_mt_bench_prompts(tokenizer, DEFAULT_NUM_PROMPTS)
            # Greedy decoding so acceptance statistics are deterministic.
            sampling_params = SamplingParams(
                temperature=0,
                max_tokens=DEFAULT_OUTPUT_LEN,
            )
            vllm_runner.llm.generate(
                [TokensPrompt(prompt_token_ids=ids) for ids in prompt_ids],
                sampling_params=sampling_params,
            )
            metrics = vllm_runner.llm.get_metrics()
    results = extract_acceptance_metrics(metrics, num_spec_tokens)
    actual_acceptance_length = results["acceptance_length"]
    expected = model_config.expected_acceptance_length
    actual_per_pos = results["acceptance_lengths_per_pos"]
    expected_per_pos = model_config.expected_acceptance_lengths_per_pos
    rel_error = abs(actual_acceptance_length - expected) / expected
    # Overall acceptance length always uses DEFAULT_RTOL
    assert rel_error <= DEFAULT_RTOL, (
        f"Acceptance length regression detected for {model_config.id}!\n"
        f"  Expected: {expected:.3f}\n"
        f"  Actual: {actual_acceptance_length:.3f}\n"
        f"  Relative error: {rel_error:.2%} (tolerance: {DEFAULT_RTOL:.2%})\n"
        f"  Drafts: {results['num_drafts']}, "
        f"Accepted tokens: {results['num_accepted_tokens']}"
    )
    # Per-position expectations are optional; only check when the recorded
    # list matches the measured one in length.
    if expected_per_pos and len(expected_per_pos) == len(actual_per_pos):
        # Per-position checks use model-specific rtol if provided
        rtol = (
            model_config.rtol if model_config.rtol is not None else DEFAULT_RTOL
        )
        for pos, (actual, exp) in enumerate(
            zip(actual_per_pos, expected_per_pos)
        ):
            if exp > 0:
                pos_rel_error = abs(actual - exp) / exp
                assert pos_rel_error <= rtol, (
                    f"Per-position acceptance length regression at pos {pos} "
                    f"for {model_config.id}!\n"
                    f"  Expected: {exp:.3f}\n"
                    f"  Actual: {actual:.3f}\n"
                    f"  Relative error: {pos_rel_error:.2%} "
                    f"(tolerance: {rtol:.2%})"
                )
    print(
        f"\n{model_config.id} [tp={tp_size}, backend={attention_backend}]: "
        f"acceptance_length={actual_acceptance_length:.3f}"
        f" (expected={expected:.3f}, rel_error={rel_error:.2%})"
    )
    print(f"  Per-position: {[f'{v:.3f}' for v in actual_per_pos]}")
    if expected_per_pos:
        print(f"  Expected:     {[f'{v:.3f}' for v in expected_per_pos]}")
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/v1/spec_decode/test_acceptance_length.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/spec_decode/draft_model.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import torch
import torch.nn as nn
from typing_extensions import override
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model
from vllm.v1.spec_decode.eagle import SpecDecodeBaseProposer
from vllm.v1.spec_decode.utils import create_vllm_config_for_draft_model
logger = init_logger(__name__)
class DraftModelProposer(SpecDecodeBaseProposer):
    """Proposer that runs a separate draft model for speculative decoding."""

    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        runner=None,
    ):
        super().__init__(
            vllm_config=vllm_config,
            device=device,
            pass_hidden_states_to_model=False,
            runner=runner,
        )
        # Fail fast on configurations the draft-model path cannot support.
        self._raise_if_vocab_size_mismatch()
        self._raise_if_draft_tp_mismatch()

    def _raise_if_vocab_size_mismatch(self):
        # Draft and target models must use the same vocabulary size.
        self.speculative_config.verify_equal_vocab_size_if_draft_model()

    def _raise_if_draft_tp_mismatch(self):
        # Note(Tomas Ruiz) If we run the target model with TP > 1 and
        # the draft model with TP = 1, then the different TP ranks collide.
        # Specifically when all ranks compile the draft model on rank 0
        # (because TP=1), then the torch compile cache is overwritten and corrupted.
        # We need a mechanism like this: https://github.com/vllm-project/vllm/pull/5414
        # To prevent this error, we assert that both TP sizes must be the same.
        cfg = self.speculative_config
        target_tp = cfg.target_parallel_config.tensor_parallel_size
        draft_tp = cfg.draft_parallel_config.tensor_parallel_size
        if draft_tp == target_tp:
            return
        raise ValueError(
            f"Currently, 'draft_tensor_parallel_size' and 'tensor_parallel_size' "
            f"must be the same. Got {draft_tp} and {target_tp}. "
            "Please pass 'draft_tensor_parallel_size' in the speculative_config."
        )

    @override
    def _get_model(self) -> nn.Module:
        # Draft models may be quantized or use different parallelism, so they
        # are loaded under a modified vllm config and a distinct compile tag.
        from vllm.compilation.backends import set_model_tag

        draft_vllm_config = create_vllm_config_for_draft_model(self.vllm_config)
        with set_model_tag("draft_model"):
            return get_model(
                vllm_config=draft_vllm_config,
                prefix="draft_model",
            )

    @override
    def _maybe_share_embeddings(self, target_language_model: nn.Module) -> None:
        # Draft models don't share embeddings with the target model.
        pass

    @override
    def _maybe_share_lm_head(self, target_language_model: nn.Module) -> None:
        # Draft models don't share lm_head with the target model.
        pass
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/spec_decode/draft_model.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/helion/test_helion_available.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Tests for Helion kernel availability and basic functionality.
This module demonstrates the pattern for testing optional Helion kernels.
Tests in this directory will be skipped if Helion is not installed.
"""
import pytest
from vllm.utils.import_utils import has_helion
# Skip the entire module when Helion is not installed; allow_module_level
# lets pytest.skip be raised at import time instead of inside a test.
if not has_helion():
    pytest.skip(
        "Helion is not installed. Install with: pip install vllm[helion]",
        allow_module_level=True,
    )
import helion
import helion.language as hl
import torch
def test_helion_kernel_compilation_smoke():
    """Smoke test: compile and run a trivial element-wise Helion kernel."""

    @helion.kernel(autotune_effort="none")
    def elementwise_add(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
        result = torch.empty_like(a)
        for tile in hl.tile(a.size()):
            result[tile] = a[tile] + b[tile]
        return result

    # Helion kernels execute on the GPU, so inputs live on CUDA.
    lhs = torch.randn(1024, device="cuda", dtype=torch.float32)
    rhs = torch.randn(1024, device="cuda", dtype=torch.float32)
    kernel_out = elementwise_add(lhs, rhs)
    # The compiled kernel must reproduce the eager PyTorch result.
    assert torch.allclose(kernel_out, lhs + rhs), "Helion kernel output mismatch"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/helion/test_helion_available.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/models/glm4_moe_lite.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The ZhipuAI Team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GLM-4.7-Flash model compatible with HuggingFace weights."""
import typing
from collections.abc import Callable, Iterable
from itertools import islice
from typing import TYPE_CHECKING
import torch
from torch import nn
if TYPE_CHECKING:
from transformers.models.glm4_moe_lite import Glm4MoeLiteConfig
from vllm._aiter_ops import rocm_aiter_ops
from vllm.compilation.decorators import support_torch_compile
from vllm.config import VllmConfig
from vllm.distributed import (
get_pp_group,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe import SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.model_executor.models.deepseek_v2 import (
DeepseekV2Attention,
DeepseekV2MLAAttention,
)
from vllm.model_executor.models.glm4_moe import (
Glm4MixtureOfExperts,
Glm4MoE,
Glm4MoeMLP,
)
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from .interfaces import SupportsLoRA, SupportsPP
from .utils import (
AutoWeightsLoader,
PPMissingLayer,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)
class Glm4MoeLiteMLP(Glm4MoeMLP):
    """Alias of the shared Glm4MoeMLP dense MLP under the Lite model's namespace."""
    pass
class Glm4MoeLite(Glm4MoE):
    """Alias of the shared Glm4MoE routed-experts block for the Lite model."""
    pass
class Glm4LiteMixtureOfExperts(Glm4MixtureOfExperts):
    """Alias of the shared Glm4MixtureOfExperts mixin for the Lite model."""
    pass
class Glm4MoeLiteAttention(DeepseekV2Attention):
    """Alias of DeepseekV2Attention (standard attention path) for the Lite model."""
    pass
class Glm4MoeLiteMLAAttention(DeepseekV2MLAAttention):
    """Alias of DeepseekV2MLAAttention (MLA attention path) for the Lite model."""
    pass
class Glm4MoeLiteDecoderLayer(nn.Module):
    """A single GLM-4 MoE Lite decoder layer.

    Wraps self-attention (MLA or standard, chosen from the model config) and
    an MLP that is either a routed-MoE block or a dense MLP, with pre-/post-
    attention RMSNorm and a residual stream threaded between layers.
    """

    def __init__(
        self,
        vllm_config: VllmConfig,
        prefix: str,
        config: "Glm4MoeLiteConfig | None" = None,
        topk_indices_buffer: torch.Tensor | None = None,
    ) -> None:
        super().__init__()
        if config is None:
            config = vllm_config.model_config.hf_config
        model_config = vllm_config.model_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.hidden_size = config.hidden_size
        max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
        moe_layer_freq = getattr(config, "moe_layer_freq", 1)
        # DecoderLayers are created with `make_layers` which passes the prefix
        # with the layer's index.
        layer_idx = int(prefix.split(sep=".")[-1])
        self.layer_idx = layer_idx
        # MLA-attention-specific fields; default to 0 when absent so the
        # standard attention path still receives valid arguments.
        qk_nope_head_dim = getattr(config, "qk_nope_head_dim", 0)
        qk_rope_head_dim = getattr(config, "qk_rope_head_dim", 0)
        v_head_dim = getattr(config, "v_head_dim", 0)
        kv_lora_rank = getattr(config, "kv_lora_rank", 0)
        if model_config.use_mla:
            attn_cls = Glm4MoeLiteMLAAttention
        else:
            attn_cls = Glm4MoeLiteAttention
        self.self_attn = attn_cls(
            vllm_config=vllm_config,
            config=config,
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            qk_nope_head_dim=qk_nope_head_dim,
            qk_rope_head_dim=qk_rope_head_dim,
            v_head_dim=v_head_dim,
            q_lora_rank=config.q_lora_rank if hasattr(config, "q_lora_rank") else None,
            kv_lora_rank=kv_lora_rank,
            max_position_embeddings=max_position_embeddings,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
            topk_indices_buffer=topk_indices_buffer,
        )
        # MoE replaces the dense MLP from layer `first_k_dense_replace`
        # onwards, at the configured layer frequency.
        if (
            config.n_routed_experts is not None
            and layer_idx >= config.first_k_dense_replace
            and layer_idx % moe_layer_freq == 0
        ):
            self.mlp = Glm4MoeLite(
                config=config,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        else:
            self.mlp = Glm4MoeLiteMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp",
            )
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size, eps=config.rms_norm_eps
        )
        self.routed_scaling_factor = getattr(config, "routed_scaling_factor", 1.0)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        llama_4_scaling: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run attention + MLP for one layer.

        Returns the pair ``(hidden_states, residual)`` for the next layer's
        fused norm+residual path. (Fixed: the return annotation previously
        claimed a single ``torch.Tensor`` although a tuple is returned.)
        """
        # Self Attention
        if residual is None:
            # First layer: seed the residual stream from the raw input.
            residual = hidden_states.clone()
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        attn_kwargs = {
            "positions": positions,
            "hidden_states": hidden_states,
        }
        attn_kwargs["llama_4_scaling"] = llama_4_scaling
        hidden_states = self.self_attn(**attn_kwargs)
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
@support_torch_compile(
    dynamic_arg_dims={
        "input_ids": 0,
        "positions": -1,
        "intermediate_tensors": 0,
        "inputs_embeds": 0,
    }
)
class Glm4MoeLiteModel(nn.Module):
    """GLM-4 MoE Lite transformer backbone: embeddings, decoder stack, final norm.

    Pipeline-parallel aware: only the first PP rank owns the embedding table
    and only the last PP rank applies the final norm; other ranks exchange
    `IntermediateTensors`.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.device = current_platform.device_type
        self.vocab_size = config.vocab_size
        # Configs carrying `index_topk` need a shared buffer for sparse
        # attention top-k indices, passed to every decoder layer.
        self.is_v32 = hasattr(config, "index_topk")
        if self.is_v32:
            topk_tokens = config.index_topk
            topk_indices_buffer = torch.empty(
                vllm_config.scheduler_config.max_num_batched_tokens,
                topk_tokens,
                dtype=torch.int32,
                device=self.device,
            )
        else:
            topk_indices_buffer = None
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=f"{prefix}.embed_tokens",
            )
        else:
            self.embed_tokens = PPMissingLayer()
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: Glm4MoeLiteDecoderLayer(
                vllm_config=vllm_config,
                config=config,
                prefix=prefix,
                topk_indices_buffer=topk_indices_buffer,
            ),
            prefix=f"{prefix}.layers",
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        # NOTE(review): this instance attribute shadows the
        # `make_empty_intermediate_tensors` method defined below; instances
        # always use the factory-produced callable.
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size
        )
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids via the (vocab-parallel) embedding table."""
        return self.embed_tokens(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run this rank's slice of the decoder stack.

        Returns final hidden states on the last PP rank, otherwise
        `IntermediateTensors` to forward to the next rank.
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Non-first ranks receive activations from the previous rank.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(positions, hidden_states, residual)
        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
    def make_empty_intermediate_tensors(
        self, batch_size: int, dtype: torch.dtype, device: torch.device
    ) -> IntermediateTensors:
        # NOTE(review): shadowed by the instance attribute assigned in
        # __init__ (factory-produced), so this method body is not reached
        # through normal instance access.
        return IntermediateTensors(
            {
                "hidden_states": torch.zeros(
                    (batch_size, self.config.hidden_size), dtype=dtype, device=device
                ),
                "residual": torch.zeros(
                    (batch_size, self.config.hidden_size), dtype=dtype, device=device
                ),
            }
        )
    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        return SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts,
        )
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights into this module.

        Handles stacked projections (gate_up, fused qkv_a), per-expert MoE
        weights, and (optionally) AITER fused shared experts. Returns the set
        of parameter names that were loaded.
        """
        rocm_aiter_moe_shared_expert_enabled = (
            rocm_aiter_ops.is_fusion_moe_shared_experts_enabled()
        )
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        mla_params_mapping = [
            ("fused_qkv_a_proj", "q_a_proj", 0),
            ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
        ]
        stacked_params_mapping.extend(mla_params_mapping)
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        expert_params_mapping = SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts
            + (
                self.config.n_shared_experts
                if rocm_aiter_moe_shared_expert_enabled
                else 0
            ),
        )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            if spec_layer is not None:
                continue  # skip spec decode layers for main model
            is_fusion_moe_shared_experts_layer = (
                rocm_aiter_moe_shared_expert_enabled and ("mlp.shared_experts" in name)
            )
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                if is_fusion_moe_shared_experts_layer:
                    continue
                name_mapped = name.replace(weight_name, param_name)
                # QKV fusion is optional, fall back to normal
                # weight loading if it's not enabled
                # if go with fusion option, then update name
                if (
                    param_name == "fused_qkv_a_proj"
                ) and name_mapped not in params_dict:
                    continue
                else:
                    name = name_mapped
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                is_expert_weight = False
                # Special handling: when AITER fusion_shared_experts is enabled,
                # checkpoints may provide a single widened shared_experts tensor
                # without explicit expert indices
                # (e.g. ...mlp.shared_experts.gate_proj.weight).
                # For models with multiple shared experts, split that tensor
                # evenly into per-shared-expert slices and load them into
                # appended expert slots mlp.experts.{n_routed_experts + j}.*
                # accordingly.
                num_chunks = 1
                if is_fusion_moe_shared_experts_layer:
                    num_chunks = getattr(self.config, "n_shared_experts", 1) or 1
                    # Determine split axis based on op type
                    # gate/up: ColumnParallel → split along dim 0
                    # down: RowParallel → split along dim 1
                    split_dim = 1 if "down_proj.weight" in name else 0
                    total = loaded_weight.shape[split_dim]
                    assert total % num_chunks == 0, (
                        f"Shared expert weight dim {total} "
                        f"not divisible by num_chunks {num_chunks}"
                    )
                    chunk_size = total // num_chunks
                for j in range(num_chunks):
                    chunk_name = name
                    weight_to_load = loaded_weight
                    if is_fusion_moe_shared_experts_layer:
                        if split_dim == 0:
                            weight_to_load = loaded_weight[
                                j * chunk_size : (j + 1) * chunk_size, :
                            ]
                        else:
                            weight_to_load = loaded_weight[
                                :, j * chunk_size : (j + 1) * chunk_size
                            ]
                        # Synthesize an expert-style name so expert mapping
                        # can route it
                        chunk_name = name.replace(
                            "mlp.shared_experts",
                            f"mlp.experts.{self.config.n_routed_experts + j}",
                        )
                    # Use expert_params_mapping to locate the destination
                    # param and delegate to its expert-aware weight_loader
                    # with expert_id.
                    for mapping in expert_params_mapping:
                        param_name, weight_name, expert_id, shard_id = mapping
                        if weight_name not in chunk_name:
                            continue
                        # Anyway, this is an expert weight and should not be
                        # attempted to load as other weights later
                        is_expert_weight = True
                        # Do not modify `name` since the loop may continue here
                        # Instead, create a new variable
                        name_mapped = chunk_name.replace(weight_name, param_name)
                        if is_pp_missing_parameter(name_mapped, self):
                            continue
                        param = params_dict[name_mapped]
                        # We should ask the weight loader to return success or
                        # not here since otherwise we may skip experts with
                        # other available replicas.
                        weight_loader = typing.cast(
                            Callable[..., bool], param.weight_loader
                        )
                        success = weight_loader(
                            param,
                            weight_to_load,
                            name_mapped,
                            shard_id=shard_id,
                            expert_id=expert_id,
                            return_success=True,
                        )
                        if success:
                            if not is_fusion_moe_shared_experts_layer:
                                name = name_mapped
                            else:
                                loaded_params.add(name_mapped)
                            break
                    else:
                        if is_expert_weight:
                            # We've checked that this is an expert weight
                            # However it's not mapped locally to this rank
                            # So we simply skip it
                            continue
                        # Skip loading extra bias for GPTQ models.
                        if name.endswith(".bias") and name not in params_dict:
                            continue
                        # Remapping the name of FP8 kv-scale.
                        name = maybe_remap_kv_scale_name(name, params_dict)
                        if name is None:
                            continue
                        if is_pp_missing_parameter(name, self):
                            continue
                        param = params_dict[name]
                        weight_loader = getattr(
                            param, "weight_loader", default_weight_loader
                        )
                        weight_loader(param, loaded_weight)
                if not is_fusion_moe_shared_experts_layer:
                    loaded_params.add(name)
        return loaded_params
class Glm4MoeLiteForCausalLM(
    nn.Module, SupportsPP, SupportsLoRA, Glm4LiteMixtureOfExperts
):
    """GLM-4 MoE Lite causal-LM head: backbone + LM head + logits processor."""

    packed_modules_mapping = {
        "gate_up_proj": ["gate_proj", "up_proj"],
    }
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        # MHA is used when the config is DeepSeek-style-plain or when the
        # MLA head dims are absent/zero.
        qk_nope_head_dim = getattr(config, "qk_nope_head_dim", 0)
        qk_rope_head_dim = getattr(config, "qk_rope_head_dim", 0)
        self.use_mha = config.model_type == "deepseek" or all(
            dim == 0 for dim in (qk_nope_head_dim, qk_rope_head_dim)
        )
        if self.use_mha:
            self.packed_modules_mapping["qkv_proj"] = ["q_proj", "k_proj", "v_proj"]
        # `packed_modules_mapping` needs to be modified before
        # initializing DeepseekV2Model, as it is passed inplace to
        # quantization config init and may be used to select the
        # quant_method for relevant layers during initialization.
        self.fuse_qkv_a_proj = (
            hasattr(config, "q_lora_rank") and config.q_lora_rank is not None
        )
        if self.fuse_qkv_a_proj:
            self.packed_modules_mapping["fused_qkv_a_proj"] = [
                "q_a_proj",
                "kv_a_proj_with_mqa",
            ]
        self.model = Glm4MoeLiteModel(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        # Only the last pipeline-parallel rank carries the LM head.
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
        else:
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )
        # Set MoE hyperparameters
        self.num_moe_layers = (
            self.config.num_hidden_layers - self.config.first_k_dense_replace
        )
        self.set_moe_parameters()
    def set_moe_parameters(self):
        """Collect the MoE sub-layers and derive shared MoE metadata."""
        self.expert_weights = []
        self.num_expert_groups = getattr(self.config, "n_group", 1)
        self.moe_layers = []
        self.moe_mlp_layers = []
        example_moe = None
        for layer in self.model.layers:
            if isinstance(layer, PPMissingLayer):
                continue
            assert isinstance(layer, Glm4MoeLiteDecoderLayer)
            if isinstance(layer.mlp, Glm4MoeLite):
                # Pick last one layer since the first ones may be dense layers.
                example_moe = layer.mlp
                self.moe_mlp_layers.append(layer.mlp)
                self.moe_layers.append(layer.mlp.experts)
        self.extract_moe_parameters(example_moe)
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        # Delegate token embedding to the backbone.
        return self.model.embed_input_ids(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors:
        """Run the backbone; see Glm4MoeLiteModel.forward for PP semantics."""
        hidden_states = self.model(
            input_ids, positions, intermediate_tensors, inputs_embeds
        )
        return hidden_states
    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits
    def get_expert_mapping(self) -> list[tuple[str, str, int, str]]:
        # Params for weights, fp8 weight scales, fp8 activation scales
        # (param_name, weight_name, expert_id, shard_id)
        return SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            num_experts=self.config.n_routed_experts,
            num_redundant_experts=0,
        )
    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        # Delegates to the backbone's load_weights via AutoWeightsLoader.
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
def get_spec_layer_idx_from_weight_name(
config: "Glm4MoeLiteConfig", weight_name: str
) -> int | None:
if hasattr(config, "num_nextn_predict_layers") and (
config.num_nextn_predict_layers > 0
):
layer_idx = config.num_hidden_layers
for i in range(config.num_nextn_predict_layers):
if f"layers.{layer_idx + i}." in weight_name:
return layer_idx + i
return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glm4_moe_lite.py",
"license": "Apache License 2.0",
"lines": 558,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/glm4_moe_lite_mtp.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Copyright 2025 The ZhipuAI Team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GLM-4.7-Flash MTP model compatible with HuggingFace weights."""
import typing
from collections.abc import Callable, Iterable
import torch
import torch.nn as nn
from transformers import PretrainedConfig
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config import VllmConfig
from vllm.model_executor.layers.fused_moe import FusedMoE, SharedFusedMoE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import (
default_weight_loader,
maybe_remap_kv_scale_name,
)
from vllm.platforms import current_platform
from vllm.sequence import IntermediateTensors
from .glm4_moe_lite import (
Glm4MixtureOfExperts,
Glm4MoeLite,
Glm4MoeLiteDecoderLayer,
get_spec_layer_idx_from_weight_name,
)
from .interfaces import SupportsPP
from .utils import maybe_prefix
class SharedHead(nn.Module):
    """Shared MTP output head: an RMSNorm plus a vocab-parallel LM head.

    `forward` applies only the norm; the LM head itself is invoked separately
    by the caller (see `Glm4MoeLiteMultiTokenPredictor.compute_logits`).
    """

    def __init__(
        self,
        config: PretrainedConfig,
        prefix: str,
        quant_config: QuantizationConfig | None = None,
    ) -> None:
        super().__init__()
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        head_prefix = maybe_prefix(prefix, "head")
        self.head = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
            quant_config=quant_config,
            prefix=head_prefix,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Normalize only; projecting to vocabulary space is the caller's job.
        normed = self.norm(hidden_states)
        return normed
class Glm4MoeLiteMultiTokenPredictorLayer(nn.Module):
    """One MTP layer: fuses token embeddings with the previous hidden states,
    then runs a single decoder block; logits come via the shared head."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.speculative_config.draft_model_config.hf_config
        self.config = config
        quant_config = vllm_config.quant_config
        # Separate norms for the token embedding and the prior hidden state.
        self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        # Projects the concatenated [embedding, hidden] pair back to hidden_size.
        self.eh_proj = nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False)
        self.device = current_platform.device_type
        # Configs carrying `index_topk` need a shared buffer for sparse
        # attention top-k indices.
        self.is_v32 = hasattr(config, "index_topk")
        if self.is_v32:
            topk_tokens = config.index_topk
            topk_indices_buffer = torch.empty(
                vllm_config.scheduler_config.max_num_batched_tokens,
                topk_tokens,
                dtype=torch.int32,
                device=self.device,
            )
        else:
            topk_indices_buffer = None
        self.shared_head = SharedHead(
            config=config, prefix=prefix, quant_config=quant_config
        )
        self.mtp_block = Glm4MoeLiteDecoderLayer(
            vllm_config=vllm_config,
            prefix=prefix,
            config=self.config,
            topk_indices_buffer=topk_indices_buffer,
        )
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        """Fuse embeddings with prior hidden states and run the decoder block.

        Note: `inputs_embeds` is required and is modified in place by the
        position-0 masking below.
        """
        assert inputs_embeds is not None
        # masking inputs at position 0, as not needed by MTP
        inputs_embeds[positions == 0] = 0
        inputs_embeds = self.enorm(inputs_embeds)
        previous_hidden_states = self.hnorm(previous_hidden_states)
        hidden_states = self.eh_proj(
            torch.cat([inputs_embeds, previous_hidden_states], dim=-1)
        )
        hidden_states, residual = self.mtp_block(
            positions=positions, hidden_states=hidden_states, residual=None
        )
        # Fold the residual stream back in before handing off to the head.
        hidden_states = residual + hidden_states
        return hidden_states
class Glm4MoeLiteMultiTokenPredictor(nn.Module):
    """Container for the MTP layers, keyed by their absolute layer index
    (which starts right after the main model's decoder layers)."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers
        # to map the exact layer index from weights
        self.layers = torch.nn.ModuleDict(
            {
                str(idx): Glm4MoeLiteMultiTokenPredictorLayer(
                    vllm_config=vllm_config,
                    prefix=f"{prefix}.layers.{idx}",
                )
                for idx in range(
                    self.mtp_start_layer_idx,
                    self.mtp_start_layer_idx + self.num_mtp_layers,
                )
            }
        )
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.logits_processor = LogitsProcessor(config.vocab_size)
    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids with the MTP embedding table."""
        return self.embed_tokens(input_ids)
    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run the MTP layer for this speculative step."""
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        # Steps beyond the number of MTP layers wrap around.
        current_step_idx = spec_step_idx % self.num_mtp_layers
        return self.layers[str(self.mtp_start_layer_idx + current_step_idx)](
            input_ids,
            positions,
            previous_hidden_states,
            inputs_embeds,
            current_step_idx,
        )
    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Project hidden states to logits via the step's shared head."""
        current_step_idx = spec_step_idx % self.num_mtp_layers
        mtp_layer = self.layers[str(self.mtp_start_layer_idx + current_step_idx)]
        # shared_head(...) applies only the norm; the LM head weight is
        # passed separately to the logits processor.
        logits = self.logits_processor(
            mtp_layer.shared_head.head, mtp_layer.shared_head(hidden_states)
        )
        return logits
class Glm4MoeLiteMTP(nn.Module, SupportsPP, Glm4MixtureOfExperts):
    """GLM-4 MoE Lite multi-token-prediction (MTP) model for speculative
    decoding.

    Wraps a Glm4MoeLiteMultiTokenPredictor and exposes the MoE layer
    bookkeeping (``moe_layers``, ``moe_mlp_layers``) consumed via the
    Glm4MixtureOfExperts interface.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.model = Glm4MoeLiteMultiTokenPredictor(
            vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")
        )
        self.expert_weights = []
        # Set MoE hyperparameters
        self.num_moe_layers = self.config.num_nextn_predict_layers
        self.num_expert_groups = self.config.n_group
        self.moe_layers: list[FusedMoE] = []
        self.moe_mlp_layers: list[Glm4MoeLite] = []
        example_moe = None
        # Collect the MoE MLP from each MTP layer's transformer block so the
        # MoE machinery can address them uniformly.
        for layer in self.model.layers.values():
            assert isinstance(layer, Glm4MoeLiteMultiTokenPredictorLayer)
            layer = layer.mtp_block
            assert isinstance(layer, Glm4MoeLiteDecoderLayer)
            if isinstance(layer.mlp, Glm4MoeLite):
                example_moe = layer.mlp
                self.moe_mlp_layers.append(layer.mlp)
                self.moe_layers.append(layer.mlp.experts)
        self.extract_moe_parameters(example_moe)

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings via the wrapped predictor."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        """Run one MTP speculative step; `hidden_states` is the previous
        step's hidden state fed into the predictor."""
        hidden_states = self.model(
            input_ids, positions, hidden_states, inputs_embeds, spec_step_idx
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        spec_step_idx: int = 0,
    ) -> torch.Tensor | None:
        """Delegate logit computation to the predictor's per-step shared head."""
        return self.model.compute_logits(hidden_states, spec_step_idx)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights for the MTP layers.

        Handles stacked projections (gate/up, fused qkv-a), per-expert MoE
        weight remapping, and (optionally) AITER fused shared experts.
        Returns the set of parameter names that were actually loaded.
        """
        rocm_aiter_moe_shared_expert_enabled = (
            rocm_aiter_ops.is_fusion_moe_shared_experts_enabled()
        )
        # (stacked param name, checkpoint shard name, shard id)
        stacked_params_mapping = [
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
            ("fused_qkv_a_proj", "q_a_proj", 0),
            ("fused_qkv_a_proj", "kv_a_proj_with_mqa", 1),
        ]
        expert_params_mapping = SharedFusedMoE.make_expert_params_mapping(
            self,
            ckpt_gate_proj_name="gate_proj",
            ckpt_down_proj_name="down_proj",
            ckpt_up_proj_name="up_proj",
            # When AITER fuses shared experts, they occupy extra expert slots
            # appended after the routed experts.
            num_experts=self.config.n_routed_experts
            + (
                self.config.n_shared_experts
                if rocm_aiter_moe_shared_expert_enabled
                else 0
            ),
        )
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            # Only weights belonging to a speculative (MTP) layer are loaded.
            spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
            if spec_layer is None:
                continue
            is_fusion_moe_shared_experts_layer = (
                rocm_aiter_moe_shared_expert_enabled and ("mlp.shared_experts" in name)
            )
            name = self._rewrite_spec_layer_name(spec_layer, name)
            for param_name, weight_name, shard_id in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if ("mlp.experts." in name) and name not in params_dict:
                    continue
                if is_fusion_moe_shared_experts_layer:
                    continue
                name_mapped = name.replace(weight_name, param_name)
                # QKV fusion is optional, fall back to normal
                # weight loading if it's not enabled
                if (
                    param_name == "fused_qkv_a_proj"
                ) and name_mapped not in params_dict:
                    continue
                else:
                    name = name_mapped
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Special handling: when AITER fusion_shared_experts is enabled,
                # checkpoints may provide a single widened shared_experts tensor
                # without explicit expert indices
                # (e.g. ...mlp.shared_experts.gate_proj.weight).
                # For models with multiple shared experts, split that tensor
                # evenly into per-shared-expert slices and load them into
                # appended expert slots mlp.experts.{n_routed_experts + j}.*
                # accordingly.
                num_chunks = 1
                if is_fusion_moe_shared_experts_layer:
                    num_chunks = getattr(self.config, "n_shared_experts", 1) or 1
                    # Determine split axis based on op type
                    # gate/up: ColumnParallel → split along dim 0
                    # down: RowParallel → split along dim 1
                    split_dim = 1 if "down_proj.weight" in name else 0
                    total = loaded_weight.shape[split_dim]
                    assert total % num_chunks == 0, (
                        f"Shared expert weight dim {total} "
                        f"not divisible by num_chunks {num_chunks}"
                    )
                    chunk_size = total // num_chunks
                for j in range(num_chunks):
                    chunk_name = name
                    weight_to_load = loaded_weight
                    if is_fusion_moe_shared_experts_layer:
                        if split_dim == 0:
                            weight_to_load = loaded_weight[
                                j * chunk_size : (j + 1) * chunk_size, :
                            ]
                        else:
                            weight_to_load = loaded_weight[
                                :, j * chunk_size : (j + 1) * chunk_size
                            ]
                        # Synthesize an expert-style name so expert mapping
                        # can route it
                        chunk_name = name.replace(
                            "mlp.shared_experts",
                            f"mlp.experts.{self.config.n_routed_experts + j}",
                        )
                    # Use expert_params_mapping to locate the destination
                    # param and delegate to its expert-aware weight_loader
                    # with expert_id.
                    is_expert_weight = False
                    for mapping in expert_params_mapping:
                        param_name, weight_name, expert_id, shard_id = mapping
                        if weight_name not in chunk_name:
                            continue
                        # Anyway, this is an expert weight and should not be
                        # attempted to load as other weights later
                        is_expert_weight = True
                        # Do not modify `name` since the loop may continue here
                        # Instead, create a new variable
                        name_mapped = chunk_name.replace(weight_name, param_name)
                        param = params_dict[name_mapped]
                        # We should ask the weight loader to return success or
                        # not here since otherwise we may skip experts with
                        # other available replicas.
                        weight_loader = typing.cast(
                            Callable[..., bool], param.weight_loader
                        )
                        success = weight_loader(
                            param,
                            weight_to_load,
                            name_mapped,
                            shard_id=shard_id,
                            expert_id=expert_id,
                            return_success=True,
                        )
                        if success:
                            if not is_fusion_moe_shared_experts_layer:
                                name = name_mapped
                            else:
                                loaded_params.add(name_mapped)
                            break
                    else:
                        if is_expert_weight:
                            # We've checked that this is an expert weight
                            # However it's not mapped locally to this rank
                            # So we simply skip it
                            continue
                        # Skip loading extra bias for GPTQ models.
                        if name.endswith(".bias") and name not in params_dict:
                            continue
                        name = maybe_remap_kv_scale_name(name, params_dict)
                        if name is None:
                            continue
                        # According to DeepSeek-V3 Technical Report, MTP modules
                        # shares embedding layer. We only load the first weights.
                        if (
                            spec_layer != self.model.mtp_start_layer_idx
                            and ".layers" not in name
                        ):
                            continue
                        param = params_dict[name]
                        weight_loader = getattr(
                            param, "weight_loader", default_weight_loader
                        )
                        weight_loader(param, loaded_weight)
            if not is_fusion_moe_shared_experts_layer:
                loaded_params.add(name)
        return loaded_params

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in transformer layer block for spec layer
        and rename shared layer weights to be top level.
        """
        # Weights that belong to the MTP layer itself (not the inner
        # transformer block).
        spec_layer_weight_names = [
            "embed_tokens",
            "enorm",
            "hnorm",
            "eh_proj",
            "shared_head",
        ]
        # Weights shared with the base model; hoisted to top-level names.
        shared_weight_names = ["embed_tokens"]
        spec_layer_weight = False
        shared_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                if weight_name in shared_weight_names:
                    shared_weight = True
                break
        if not spec_layer_weight:
            # treat rest weights as weights for transformer layer block
            name = name.replace(
                f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block."
            )
        elif shared_weight:
            # treat shared weights as top level weights
            name = name.replace(f"model.layers.{spec_layer}.", "model.")
        return name
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/glm4_moe_lite_mtp.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/entrypoints/openai/test_render.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Tests for the /render endpoints that expose prompt preprocessing."""
import httpx
import pytest
import pytest_asyncio
from ...utils import RemoteOpenAIServer
MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
@pytest.fixture(scope="module")
def server():
    """Launch the OpenAI-compatible server once for the whole module."""
    cli_args: list[str] = []  # no extra CLI flags needed
    with RemoteOpenAIServer(MODEL_NAME, cli_args) as remote_server:
        yield remote_server
@pytest_asyncio.fixture
async def client(server):
    """Async HTTP client bound to the running server's base URL."""
    async with httpx.AsyncClient(
        timeout=30.0, base_url=server.url_for("")
    ) as session:
        yield session
@pytest.mark.asyncio
async def test_completion_render_basic(client):
    """Test basic completion render endpoint."""
    resp = await client.post(
        "/v1/completions/render",
        json={
            "model": MODEL_NAME,
            "prompt": "When should a chat-completions handler return an empty string?",
        },
    )
    assert resp.status_code == 200

    rendered = resp.json()
    # The endpoint returns a non-empty list of engine prompts.
    assert isinstance(rendered, list)
    assert len(rendered) > 0

    head = rendered[0]
    # Each entry carries both the token ids and the original prompt text.
    assert "prompt_token_ids" in head
    assert "prompt" in head
    assert isinstance(head["prompt_token_ids"], list)
    assert len(head["prompt_token_ids"]) > 0
    assert isinstance(head["prompt"], str)
    # The original prompt text survives preprocessing.
    assert (
        "When should a chat-completions handler return an empty string?"
        in head["prompt"]
    )
@pytest.mark.asyncio
async def test_chat_completion_render_basic(client):
    """Test basic chat completion render endpoint."""
    resp = await client.post(
        "/v1/chat/completions/render",
        json={
            "model": MODEL_NAME,
            "messages": [
                {
                    "role": "user",
                    "content": (
                        "Returning an empty string for the prompt may be confusing."
                    ),
                }
            ],
        },
    )
    assert resp.status_code == 200

    # The endpoint returns a [conversation, engine_prompts] pair.
    body = resp.json()
    assert isinstance(body, list)
    assert len(body) == 2
    conversation, engine_prompts = body

    # The user message round-trips through the conversation.
    assert isinstance(conversation, list)
    assert len(conversation) > 0
    assert conversation[0]["role"] == "user"
    assert "empty string" in conversation[0]["content"]

    # At least one tokenized engine prompt comes back.
    assert isinstance(engine_prompts, list)
    assert len(engine_prompts) > 0
    rendered = engine_prompts[0]
    assert "prompt_token_ids" in rendered
    assert "prompt" in rendered
    assert isinstance(rendered["prompt_token_ids"], list)
    assert len(rendered["prompt_token_ids"]) > 0

    # The chat template wrapped the message in instruction markers.
    assert "[INST]" in rendered["prompt"]
    assert "[/INST]" in rendered["prompt"]

    # Token ids are plain ints, starting with the BOS token (1 for LLaMA).
    ids = rendered["prompt_token_ids"]
    assert all(isinstance(t, int) for t in ids)
    assert ids[0] == 1
@pytest.mark.asyncio
async def test_completion_render_multiple_prompts(client):
    """Test completion render with multiple prompts."""
    resp = await client.post(
        "/v1/completions/render",
        json={
            "model": MODEL_NAME,
            "prompt": ["Hello world", "Goodbye world"],
        },
    )
    assert resp.status_code == 200

    rendered = resp.json()
    # One rendered prompt per input prompt.
    assert isinstance(rendered, list)
    assert len(rendered) == 2

    # Every entry is fully tokenized and keeps its text.
    for entry in rendered:
        assert "prompt_token_ids" in entry
        assert "prompt" in entry
        assert len(entry["prompt_token_ids"]) > 0
@pytest.mark.asyncio
async def test_chat_completion_render_multi_turn(client):
    """Test chat completion render with multi-turn conversation."""
    resp = await client.post(
        "/v1/chat/completions/render",
        json={
            "model": MODEL_NAME,
            "messages": [
                {"role": "user", "content": "Hello"},
                {"role": "assistant", "content": "Hi there!"},
                {"role": "user", "content": "How are you?"},
            ],
        },
    )
    assert resp.status_code == 200
    conversation, engine_prompts = resp.json()

    # Every turn survives preprocessing, in order.
    assert [turn["role"] for turn in conversation] == ["user", "assistant", "user"]

    # Tokenization produced at least one non-empty prompt.
    assert len(engine_prompts) > 0
    assert len(engine_prompts[0]["prompt_token_ids"]) > 0
@pytest.mark.asyncio
async def test_completion_render_error_invalid_model(client):
    """Test completion render with invalid model returns error."""
    resp = await client.post(
        "/v1/completions/render",
        json={
            "model": "invalid-model-name",
            "prompt": "Hello",
        },
    )
    # Unknown model -> 404 with an error payload.
    assert resp.status_code == 404
    assert "error" in resp.json()
@pytest.mark.asyncio
async def test_chat_completion_render_error_invalid_model(client):
    """Test chat completion render with invalid model returns error."""
    resp = await client.post(
        "/v1/chat/completions/render",
        json={
            "model": "invalid-model-name",
            "messages": [{"role": "user", "content": "Hello"}],
        },
    )
    # Unknown model -> 404 with an error payload.
    assert resp.status_code == 404
    assert "error" in resp.json()
@pytest.mark.asyncio
async def test_completion_render_no_generation(client):
    """Verify render endpoint does not generate text."""
    import time

    # Rendering performs no generation, so the call should be near-instant.
    t0 = time.perf_counter()
    resp = await client.post(
        "/v1/completions/render",
        json={
            "model": MODEL_NAME,
            "prompt": "Tell me a very long story about " * 10,
        },
    )
    dt = time.perf_counter() - t0

    assert resp.status_code == 200
    # Anything under a second implies no generation happened.
    assert dt < 1.0
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/entrypoints/openai/test_render.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/v1/worker/gpu/mm/encoder_runner.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.model_executor.models.interfaces import SupportsMultiModal
from vllm.multimodal.inputs import MultiModalKwargsItem
from vllm.multimodal.utils import group_mm_kwargs_by_modality
from vllm.v1.worker.gpu.mm.encoder_cache import EncoderCache
from vllm.v1.worker.utils import sanity_check_mm_encoder_outputs
class EncoderRunner:
    """Runs the multimodal encoder and merges its outputs into the
    decoder's input embeddings, using an EncoderCache for encoder outputs.
    """

    def __init__(
        self,
        model: SupportsMultiModal,
        max_num_tokens: int,
        hidden_size: int,
        encoder_cache: EncoderCache,
        dtype: torch.dtype,
        device: torch.device,
    ):
        self.model = model
        self.max_num_tokens = max_num_tokens
        self.hidden_size = hidden_size
        self.encoder_cache = encoder_cache
        self.dtype = dtype
        self.device = device
        # Persistent (max_num_tokens, hidden_size) buffer reused across steps
        # (see get_inputs_embeds, which copies into it for CUDA graphs).
        self.inputs_embeds = torch.zeros(
            max_num_tokens, hidden_size, dtype=dtype, device=device
        )

    def prepare_mm_inputs(
        self, scheduled_encoder_inputs: dict[str, list[int]]
    ) -> tuple[list[str], list[tuple[str, MultiModalKwargsItem]]]:
        """Collect (modality, kwargs) pairs and identifiers for the scheduled
        encoder inputs, skipping features whose data is None.

        :param scheduled_encoder_inputs: request id -> indices of that
            request's multimodal features to encode this step
        :return: parallel lists of feature identifiers and (modality, data)
        """
        mm_hashes: list[str] = []
        mm_kwargs: list[tuple[str, MultiModalKwargsItem]] = []
        for req_id, encoder_input_ids in scheduled_encoder_inputs.items():
            mm_features = self.encoder_cache.mm_features[req_id]
            for mm_input_id in encoder_input_ids:
                mm_feature = mm_features[mm_input_id]
                if mm_feature.data is None:
                    # No raw data to encode for this feature; skip it.
                    continue
                mm_hashes.append(mm_feature.identifier)
                mm_kwargs.append((mm_feature.modality, mm_feature.data))
        return mm_hashes, mm_kwargs

    @torch.inference_mode()
    def execute_mm_encoder(
        self,
        mm_kwargs: list[tuple[str, MultiModalKwargsItem]],
    ) -> list[torch.Tensor]:
        """Run the model's multimodal encoder over the given inputs, grouped
        by modality, and return one output tensor per input item."""
        encoder_outputs: list[torch.Tensor] = []
        for modality, num_items, mm_kwargs_group in group_mm_kwargs_by_modality(
            mm_kwargs, device=self.device, pin_memory=False
        ):
            curr_group_outputs = self.model.embed_multimodal(**mm_kwargs_group)
            # Verify the encoder returned exactly one output per item.
            sanity_check_mm_encoder_outputs(
                curr_group_outputs, expected_num_items=num_items
            )
            encoder_outputs.extend(curr_group_outputs)
        return encoder_outputs

    def gather_mm_embeddings(
        self,
        req_ids: list[str],
        total_num_scheduled_tokens: int,
        num_scheduled_tokens: np.ndarray,
        query_start_loc: np.ndarray,
        prefill_lens: np.ndarray,
        computed_prefill_lens: np.ndarray,
    ) -> tuple[list[torch.Tensor], torch.Tensor]:
        """Gather the cached encoder-output slices that overlap this step's
        scheduled token ranges.

        Arrays are indexed per request, parallel to ``req_ids``.

        :return: (list of embedding slices in request order, boolean mask over
            the flattened scheduled tokens marking multimodal positions)
        """
        is_prefilling = (computed_prefill_lens < prefill_lens).tolist()
        all_decode = not any(is_prefilling)
        if all_decode:
            # All decode requests, so no need to gather any embeddings.
            return [], torch.zeros(
                total_num_scheduled_tokens, dtype=torch.bool, device=self.device
            )
        # Per-request [query_start, query_end) token window for this step,
        # in prompt coordinates.
        query_start = computed_prefill_lens.tolist()
        query_end = (computed_prefill_lens + num_scheduled_tokens).tolist()
        mm_embeds: list[torch.Tensor] = []
        is_mm_embed = torch.zeros(
            total_num_scheduled_tokens, dtype=torch.bool, device="cpu", pin_memory=True
        )
        for i, req_id in enumerate(req_ids):
            if not is_prefilling[i]:
                # OPTIMIZATION: Skip decode requests.
                continue
            mm_features = self.encoder_cache.mm_features[req_id]
            for mm_feature in mm_features:
                pos_info = mm_feature.mm_position
                start_pos = pos_info.offset
                num_encoder_tokens = pos_info.length
                if start_pos >= query_end[i]:
                    # The encoder output is not needed in this step.
                    break
                if start_pos + num_encoder_tokens <= query_start[i]:
                    # The encoder output is already processed and stored
                    # in the decoder's KV cache.
                    continue
                # Overlap of the feature span with this step's window,
                # in feature-local coordinates.
                start_idx = max(query_start[i] - start_pos, 0)
                end_idx = min(query_end[i] - start_pos, num_encoder_tokens)
                assert start_idx < end_idx
                curr_embeds_start, curr_embeds_end = (
                    pos_info.get_embeds_indices_in_range(start_idx, end_idx)
                )
                # If there are no embeddings in the current range, we skip
                # gathering the embeddings.
                if curr_embeds_start == curr_embeds_end:
                    continue
                mm_hash = mm_feature.identifier
                encoder_output = self.encoder_cache.encoder_outputs.get(mm_hash, None)
                assert encoder_output is not None, f"Encoder cache miss for {mm_hash}."
                if (is_embed := pos_info.is_embed) is not None:
                    # Only a subset of positions in the span are embeddings.
                    is_embed = is_embed[start_idx:end_idx]
                    mm_embeds_item = encoder_output[curr_embeds_start:curr_embeds_end]
                else:
                    mm_embeds_item = encoder_output[start_idx:end_idx]
                # Offset of this feature within the flattened token batch.
                req_start_pos = query_start_loc[i] + start_pos - query_start[i]
                is_mm_embed[req_start_pos + start_idx : req_start_pos + end_idx] = (
                    True if is_embed is None else is_embed
                )
                mm_embeds.append(mm_embeds_item)
        # Copy the is_mm_embed tensor to the GPU.
        is_mm_embed = is_mm_embed.to(device=self.device, non_blocking=True)
        return mm_embeds, is_mm_embed

    @torch.inference_mode()
    def get_inputs_embeds(
        self,
        input_ids: torch.Tensor,
        mm_embeds: list[torch.Tensor],
        is_mm_embed: torch.Tensor,
    ) -> torch.Tensor:
        """Embed token ids, splicing in multimodal embeddings at the masked
        positions, and copy the result into the persistent buffer."""
        x = self.model.embed_input_ids(
            input_ids, multimodal_embeddings=mm_embeds, is_multimodal=is_mm_embed
        )
        # Copy to the pre-allocated buffer for CUDA graphs.
        self.inputs_embeds[: x.shape[0]] = x
        return self.inputs_embeds
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/mm/encoder_runner.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/kernels/moe/test_routing.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import pytest
import torch
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.router.router_factory import (
create_fused_moe_router,
)
from vllm.model_executor.models.llama4 import Llama4MoE
# Test parameters
MK_S = [(32, 256), (64, 512)]
TOP_KS = [2, 4, 6]
NUM_EXPERTS = [8, 16, 64]
def setup_eplb_state(enable_eplb: bool, global_num_experts: int) -> EplbLayerState:
    """Build an EPLB layer state with a trivial 1:1 logical->physical mapping.

    When EPLB is disabled, returns a default-constructed state. Otherwise,
    every logical expert maps to exactly one physical slot (no redundancy).
    """
    if not enable_eplb:
        return EplbLayerState()
    # Per-expert load counters (shape: num_experts), all starting at zero.
    load_view = torch.zeros(global_num_experts, dtype=torch.int32, device="cuda")
    # Identity logical->physical mapping with a single slot per expert
    # (shape: (num_logical_experts, 1)).
    l2p_map = torch.arange(
        global_num_experts, dtype=torch.int64, device="cuda"
    ).unsqueeze(-1)
    # Exactly one replica per logical expert (shape: (num_logical_experts,)).
    replica_count = torch.ones(global_num_experts, dtype=torch.int64, device="cuda")
    return EplbLayerState(
        expert_load_view=load_view,
        logical_to_physical_map=l2p_map,
        logical_replica_count=replica_count,
    )
def make_test_data(
    m: int, k: int, num_experts: int
) -> tuple[torch.Tensor, torch.Tensor]:
    """Random (m, k) hidden states (scaled down) and (m, num_experts) logits."""
    activations = torch.randn((m, k), device="cuda") / 10
    gating_logits = torch.randn((m, num_experts), device="cuda")
    return activations, gating_logits
def make_e_score_correction_bias(
    e_score_correction_bias_val: float,
    num_experts: int,
) -> torch.Tensor:
    """Build a constant expert-score correction bias vector.

    Args:
        e_score_correction_bias_val: Bias applied uniformly to every expert.
        num_experts: Number of experts (length of the returned vector).

    Returns:
        A float32 CUDA tensor of shape ``(num_experts,)`` filled with the bias.
    """
    return torch.full(
        (num_experts,), e_score_correction_bias_val, device="cuda", dtype=torch.float32
    )
def assert_routing_results_close(
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    baseline_weights: torch.Tensor,
    baseline_ids: torch.Tensor,
    rtol: float = 1e-3,
    atol: float = 1e-3,
):
    """
    Compare routing results, sorting by expert ID first to handle non-deterministic
    ordering from sorted=False in topk.
    """
    baseline_ids = baseline_ids.to(topk_ids.dtype)
    # Align both result sets on ascending expert id before comparing.
    order_actual = torch.argsort(topk_ids, dim=-1)
    order_baseline = torch.argsort(baseline_ids, dim=-1)
    ids_actual = torch.gather(topk_ids, 1, order_actual)
    ids_baseline = torch.gather(baseline_ids, 1, order_baseline)
    weights_actual = torch.gather(topk_weights, 1, order_actual)
    weights_baseline = torch.gather(baseline_weights, 1, order_baseline)
    # Expert ids must match exactly; weights up to tolerance.
    torch.testing.assert_close(ids_actual, ids_baseline)
    torch.testing.assert_close(
        weights_actual, weights_baseline, rtol=rtol, atol=atol
    )
def baseline_fused_topk(
    router_logits: torch.Tensor, top_k: int, renormalize: bool
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Reference implementation of standard fused top-k routing:
    softmax over the logits, pick the top-k experts, and optionally
    renormalize the selected weights to sum to one.
    """
    probs = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
    # sorted=False mirrors the vllm kernel (vllm_is_batch_invariant
    # defaults to False).
    weights, expert_ids = torch.topk(probs, top_k, dim=-1, sorted=False)
    if renormalize:
        weights = weights / weights.sum(dim=-1, keepdim=True)
    return weights.to(torch.float32), expert_ids.to(torch.int32)
def baseline_fused_topk_bias(
    router_logits: torch.Tensor,
    top_k: int,
    renormalize: bool,
    e_score_correction_bias: torch.Tensor,
    routed_scaling_factor: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Reference for fused top-k routing with expert-score bias correction.

    Experts are *selected* using biased softmax scores, but their *weights*
    come from the unbiased scores; weights are renormalized first (when
    requested) and then scaled by ``routed_scaling_factor``.
    """
    probs = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
    biased = probs + e_score_correction_bias.unsqueeze(0)
    # Selection uses the biased scores (sorted=False matches the kernel).
    expert_ids = torch.topk(biased, k=top_k, dim=-1, sorted=False)[1]
    # Weights come from the unbiased probabilities.
    weights = probs.gather(1, expert_ids)
    # Renormalize BEFORE applying the scaling factor.
    if renormalize:
        weights = weights / weights.sum(dim=-1, keepdim=True)
    if routed_scaling_factor != 1.0:
        weights = weights * routed_scaling_factor
    return weights.to(torch.float32), expert_ids.to(torch.int32)
def baseline_grouped_topk(
    router_logits: torch.Tensor,
    top_k: int,
    num_expert_group: int,
    topk_group: int,
    scoring_func: str,
    renormalize: bool,
    e_score_correction_bias: torch.Tensor | None,
    routed_scaling_factor: float,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Baseline for grouped top-k routing (e.g., DeepSeek).
    Algorithm:
    1. Apply scoring function (softmax or sigmoid)
    2. Optionally add bias
    3. Select top-k groups based on max scores within each group
    4. Mask scores to only include selected groups
    5. Select top-k experts from masked scores
    6. Apply scaling factor
    7. Optionally renormalize
    """
    num_token = router_logits.shape[0]
    # Apply scoring function
    if scoring_func == "softmax":
        scores = torch.softmax(router_logits, dim=-1, dtype=torch.float32)
    elif scoring_func == "sigmoid":
        scores = torch.sigmoid(router_logits.float())
    else:
        raise ValueError(f"Unsupported scoring function: {scoring_func}")
    # Handle bias correction
    if e_score_correction_bias is not None:
        # Keep the unbiased scores for the final weight lookup below.
        original_scores = scores
        scores = scores + e_score_correction_bias.unsqueeze(0)
        # For bias case, use sum of top-2 scores in each group
        group_scores = (
            scores.view(num_token, num_expert_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
        )
    else:
        # Use max score in each group
        group_scores = scores.view(num_token, num_expert_group, -1).max(dim=-1).values
    # Select top-k groups
    group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1]
    # Create mask for selected groups
    group_mask = torch.zeros_like(group_scores)
    group_mask.scatter_(1, group_idx, 1)
    # Expand mask to all experts
    score_mask = (
        group_mask.unsqueeze(-1)
        .expand(num_token, num_expert_group, scores.shape[-1] // num_expert_group)
        .reshape(num_token, -1)
    )
    # Mask scores (set non-selected to -inf)
    tmp_scores = scores.masked_fill(~score_mask.bool(), float("-inf"))
    # Select top-k experts
    if e_score_correction_bias is not None:
        # Select with biased scores, but read weights from unbiased scores.
        topk_ids = torch.topk(tmp_scores, k=top_k, dim=-1, sorted=False)[1]
        topk_weights = original_scores.gather(1, topk_ids)
    else:
        topk_weights, topk_ids = torch.topk(tmp_scores, k=top_k, dim=-1, sorted=False)
    # Renormalize if needed
    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    # Apply scaling factor
    if routed_scaling_factor != 1.0:
        topk_weights *= routed_scaling_factor
    return topk_weights.to(torch.float32), topk_ids.to(torch.int32)
def baseline_custom_llama4(
    router_logits: torch.Tensor, top_k: int
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Reference for Llama4 custom routing: top-k directly on the raw logits
    (no softmax), then sigmoid applied to the selected scores.
    """
    picked_scores, picked_ids = torch.topk(router_logits, top_k, dim=-1)
    gate = torch.sigmoid(picked_scores.float())
    return gate.to(torch.float32), picked_ids.to(torch.int32)
@pytest.mark.parametrize("m,k", MK_S)
@pytest.mark.parametrize("top_k", TOP_KS)
@pytest.mark.parametrize("global_num_experts", NUM_EXPERTS)
@pytest.mark.parametrize("renormalize", [False, True])
@pytest.mark.parametrize("enable_eplb", [False, True])
def test_fused_topk(
    m: int,
    k: int,
    top_k: int,
    global_num_experts: int,
    renormalize: bool,
    enable_eplb: bool,
):
    """Standard fused top-k routing matches the pure-torch reference."""
    if top_k > global_num_experts:
        pytest.skip(f"top_k ({top_k}) > global_num_experts ({global_num_experts})")
    eplb_state = setup_eplb_state(enable_eplb, global_num_experts)
    router = create_fused_moe_router(
        top_k=top_k,
        global_num_experts=global_num_experts,
        renormalize=renormalize,
        enable_eplb=enable_eplb,
        eplb_state=eplb_state,
    )
    hidden_states, router_logits = make_test_data(m, k, global_num_experts)
    # Router under test vs. the baseline implementation.
    got_weights, got_ids = router.select_experts(hidden_states, router_logits)
    ref_weights, ref_ids = baseline_fused_topk(router_logits, top_k, renormalize)
    assert_routing_results_close(got_weights, got_ids, ref_weights, ref_ids)
@pytest.mark.parametrize("m,k", MK_S)
@pytest.mark.parametrize("top_k", TOP_KS)
@pytest.mark.parametrize("global_num_experts", NUM_EXPERTS)
@pytest.mark.parametrize("renormalize", [False, True])
@pytest.mark.parametrize("enable_eplb", [False, True])
@pytest.mark.parametrize("e_score_correction_bias_val", [0.9])
@pytest.mark.parametrize("routed_scaling_factor", [1.0, 1.1])
def test_fused_topk_bias(
    m: int,
    k: int,
    top_k: int,
    global_num_experts: int,
    renormalize: bool,
    enable_eplb: bool,
    e_score_correction_bias_val: float,
    routed_scaling_factor: float,
):
    """Fused top-k with bias correction matches the pure-torch reference."""
    if top_k > global_num_experts:
        pytest.skip(f"top_k ({top_k}) > global_num_experts ({global_num_experts})")
    eplb_state = setup_eplb_state(enable_eplb, global_num_experts)
    bias = make_e_score_correction_bias(
        e_score_correction_bias_val,
        global_num_experts,
    )
    router = create_fused_moe_router(
        e_score_correction_bias=bias,
        routed_scaling_factor=routed_scaling_factor,
        top_k=top_k,
        global_num_experts=global_num_experts,
        renormalize=renormalize,
        enable_eplb=enable_eplb,
        eplb_state=eplb_state,
    )
    hidden_states, router_logits = make_test_data(m, k, global_num_experts)
    # Router under test vs. the baseline implementation.
    got_weights, got_ids = router.select_experts(hidden_states, router_logits)
    ref_weights, ref_ids = baseline_fused_topk_bias(
        router_logits,
        top_k,
        renormalize,
        bias,
        routed_scaling_factor,
    )
    assert_routing_results_close(got_weights, got_ids, ref_weights, ref_ids)
@pytest.mark.parametrize("m,k", MK_S)
@pytest.mark.parametrize("top_k", TOP_KS)
@pytest.mark.parametrize(
    "global_num_experts,num_expert_group,topk_group",
    [
        (64, 8, 4),  # 8 groups of 8 experts, select 4 groups
        (32, 4, 2),  # 4 groups of 8 experts, select 2 groups
    ],
)
@pytest.mark.parametrize("renormalize", [False, True])
@pytest.mark.parametrize("enable_eplb", [False, True])
@pytest.mark.parametrize("e_score_correction_bias_val", [0.9])
@pytest.mark.parametrize("routed_scaling_factor", [1.0, 1.1])
@pytest.mark.parametrize("scoring_func", ["sigmoid", "softmax"])
def test_grouped_topk(
    m: int,
    k: int,
    top_k: int,
    global_num_experts: int,
    renormalize: bool,
    enable_eplb: bool,
    num_expert_group: int,
    topk_group: int,
    scoring_func: str,
    e_score_correction_bias_val: float,
    routed_scaling_factor: float,
):
    """Grouped top-k routing matches the pure-torch reference."""
    if top_k > global_num_experts:
        pytest.skip(f"top_k ({top_k}) > global_num_experts ({global_num_experts})")
    eplb_state = setup_eplb_state(enable_eplb, global_num_experts)
    bias = make_e_score_correction_bias(
        e_score_correction_bias_val,
        global_num_experts,
    )
    router = create_fused_moe_router(
        use_grouped_topk=True,
        num_expert_group=num_expert_group,
        topk_group=topk_group,
        scoring_func=scoring_func,
        e_score_correction_bias=bias,
        routed_scaling_factor=routed_scaling_factor,
        top_k=top_k,
        global_num_experts=global_num_experts,
        renormalize=renormalize,
        enable_eplb=enable_eplb,
        eplb_state=eplb_state,
    )
    hidden_states, router_logits = make_test_data(m, k, global_num_experts)
    # Router under test vs. the baseline implementation.
    got_weights, got_ids = router.select_experts(hidden_states, router_logits)
    ref_weights, ref_ids = baseline_grouped_topk(
        router_logits,
        top_k,
        num_expert_group,
        topk_group,
        scoring_func,
        renormalize,
        bias,
        routed_scaling_factor,
    )
    assert_routing_results_close(got_weights, got_ids, ref_weights, ref_ids)
@pytest.mark.parametrize("m,k", MK_S)
@pytest.mark.parametrize("top_k", TOP_KS)
@pytest.mark.parametrize("global_num_experts", NUM_EXPERTS)
@pytest.mark.parametrize("renormalize", [False, True])
@pytest.mark.parametrize("enable_eplb", [False, True])
@pytest.mark.parametrize("custom_routing_function", [Llama4MoE.custom_routing_function])
def test_custom(
    m: int,
    k: int,
    top_k: int,
    global_num_experts: int,
    renormalize: bool,
    enable_eplb: bool,
    custom_routing_function: Callable,
):
    """Custom routing function (Llama4) matches the pure-torch reference."""
    if top_k > global_num_experts:
        pytest.skip(f"top_k ({top_k}) > global_num_experts ({global_num_experts})")
    eplb_state = setup_eplb_state(enable_eplb, global_num_experts)
    router = create_fused_moe_router(
        top_k=top_k,
        global_num_experts=global_num_experts,
        custom_routing_function=custom_routing_function,
        renormalize=renormalize,
        enable_eplb=enable_eplb,
        eplb_state=eplb_state,
    )
    hidden_states, router_logits = make_test_data(m, k, global_num_experts)
    # Router under test vs. the Llama4 sigmoid baseline.
    got_weights, got_ids = router.select_experts(hidden_states, router_logits)
    ref_weights, ref_ids = baseline_custom_llama4(router_logits, top_k)
    assert_routing_results_close(got_weights, got_ids, ref_weights, ref_ids)
# TODO: is the other test sufficient?
# # See tests/test_routing_simulatator.py
# @pytest.mark.parametrize("m,k", MK_S)
# @pytest.mark.parametrize("top_k", TOP_KS)
# @pytest.mark.parametrize("global_num_experts", NUM_EXPERTS)
# @pytest.mark.parametrize("renormalize", [False, True])
# @pytest.mark.parametrize("enable_eplb", [False, True])
# @pytest.mark.parametrize("strategy", ["uniform_random", "normal_routing"])
# def test_simulated(
# m: int,
# k: int,
# top_k: int,
# global_num_experts: int,
# renormalize: bool,
# enable_eplb: bool,
# strategy: str,
# monkeypatch,
# ):
#     eplb_state = setup_eplb_state(enable_eplb, global_num_experts)
# monkeypatch.setenv("VLLM_MOE_ROUTING_SIMULATION_STRATEGY", strategy)
# router = create_fused_moe_router(
# top_k=top_k,
# global_num_experts=global_num_experts,
# enable_eplb=enable_eplb,
# eplb_state=eplb_state,
# )
# hidden_states, router_logits = make_test_data(m, k, global_num_experts)
# topk_weights, topk_ids = router.select_experts(hidden_states, router_logits)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/kernels/moe/test_routing.py",
"license": "Apache License 2.0",
"lines": 408,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/base_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from abc import abstractmethod
from collections.abc import Callable
import torch
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.router.fused_moe_router import (
FusedMoERouter,
)
from vllm.platforms import current_platform
if current_platform.is_cuda_alike():
    # Compiled implementation for CUDA-like platforms. `dynamic=True` avoids
    # recompilation as the number of routed tokens changes between steps.
    @torch.compile(dynamic=True, backend=current_platform.simple_compile_backend)
    def eplb_map_to_physical_and_record(
        topk_ids: torch.Tensor,
        expert_load_view: torch.Tensor,
        logical_to_physical_map: torch.Tensor,
        logical_replica_count: torch.Tensor,
    ) -> torch.Tensor:
        """
        Map the logical expert ids to physical expert ids
        and record the expert load metrics.
        This will select a pseudo-random replica for each logical expert.
        Only used for EPLB.
        Args:
            topk_ids: The logical expert ids.
            expert_load_view: The expert load view (per-physical-expert
                token counts, updated in place).
            logical_to_physical_map: The logical to physical map.
            logical_replica_count: The logical replica count.
        Returns:
            The physical expert ids (same shape as `topk_ids`).
        """
        # 1. Convert the logical expert ids to physical expert ids
        # Directly select a random replica for each logical expert
        # In case `indices_type` is not `torch.long` or `torch.int`,
        # e.g. `torch.uint32` as required by dispatch/combine kernels
        topk_ids_long = topk_ids.long()
        # Use (token position) modulo (replica count)
        # to deterministically choose a replica
        replica_count = logical_replica_count[topk_ids_long]
        # Flatten-position based index, reshaped back to `topk_ids` shape
        pos_indices = torch.arange(
            topk_ids.numel(), device=topk_ids.device, dtype=torch.long
        ).reshape_as(topk_ids)
        # Compute pseudo-random indices by modulo; deterministic for a given
        # flattened position, so compile-friendly and reproducible.
        replica_indices = (pos_indices % replica_count).unsqueeze(-1)
        physical_ids = (
            logical_to_physical_map[topk_ids_long]
            .gather(-1, replica_indices)
            .squeeze(-1)
        )
        topk_ids = physical_ids
        # 2. Record expert load metrics.
        # TODO(bowen): When using `FusedMoEModularKernel`, this
        # can be done in a more unified way, since
        # `FusedMoEPrepareAndFinalize` will return the expert
        # token count, in some cases directly from the kernel.
        # However, now there are many code paths not using
        # the modular kernel, e.g. calling `fused_experts`,
        # so we decide to keep the logic here.
        #
        # If later refactor moved all the MoE kernel calls
        # to the modular kernel, we can move this logic there
        # to achieve better efficiency.
        # `expert_load_view`: (num_physical_experts,)
        # `torch.bincount` is not compilable, so use `scatter_add_` instead.
        topk_ids_flatten = topk_ids.flatten()
        expert_load_view.scatter_add_(
            dim=0,
            index=topk_ids_flatten.long(),
            src=torch.ones_like(topk_ids_flatten).to(expert_load_view),
        )
        return topk_ids
else:
    # Non-CUDA platforms: EPLB remapping is a no-op, ids pass through.
    def eplb_map_to_physical_and_record(
        topk_ids: torch.Tensor,
        expert_load_view: torch.Tensor,
        logical_to_physical_map: torch.Tensor,
        logical_replica_count: torch.Tensor,
    ) -> torch.Tensor:
        # CPU fallback: no EPLB so just return as is
        return topk_ids
class BaseRouter(FusedMoERouter):
    """
    Shared scaffolding for concrete router implementations.

    ``select_experts()`` is a template method: it handles the common
    validation, EPLB logical-to-physical id mapping, and index dtype
    conversion, while the actual routing algorithm is delegated to the
    abstract ``_compute_routing()`` hook implemented by subclasses.
    """

    def __init__(
        self,
        top_k: int,
        global_num_experts: int,
        eplb_state: EplbLayerState,
        enable_eplb: bool = False,
        # TODO(bnell): Once the MK is constructed at layer init time, we
        # can make this a plain value instead of a callback.
        indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    ):
        """
        The indices dtype is supplied lazily via ``indices_type_getter``
        because it comes from modular kernels that are only created after
        the MoE layer/router itself has been constructed.
        """
        super().__init__()
        self.top_k = top_k
        self.global_num_experts = global_num_experts
        self.eplb_state = eplb_state
        self.enable_eplb = enable_eplb
        self.indices_type_getter = indices_type_getter
        # Optional observer of the logical (pre-EPLB) routed expert ids.
        self.capture_fn: Callable[[torch.Tensor], None] | None = None

    def set_capture_fn(self, capture_fn: Callable[[torch.Tensor], None] | None) -> None:
        """Register (or clear, with None) a callback that observes the
        logical routed expert ids."""
        self.capture_fn = capture_fn

    def _validate_eplb_state(self) -> None:
        """Fail fast when EPLB is enabled but its state was never populated."""
        if not self.enable_eplb:
            return
        if self.eplb_state.expert_load_view is None:
            raise ValueError("enable_eplb=True requires expert_load_view != None")
        if self.eplb_state.logical_to_physical_map is None:
            raise ValueError(
                "enable_eplb=True requires logical_to_physical_map != None"
            )
        if self.eplb_state.logical_replica_count is None:
            raise ValueError(
                "enable_eplb=True requires logical_replica_count != None"
            )

    def _get_indices_type(self) -> torch.dtype | None:
        """Resolve the desired indices dtype, or None when no getter is set."""
        getter = self.indices_type_getter
        if getter is None:
            return None
        return getter()

    def _apply_eplb_mapping(self, topk_ids: torch.Tensor) -> torch.Tensor:
        """Translate logical expert ids to physical ids when EPLB is enabled;
        otherwise return the ids untouched."""
        if not self.enable_eplb:
            return topk_ids
        state = self.eplb_state
        assert state.expert_load_view is not None
        assert state.logical_to_physical_map is not None
        assert state.logical_replica_count is not None
        return eplb_map_to_physical_and_record(
            topk_ids=topk_ids,
            expert_load_view=state.expert_load_view,
            logical_to_physical_map=state.logical_to_physical_map,
            logical_replica_count=state.logical_replica_count,
        )

    def _convert_indices_dtype(
        self, topk_ids: torch.Tensor, indices_type: torch.dtype | None
    ) -> torch.Tensor:
        """Cast ``topk_ids`` to ``indices_type`` when one is requested and
        the dtype differs."""
        if indices_type is None or topk_ids.dtype == indices_type:
            return topk_ids
        return topk_ids.to(dtype=indices_type)

    @abstractmethod
    def _compute_routing(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        indices_type: torch.dtype | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Run the concrete routing algorithm (e.g. grouped_topk, fused_topk,
        a custom routing function, ...).

        Args:
            hidden_states: Input hidden states
            router_logits: Router logits for expert selection
            indices_type: Desired dtype for expert indices (may be None)
        Returns:
            tuple of (topk_weights, topk_ids)
        """
        raise NotImplementedError

    def select_experts(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """
        Route the input hidden states to the top-k experts based on the
        router logits.

        Pipeline: validate EPLB state -> resolve indices dtype -> subclass
        routing -> (optional) capture of logical ids -> EPLB mapping ->
        indices dtype conversion.

        Returns:
            (topk_weights, topk_ids)
            (tuple[torch.Tensor, torch.Tensor]):
                The weights and expert ids computation result.
            **Compatibility**: When EPLB is not enabled, the returned ids are
            equivalent to global logical ids, so should be compatible with
            plain MoE implementations without redundant experts.
        """
        self._validate_eplb_state()
        indices_type = self._get_indices_type()
        topk_weights, topk_ids = self._compute_routing(
            hidden_states, router_logits, indices_type
        )
        # The capture hook observes logical ids, i.e. before EPLB remapping.
        capture = self.capture_fn
        if capture is not None:
            capture(topk_ids)
        topk_ids = self._apply_eplb_mapping(topk_ids)
        topk_ids = self._convert_indices_dtype(topk_ids, indices_type)
        return topk_weights, topk_ids
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/base_router.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/custom_routing_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.config import RoutingMethodType
from vllm.model_executor.layers.fused_moe.router.base_router import BaseRouter
class CustomRoutingRouter(BaseRouter):
    """Router that delegates expert selection to a user-supplied callable."""

    def __init__(
        self,
        top_k: int,
        global_num_experts: int,
        eplb_state: EplbLayerState,
        custom_routing_function: Callable,
        renormalize: bool = True,
        enable_eplb: bool = False,
        indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    ):
        super().__init__(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )
        self.custom_routing_function = custom_routing_function
        self.renormalize = renormalize

    @property
    def routing_method_type(self) -> RoutingMethodType:
        """Report Llama4 when the callable is the Llama4 router, else Custom."""
        # Deferred import: Llama4MoE lives in the models package.
        from vllm.model_executor.models.llama4 import Llama4MoE

        # NOTE: FLASHINFER_TRTLLM support the Llama4 router.
        if self.custom_routing_function == Llama4MoE.custom_routing_function:
            return RoutingMethodType.Llama4
        return RoutingMethodType.Custom

    def _compute_routing(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        indices_type: torch.dtype | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Invoke the user-provided routing callable and normalize dtypes."""
        weights, ids = self.custom_routing_function(
            hidden_states=hidden_states,
            gating_output=router_logits,
            topk=self.top_k,
            renormalize=self.renormalize,
        )
        ids_dtype = torch.int32 if indices_type is None else indices_type
        return weights.to(torch.float32), ids.to(ids_dtype)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/custom_routing_router.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/fused_topk_bias_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
import vllm._custom_ops as ops
from vllm._aiter_ops import rocm_aiter_ops
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.batch_invariant import (
vllm_is_batch_invariant,
)
from vllm.model_executor.layers.fused_moe.config import (
RoutingMethodType,
get_routing_method_type,
)
from vllm.model_executor.layers.fused_moe.router.base_router import BaseRouter
def vllm_topk_softmax(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool = False,
    e_score_correction_bias: torch.Tensor | None = None,
) -> tuple[torch.Tensor, ...]:
    """Run the vLLM fused top-k softmax kernel (with optional score
    correction bias), writing results into the provided output buffers."""
    ops.topk_softmax(topk_weights, topk_indices, token_expert_indices,
                     gating_output, renormalize, e_score_correction_bias)
    return (topk_weights, topk_indices)
def vllm_topk_sigmoid(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool = False,
    e_score_correction_bias: torch.Tensor | None = None,
) -> tuple[torch.Tensor, ...]:
    """Run the vLLM fused top-k sigmoid kernel (with optional score
    correction bias), writing results into the provided output buffers."""
    ops.topk_sigmoid(topk_weights, topk_indices, token_expert_indices,
                     gating_output, renormalize, e_score_correction_bias)
    return (topk_weights, topk_indices)
def fused_topk_bias(
    hidden_states: torch.Tensor,
    gating_output: torch.Tensor,
    e_score_correction_bias: torch.Tensor,
    topk: int,
    renormalize: bool,
    scoring_func: str = "softmax",
    indices_type: torch.dtype | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Top-k expert selection with an ``e_score_correction_bias`` added to the
    scores used for selection.

    When the ROCm AITER fused-MoE path is *not* enabled, the fused vLLM
    kernels (``vllm_topk_softmax`` / ``vllm_topk_sigmoid``) are used.
    Otherwise a pure-PyTorch fallback runs, in which the bias only affects
    which experts are chosen while the returned weights come from the
    unbiased scores.

    Args:
        hidden_states: (num_tokens, hidden_size); used for sizing/device.
        gating_output: (num_tokens, num_experts) router logits.
        e_score_correction_bias: per-expert bias added before selection.
        topk: number of experts selected per token.
        renormalize: renormalize the selected weights to sum to 1.
        scoring_func: "softmax" or "sigmoid".
        indices_type: optional dtype for the returned ids (default int32).

    Returns:
        (topk_weights, topk_ids), each of shape (num_tokens, topk).

    Raises:
        ValueError: if ``scoring_func`` is neither "softmax" nor "sigmoid".
    """
    if not rocm_aiter_ops.is_fused_moe_enabled():
        assert hidden_states.size(0) == gating_output.size(0), (
            "Number of tokens mismatch"
        )
        M, _ = hidden_states.size()
        topk_weights = torch.empty(
            M, topk, dtype=torch.float32, device=hidden_states.device
        )
        topk_ids = torch.empty(
            M,
            topk,
            dtype=torch.int32 if indices_type is None else indices_type,
            device=hidden_states.device,
        )
        token_expert_indices = torch.empty(
            M, topk, dtype=torch.int32, device=hidden_states.device
        )
        # Both kernels share a signature; select one by scoring function
        # instead of duplicating the call/return in each branch.
        if scoring_func == "softmax":
            topk_func = vllm_topk_softmax
        elif scoring_func == "sigmoid":
            topk_func = vllm_topk_sigmoid
        else:
            raise ValueError(f"Unsupported scoring function: {scoring_func}")
        topk_weights, topk_ids = topk_func(
            topk_weights,
            topk_ids,
            token_expert_indices,
            gating_output,
            renormalize,
            e_score_correction_bias,
        )
        return topk_weights, topk_ids

    # Pure-PyTorch fallback (ROCm AITER fused-MoE path enabled).
    n_routed_experts = gating_output.shape[-1]
    if scoring_func == "softmax":
        scores = gating_output.softmax(dim=-1)
    elif scoring_func == "sigmoid":
        scores = gating_output.sigmoid()
    else:
        raise ValueError(f"Unsupported scoring function: {scoring_func}")
    # The bias only influences expert *selection*; routing weights below are
    # gathered from the unbiased scores.
    scores_for_choice = scores.view(
        -1, n_routed_experts
    ) + e_score_correction_bias.unsqueeze(0)
    # For batch invariance, use sorted=True to ensure deterministic expert selection
    use_sorted = vllm_is_batch_invariant()
    topk_indices = torch.topk(scores_for_choice, k=topk, dim=-1, sorted=use_sorted)[1]
    topk_weights = scores.gather(1, topk_indices)
    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    return topk_weights.to(torch.float32), topk_indices.to(
        torch.int32 if indices_type is None else indices_type
    )
class FusedTopKBiasRouter(BaseRouter):
    """Router that applies an e_score_correction_bias during top-k selection."""

    def __init__(
        self,
        top_k: int,
        global_num_experts: int,
        eplb_state: EplbLayerState,
        e_score_correction_bias: torch.Tensor,
        scoring_func: str,
        renormalize: bool = True,
        routed_scaling_factor: float = 1.0,
        enable_eplb: bool = False,
        indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    ):
        super().__init__(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )
        self.e_score_correction_bias = e_score_correction_bias
        self.renormalize = renormalize
        self.scoring_func = scoring_func
        self.routed_scaling_factor = routed_scaling_factor

    @property
    def routing_method_type(self) -> RoutingMethodType:
        """Routing method identifier derived from this router's configuration."""
        return get_routing_method_type(
            scoring_func=self.scoring_func,
            top_k=self.top_k,
            renormalize=self.renormalize,
            num_expert_group=None,
            has_e_score_bias=True,
        )

    def _compute_routing(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        indices_type: torch.dtype | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Select experts via fused_topk_bias, then apply routed scaling."""
        weights, ids = fused_topk_bias(
            hidden_states=hidden_states,
            gating_output=router_logits,
            e_score_correction_bias=self.e_score_correction_bias.data,
            topk=self.top_k,
            renormalize=self.renormalize,
            scoring_func=self.scoring_func,
            indices_type=indices_type,
        )
        scale = self.routed_scaling_factor
        if scale != 1.0:
            weights *= scale
        return weights, ids
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/fused_topk_bias_router.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/fused_topk_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
import vllm._custom_ops as ops
from vllm._aiter_ops import rocm_aiter_ops
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.config import (
RoutingMethodType,
get_routing_method_type,
)
from vllm.model_executor.layers.fused_moe.router.base_router import BaseRouter
def vllm_topk_softmax(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool = False,
) -> tuple[torch.Tensor, ...]:
    """Run the vLLM fused top-k softmax kernel, writing results into the
    provided output buffers."""
    ops.topk_softmax(topk_weights, topk_indices, token_expert_indices,
                     gating_output, renormalize)
    return (topk_weights, topk_indices)
def vllm_topk_sigmoid(
    topk_weights: torch.Tensor,
    topk_indices: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
    renormalize: bool = False,
) -> tuple[torch.Tensor, ...]:
    """Run the vLLM fused top-k sigmoid kernel, writing results into the
    provided output buffers."""
    ops.topk_sigmoid(topk_weights, topk_indices, token_expert_indices,
                     gating_output, renormalize)
    return (topk_weights, topk_indices)
def dispatch_topk_softmax_func(
    use_rocm_aiter: bool = False,
) -> Callable[..., tuple[torch.Tensor, ...]]:
    """Pick the ROCm AITER top-k softmax kernel or the vLLM default."""
    return rocm_aiter_ops.topk_softmax if use_rocm_aiter else vllm_topk_softmax
def dispatch_topk_sigmoid_func(
    use_rocm_aiter: bool = False,
) -> Callable[..., tuple[torch.Tensor, ...]]:
    """Pick the ROCm AITER top-k sigmoid kernel or the vLLM default."""
    return rocm_aiter_ops.topk_sigmoid if use_rocm_aiter else vllm_topk_sigmoid
def fused_topk(
    hidden_states: torch.Tensor,
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    indices_type: torch.dtype | None = None,
    scoring_func: str = "softmax",
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Standard fused top-k expert selection.

    Args:
        hidden_states: (num_tokens, hidden_size); used for sizing/device.
        gating_output: (num_tokens, num_experts) router logits.
        topk: number of experts selected per token.
        renormalize: renormalize the selected weights to sum to 1.
        indices_type: optional dtype for the returned ids (default int32).
        scoring_func: "softmax" or "sigmoid".

    Returns:
        (topk_weights, topk_ids, token_expert_indices), each (num_tokens, topk).

    Raises:
        ValueError: if ``scoring_func`` is neither "softmax" nor "sigmoid".
    """
    assert hidden_states.size(0) == gating_output.size(0), "Number of tokens mismatch"
    M, _ = hidden_states.size()
    topk_weights = torch.empty(
        M, topk, dtype=torch.float32, device=hidden_states.device
    )
    topk_ids = torch.empty(
        M,
        topk,
        dtype=torch.int32 if indices_type is None else indices_type,
        device=hidden_states.device,
    )
    token_expert_indices = torch.empty(
        M, topk, dtype=torch.int32, device=hidden_states.device
    )
    # Hoist the backend check and dedupe the previously copy-pasted
    # call/return in the softmax and sigmoid branches.
    use_rocm_aiter = rocm_aiter_ops.is_fused_moe_enabled()
    if scoring_func == "softmax":
        topk_func = dispatch_topk_softmax_func(use_rocm_aiter=use_rocm_aiter)
    elif scoring_func == "sigmoid":
        topk_func = dispatch_topk_sigmoid_func(use_rocm_aiter=use_rocm_aiter)
    else:
        raise ValueError(f"Unsupported scoring function: {scoring_func}")
    topk_weights, topk_ids = topk_func(
        topk_weights, topk_ids, token_expert_indices, gating_output, renormalize
    )
    return topk_weights, topk_ids, token_expert_indices
class FusedTopKRouter(BaseRouter):
    """Default router using standard fused top-k routing.

    Thin wrapper around :func:`fused_topk`: scores the router logits with
    ``scoring_func`` and picks the top-k experts, optionally renormalizing
    the weights.
    """

    def __init__(
        self,
        top_k: int,
        global_num_experts: int,
        eplb_state: EplbLayerState,
        scoring_func: str = "softmax",
        renormalize: bool = True,
        enable_eplb: bool = False,
        indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    ):
        """
        Args:
            top_k: number of experts selected per token.
            global_num_experts: total number of (logical) experts.
            eplb_state: per-layer EPLB state (consumed by the base class).
            scoring_func: "softmax" or "sigmoid".
            renormalize: renormalize the selected weights to sum to 1.
            enable_eplb: enable expert-parallel load balancing.
            indices_type_getter: lazy provider of the expert-index dtype.
        """
        super().__init__(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )
        self.renormalize = renormalize
        self.scoring_func = scoring_func

    @property
    def routing_method_type(self) -> RoutingMethodType:
        """Routing method identifier derived from this router's configuration."""
        return get_routing_method_type(
            scoring_func=self.scoring_func,
            top_k=self.top_k,
            renormalize=self.renormalize,
            num_expert_group=None,
            has_e_score_bias=False,
        )

    def _compute_routing(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        indices_type: torch.dtype | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Compute routing using standard fused top-k."""
        # The third output (token_expert_indices) is unused by this router.
        topk_weights, topk_ids, _ = fused_topk(
            hidden_states=hidden_states,
            gating_output=router_logits,
            topk=self.top_k,
            renormalize=self.renormalize,
            indices_type=indices_type,
            scoring_func=self.scoring_func,
        )
        return topk_weights, topk_ids
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/fused_topk_router.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/grouped_topk_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
from functools import partial
import torch
from vllm import _custom_ops as ops
from vllm import envs as envs
from vllm._aiter_ops import rocm_aiter_ops
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.custom_op import CustomOp
from vllm.model_executor.layers.batch_invariant import (
vllm_is_batch_invariant,
)
from vllm.model_executor.layers.fused_moe.config import (
RoutingMethodType,
get_routing_method_type,
)
from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
rocm_aiter_grouped_topk,
)
from vllm.model_executor.layers.fused_moe.router.base_router import BaseRouter
from vllm.model_executor.layers.fused_moe.router.fused_topk_bias_router import (
fused_topk_bias,
)
from vllm.model_executor.layers.fused_moe.router.fused_topk_router import fused_topk
from vllm.model_executor.utils import maybe_disable_graph_partition
from vllm.platforms import current_platform
def fused_grouped_topk(
    hidden_states: torch.Tensor,
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    e_score_correction_bias: torch.Tensor,
    num_expert_group: int = 0,
    topk_group: int = 0,
    scoring_func: str = "softmax",
    routed_scaling_factor: float = 1.0,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Grouped top-k via the fused ``ops.grouped_topk`` kernel.

    For "sigmoid" the raw logits are handed to the kernel, which applies the
    sigmoid itself (kernel scoring_func=1). For "softmax" the softmax is
    computed here and the kernel receives pre-computed scores
    (kernel scoring_func=0).

    Returns:
        (topk_values, topk_ids); the fused kernel outputs float32 values and
        int32 indices directly.

    Raises:
        ValueError: if ``scoring_func`` is neither "softmax" nor "sigmoid".
    """
    assert hidden_states.size(0) == gating_output.size(0), "Number of tokens mismatch"
    # Pick the kernel input and its activation flag once; the kernel call
    # itself was previously duplicated across both branches.
    if scoring_func == "sigmoid":
        # Fully fused path: kernel applies sigmoid to the raw logits.
        kernel_input = gating_output
        kernel_scoring_func = 1
    elif scoring_func == "softmax":
        # Apply softmax in Python, then use the fused kernel on the scores.
        # TODO: Add support for softmax in kernel
        kernel_input = torch.softmax(gating_output, dim=-1)
        kernel_scoring_func = 0  # no activation, scores already computed
    else:
        raise ValueError(f"Unsupported scoring function: {scoring_func}")
    topk_values, topk_indices = ops.grouped_topk(
        kernel_input,
        num_expert_group,
        topk_group,
        topk,
        renormalize,
        routed_scaling_factor,
        e_score_correction_bias,
        kernel_scoring_func,
    )
    return topk_values, topk_indices
# This is used by the Deepseek-V2 and Deepseek-V3 model
@torch.compile(
    dynamic=True,
    backend=current_platform.simple_compile_backend,
    options=maybe_disable_graph_partition(current_platform.simple_compile_backend),
)
def grouped_topk(
    hidden_states: torch.Tensor,
    gating_output: torch.Tensor,
    topk: int,
    renormalize: bool,
    num_expert_group: int = 0,
    topk_group: int = 0,
    scoring_func: str = "softmax",
    routed_scaling_factor: float = 1.0,
    e_score_correction_bias: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Grouped top-k routing used by DeepSeek-V2/V3-style MoE models.

    Experts are partitioned into `num_expert_group` groups; the best
    `topk_group` groups are kept, and the final top-k experts are chosen
    only from those groups. When `e_score_correction_bias` is given, biased
    scores drive expert *selection* while the unbiased scores supply the
    routing weights.

    Returns (topk_weights float32, topk_ids int32), both (num_tokens, topk).
    Raises ValueError for an unsupported `scoring_func`.
    """
    # Fast path: fully fused CUDA kernel, gated by env var and kernel limits
    # (<=32 groups, <=32 top-k) and requiring a correction bias.
    if (
        envs.VLLM_USE_FUSED_MOE_GROUPED_TOPK
        and current_platform.is_cuda()
        and num_expert_group <= 32
        and topk <= 32
        and e_score_correction_bias is not None
    ):
        return fused_grouped_topk(
            hidden_states=hidden_states,
            gating_output=gating_output,
            topk=topk,
            renormalize=renormalize,
            e_score_correction_bias=e_score_correction_bias,
            num_expert_group=num_expert_group,
            topk_group=topk_group,
            scoring_func=scoring_func,
            routed_scaling_factor=routed_scaling_factor,
        )
    assert hidden_states.size(0) == gating_output.size(0), "Number of tokens mismatch"
    if scoring_func == "softmax":
        scores = torch.softmax(gating_output, dim=-1)
    elif scoring_func == "sigmoid":
        scores = gating_output.sigmoid()
    else:
        raise ValueError(f"Unsupported scoring function: {scoring_func}")
    num_token = scores.size(0)
    if e_score_correction_bias is not None:
        # Store original scores before applying correction bias. We use biased
        # scores for expert selection but original scores for routing weights
        original_scores = scores
        scores = scores + e_score_correction_bias.unsqueeze(0)
        # Group score = sum of the top-2 (biased) expert scores in each group.
        group_scores = (
            scores.view(num_token, num_expert_group, -1).topk(2, dim=-1)[0].sum(dim=-1)
        )
    else:
        # Group score = best single expert score in each group.
        group_scores = (
            scores.view(num_token, num_expert_group, -1).max(dim=-1).values
        )  # [n, n_group]
    # For batch invariance, use sorted=True to ensure deterministic expert selection
    use_sorted = vllm_is_batch_invariant()
    group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=use_sorted)[
        1
    ]  # [n, top_k_group]
    # Build a per-expert mask that keeps only experts in the winning groups.
    group_mask = torch.zeros_like(group_scores)  # [n, n_group]
    group_mask.scatter_(1, group_idx, 1)  # [n, n_group]
    score_mask = (
        group_mask.unsqueeze(-1)
        .expand(num_token, num_expert_group, scores.size(-1) // num_expert_group)
        .reshape(num_token, -1)
    )  # [n, e]
    # Masked-out experts get -inf so they can never be selected below.
    tmp_scores = scores.masked_fill(~score_mask.bool(), float("-inf"))  # [n, e]
    if e_score_correction_bias is not None:
        topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=use_sorted)[1]
        # Use original unbiased scores for the routing weights
        topk_weights = original_scores.gather(1, topk_ids)
    else:
        topk_weights, topk_ids = torch.topk(
            tmp_scores, k=topk, dim=-1, sorted=use_sorted
        )
    if renormalize:
        topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    if routed_scaling_factor != 1.0:
        topk_weights = topk_weights * routed_scaling_factor
    return topk_weights.to(torch.float32), topk_ids.to(torch.int32)
# --8<-- [start:grouped_topk]
@CustomOp.register("grouped_topk")
class GroupedTopk(CustomOp):
    """GroupedTopk used by the Deepseek-V2 and Deepseek-V3 model."""
    # --8<-- [end:grouped_topk]
    def __init__(
        self,
        topk: int,
        renormalize: bool,
        num_expert_group: int = 0,
        topk_group: int = 0,
        scoring_func: str = "softmax",
        routed_scaling_factor: float = 1.0,
        num_fused_shared_experts: int = 0,
    ) -> None:
        """
        Args:
            topk: number of experts selected per token.
            renormalize: renormalize the selected weights to sum to 1.
            num_expert_group: number of expert groups.
            topk_group: number of groups kept before the final top-k.
            scoring_func: "softmax" or "sigmoid".
            routed_scaling_factor: multiplier applied to the routing weights.
            num_fused_shared_experts: shared-expert count forwarded to the
                ROCm AITER kernel (asserted 0 when that fusion is disabled).
        """
        super().__init__()
        # Reference (torch.compile'd pure-PyTorch) implementation.
        self.native_impl = grouped_topk
        self.topk = topk
        self.renormalize = renormalize
        self.num_expert_group = num_expert_group
        self.topk_group = topk_group
        self.scoring_func = scoring_func
        self.routed_scaling_factor = routed_scaling_factor
        self.num_fused_shared_experts = num_fused_shared_experts
    def forward_native(
        self,
        hidden_states: torch.Tensor,
        gating_output: torch.Tensor,
        e_score_correction_bias: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Grouped top-k via the native implementation; returns
        (topk_weights, topk_ids)."""
        return self.native_impl(
            hidden_states,
            gating_output,
            self.topk,
            self.renormalize,
            self.num_expert_group,
            self.topk_group,
            self.scoring_func,
            self.routed_scaling_factor,
            e_score_correction_bias,
        )
    def forward_cuda(
        self,
        hidden_states: torch.Tensor,
        gating_output: torch.Tensor,
        e_score_correction_bias: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """CUDA path: delegates to the native implementation (which may take
        the fused-kernel fast path internally)."""
        return self.forward_native(
            hidden_states, gating_output, e_score_correction_bias
        )
    def forward_hip(
        self,
        hidden_states: torch.Tensor,
        gating_output: torch.Tensor,
        e_score_correction_bias: torch.Tensor | None = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """ROCm path: uses the AITER grouped-topk kernel when the fused-MoE
        path is enabled; otherwise falls back to the native implementation."""
        if rocm_aiter_ops.is_fused_moe_enabled():
            if not rocm_aiter_ops.is_fusion_moe_shared_experts_enabled():
                assert self.num_fused_shared_experts == 0
            return rocm_aiter_grouped_topk(
                hidden_states,
                gating_output,
                self.topk,
                self.renormalize,
                self.num_expert_group,
                self.topk_group,
                self.scoring_func,
                self.routed_scaling_factor,
                e_score_correction_bias,
                self.num_fused_shared_experts,
            )
        else:
            return self.forward_native(
                hidden_states, gating_output, e_score_correction_bias
            )
class GroupedTopKRouter(BaseRouter):
    """Router using grouped top-k routing (e.g., DeepSeekV2/V3).

    When the expert count cannot be evenly partitioned into
    ``num_expert_group`` groups, routing falls back to plain (ungrouped)
    top-k, with the correction bias applied when one is configured.
    """

    def __init__(
        self,
        top_k: int,
        global_num_experts: int,
        eplb_state: EplbLayerState,
        num_expert_group: int,
        topk_group: int,
        renormalize: bool = True,
        scoring_func: str = "softmax",
        routed_scaling_factor: float = 1.0,
        e_score_correction_bias: torch.Tensor | None = None,
        num_fused_shared_experts: int = 0,
        enable_eplb: bool = False,
        indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    ):
        """
        Args:
            top_k: number of experts selected per token.
            global_num_experts: total number of (logical) experts.
            eplb_state: per-layer EPLB state (consumed by the base class).
            num_expert_group: number of expert groups.
            topk_group: number of groups kept before the final top-k.
            renormalize: renormalize the selected weights to sum to 1.
            scoring_func: "softmax" or "sigmoid".
            routed_scaling_factor: multiplier applied to the routing weights.
            e_score_correction_bias: optional per-expert selection bias.
            num_fused_shared_experts: shared-expert count for the ROCm AITER
                kernel (asserted 0 when that fusion is disabled).
            enable_eplb: enable expert-parallel load balancing.
            indices_type_getter: lazy provider of the expert-index dtype.
        """
        super().__init__(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )
        self.num_expert_group = num_expert_group
        self.topk_group = topk_group
        self.renormalize = renormalize
        self.scoring_func = scoring_func
        self.routed_scaling_factor = routed_scaling_factor
        self.e_score_correction_bias = e_score_correction_bias
        self.num_fused_shared_experts = num_fused_shared_experts

    @property
    def routing_method_type(self) -> RoutingMethodType:
        """Routing method identifier derived from this router's configuration."""
        return get_routing_method_type(
            scoring_func=self.scoring_func,
            top_k=self.top_k,
            renormalize=self.renormalize,
            num_expert_group=self.num_expert_group,
            has_e_score_bias=self.e_score_correction_bias is not None,
        )

    def _valid_grouping(self, num_experts: int) -> bool:
        """True when num_experts exceeds num_expert_group and divides evenly."""
        if num_experts <= self.num_expert_group:
            return False
        return num_experts % self.num_expert_group == 0

    def _compute_routing(
        self,
        hidden_states: torch.Tensor,
        router_logits: torch.Tensor,
        indices_type: torch.dtype | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Compute routing using grouped top-k, falling back to plain top-k
        when the expert count cannot be grouped."""
        if not self._valid_grouping(router_logits.shape[-1]):
            if self.e_score_correction_bias is not None:
                topk_weights, topk_ids = fused_topk_bias(
                    hidden_states=hidden_states,
                    gating_output=router_logits,
                    e_score_correction_bias=self.e_score_correction_bias.data,
                    topk=self.top_k,
                    renormalize=self.renormalize,
                    # Fix: honor the configured scoring function and indices
                    # dtype instead of silently defaulting to softmax/int32
                    # (matches FusedTopKBiasRouter._compute_routing).
                    scoring_func=self.scoring_func,
                    indices_type=indices_type,
                )
                if self.routed_scaling_factor != 1.0:
                    topk_weights *= self.routed_scaling_factor
            else:
                # NOTE(review): this call keeps fused_topk's default
                # scoring_func ("softmax"), as before — confirm whether
                # self.scoring_func should be forwarded here as well.
                topk_weights, topk_ids, _ = fused_topk(
                    hidden_states=hidden_states,
                    gating_output=router_logits,
                    topk=self.top_k,
                    renormalize=self.renormalize,
                    indices_type=indices_type,
                )
            return topk_weights, topk_ids

        # Select grouped_topk implementation: ROCm AITER kernel when enabled,
        # otherwise the torch.compile'd native implementation.
        if rocm_aiter_ops.is_fused_moe_enabled():
            if not rocm_aiter_ops.is_fusion_moe_shared_experts_enabled():
                assert self.num_fused_shared_experts == 0
            grouped_topk_impl = partial(
                rocm_aiter_grouped_topk,
                num_fused_shared_experts=self.num_fused_shared_experts,
            )
        else:
            grouped_topk_impl = grouped_topk
        topk_weights, topk_ids = grouped_topk_impl(
            hidden_states=hidden_states,
            gating_output=router_logits,
            topk=self.top_k,
            renormalize=self.renormalize,
            num_expert_group=self.num_expert_group,
            topk_group=self.topk_group,
            scoring_func=self.scoring_func,
            routed_scaling_factor=self.routed_scaling_factor,
            e_score_correction_bias=self.e_score_correction_bias,
        )
        return topk_weights, topk_ids
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/grouped_topk_router.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/router/router_factory.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import Callable
import torch
import vllm.envs as envs
from vllm.distributed.eplb.eplb_state import EplbLayerState
from vllm.model_executor.layers.fused_moe.config import RoutingMethodType
from vllm.model_executor.layers.fused_moe.router.custom_routing_router import (
CustomRoutingRouter,
)
from vllm.model_executor.layers.fused_moe.router.fused_moe_router import (
FusedMoERouter,
)
from vllm.model_executor.layers.fused_moe.router.fused_topk_bias_router import (
FusedTopKBiasRouter,
)
from vllm.model_executor.layers.fused_moe.router.fused_topk_router import (
FusedTopKRouter,
)
from vllm.model_executor.layers.fused_moe.router.grouped_topk_router import (
GroupedTopKRouter,
)
from vllm.model_executor.layers.fused_moe.router.routing_simulator_router import (
RoutingSimulatorRouter,
)
EMPTY_EPLB_STATE: EplbLayerState = EplbLayerState()
def create_fused_moe_router(
    # common parameters
    top_k: int,
    global_num_experts: int,
    renormalize: bool = True,
    indices_type_getter: Callable[[], torch.dtype | None] | None = None,
    # grouped topk parameters
    use_grouped_topk: bool = False,
    num_expert_group: int | None = None,
    topk_group: int | None = None,
    scoring_func: str = "softmax",
    num_fused_shared_experts: int = 0,
    # grouped topk + fused topk bias parameters
    routed_scaling_factor: float = 1.0,
    e_score_correction_bias: torch.Tensor | None = None,
    # custom routing parameters
    custom_routing_function: Callable | None = None,
    # eplb parameters
    enable_eplb: bool = False,
    eplb_state: EplbLayerState = EMPTY_EPLB_STATE,
) -> FusedMoERouter:
    """
    Factory function to create the appropriate FusedMoERouter subclass based on
    the provided parameters.

    The selection logic follows this priority order:
    1. RoutingSimulatorRouter - if VLLM_MOE_ROUTING_SIMULATION_STRATEGY env var is set
    2. GroupedTopKRouter - if use_grouped_topk is True
    3. CustomRoutingRouter - if custom_routing_function is not None
    4. FusedTopKBiasRouter - if e_score_correction_bias is not None
    5. FusedTopKRouter - default fallback

    Common arguments:
        top_k: Number of experts to select per token
        global_num_experts: Total number of experts in the model
        renormalize: Whether to renormalize the routing weights
        indices_type_getter: Function to get the desired indices dtype

    Grouped topk arguments:
        use_grouped_topk: Whether to use grouped top-k routing
        num_expert_group: Number of expert groups (for grouped routing)
        topk_group: Top-k within each group (for grouped routing)
        scoring_func: Scoring function to use ("softmax" or "sigmoid")
        num_fused_shared_experts: Number of fused shared experts (for ROCm AITER)

    Grouped topk and fused topk bias arguments:
        routed_scaling_factor: Scaling factor for routed weights
        e_score_correction_bias: Optional bias correction for expert scores

    Custom routing arguments:
        custom_routing_function: Optional custom routing function

    EPLB arguments:
        enable_eplb: Whether EPLB is enabled
        eplb_state: EPLB (Expert Parallelism Load Balancing) state

    Raises:
        ValueError: if use_grouped_topk is True but num_expert_group or
            topk_group is None.

    Returns:
        An instance of the appropriate FusedMoERouter subclass
    """
    # Env-var override: simulate routing instead of using the real router.
    routing_strategy = envs.VLLM_MOE_ROUTING_SIMULATION_STRATEGY
    if routing_strategy != "":
        return RoutingSimulatorRouter(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )

    if use_grouped_topk:
        # Grouped routing and custom routing are mutually exclusive.
        assert custom_routing_function is None
        if num_expert_group is None or topk_group is None:
            raise ValueError(
                "num_expert_group and topk_group must be provided when "
                "use_grouped_topk is True"
            )

        grouped_topk_router = GroupedTopKRouter(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            num_expert_group=num_expert_group,
            topk_group=topk_group,
            renormalize=renormalize,
            scoring_func=scoring_func,
            routed_scaling_factor=routed_scaling_factor,
            e_score_correction_bias=e_score_correction_bias,
            num_fused_shared_experts=num_fused_shared_experts,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )
        if (
            grouped_topk_router.routing_method_type != RoutingMethodType.Unspecified
            or num_expert_group > 1
            or topk_group > 1
        ):
            return grouped_topk_router

        # If routing_method for GroupedTopKRouter is Unspecified and there is only
        # one group, fallback to standard top-k routing
        use_grouped_topk = False
        num_expert_group = None
        topk_group = None

    if custom_routing_function is not None:
        return CustomRoutingRouter(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            custom_routing_function=custom_routing_function,
            renormalize=renormalize,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )

    if e_score_correction_bias is not None:
        return FusedTopKBiasRouter(
            top_k=top_k,
            global_num_experts=global_num_experts,
            eplb_state=eplb_state,
            e_score_correction_bias=e_score_correction_bias,
            scoring_func=scoring_func,
            renormalize=renormalize,
            routed_scaling_factor=routed_scaling_factor,
            enable_eplb=enable_eplb,
            indices_type_getter=indices_type_getter,
        )

    return FusedTopKRouter(
        top_k=top_k,
        global_num_experts=global_num_experts,
        eplb_state=eplb_state,
        renormalize=renormalize,
        scoring_func=scoring_func,
        enable_eplb=enable_eplb,
        indices_type_getter=indices_type_getter,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/router/router_factory.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/step1.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Shared Step decoder blocks and the Step1 text model."""
from __future__ import annotations
import math
from collections.abc import Iterable
import torch
from torch import nn
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import (
get_pp_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import Attention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.interfaces import SupportsPP
from vllm.model_executor.models.utils import (
AutoWeightsLoader,
PPMissingLayer,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
from vllm.sequence import IntermediateTensors
from vllm.v1.attention.backend import AttentionType
STEP_PACKED_MODULES_MAPPING = {
"qkv_proj": ["q_proj", "k_proj", "v_proj"],
"gate_up_proj": ["gate_proj", "up_proj"],
}
def _get_step_alibi_slopes(total_num_heads: int) -> torch.Tensor:
"""Reference ALiBi slopes used by Step models."""
closest_power_of_2 = 2 ** math.floor(math.log2(total_num_heads))
base = torch.tensor(
2 ** (-8.0 / closest_power_of_2),
dtype=torch.float32,
)
slopes = torch.pow(
base,
torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32),
)
if closest_power_of_2 != total_num_heads:
extra_base = torch.tensor(
2 ** (-4.0 / closest_power_of_2),
dtype=torch.float32,
)
num_remaining_heads = total_num_heads - closest_power_of_2
extra_powers = torch.arange(
1,
1 + 2 * num_remaining_heads,
2,
dtype=torch.int32,
)
slopes = torch.cat(
[slopes, torch.pow(extra_base, extra_powers)],
dim=0,
)
return slopes
class StepAttention(nn.Module):
    """Multi-head self-attention with ALiBi positional bias for Step models.

    Query heads are sharded evenly across tensor-parallel ranks; each rank
    keeps only the ALiBi slopes for its own slice of heads.
    """

    def __init__(
        self,
        config,
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.head_dim = self.hidden_size // self.total_num_heads
        # KV head count: prefer `num_attention_groups`, fall back to
        # `num_key_value_heads`, and default to 1 (MQA) when absent/invalid.
        total_num_kv_heads = getattr(
            config, "num_attention_groups", getattr(config, "num_key_value_heads", 1)
        )
        if total_num_kv_heads is None or total_num_kv_heads <= 0:
            total_num_kv_heads = 1
        self.total_num_kv_heads = total_num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # KV heads are partitioned across ranks.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Fewer KV heads than ranks: each KV head is replicated on a
            # group of ranks.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.qkv_proj = QKVParallelLinear(
            hidden_size=self.hidden_size,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_kv_heads,
            bias=getattr(config, "attention_bias", False),
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        # Per-rank widths of the q and kv segments of the fused QKV output.
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.o_proj = RowParallelLinear(
            input_size=self.total_num_heads * self.head_dim,
            output_size=self.hidden_size,
            bias=getattr(config, "attention_bias", False),
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        # Slice the global ALiBi slope table down to this rank's heads.
        tp_rank = get_tensor_model_parallel_rank()
        head_start = tp_rank * self.num_heads
        head_end = (tp_rank + 1) * self.num_heads
        alibi_slopes = _get_step_alibi_slopes(self.total_num_heads)[head_start:head_end]
        alibi_slopes = alibi_slopes.tolist()
        self.scale = self.head_dim**-0.5
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scale,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            alibi_slopes=alibi_slopes,
            prefix=f"{prefix}.attn",
            # NOTE(review): presumably a Step-specific sqrt variant of the
            # ALiBi bias — confirm against the attention backend.
            use_alibi_sqrt=True,
            attn_type=AttentionType.DECODER,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """Fused QKV projection, attention, and output projection."""
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class StepMLP(nn.Module):
    """SwiGLU feed-forward block with tensor-parallel sharded projections."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
        bias: bool = False,
    ):
        super().__init__()
        # Gate and up projections are fused into one column-parallel GEMM.
        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=hidden_size,
            output_sizes=[intermediate_size] * 2,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            input_size=intermediate_size,
            output_size=hidden_size,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        # SiLU(gate) * up, applied to the fused gate/up output.
        self.act_fn = SiluAndMul()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate_up, _ = self.gate_up_proj(x)
        activated = self.act_fn(gate_up)
        out, _ = self.down_proj(activated)
        return out
class StepDecoderLayer(nn.Module):
    """Pre-norm Step decoder block: ALiBi attention followed by a SwiGLU MLP."""

    def __init__(self, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.hidden_size = config.hidden_size
        self.self_attn = StepAttention(
            config=config,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.mlp = StepMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
            bias=getattr(config, "mlp_bias", False),
        )
        self.input_layernorm = RMSNorm(
            self.hidden_size,
            eps=config.rms_norm_eps,
        )
        self.post_attention_layernorm = RMSNorm(
            self.hidden_size,
            eps=config.rms_norm_eps,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Run one decoder block; returns ``(hidden_states, residual)``.

        The residual stream is threaded through so RMSNorm can fuse the
        residual add. ``positions`` is unused here (ALiBi carries position
        through the attention bias) but kept for interface parity with
        other decoder layers.
        """
        if residual is None:
            # First layer of this pipeline stage: seed the residual stream.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(hidden_states=hidden_states)
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights into this layer, fusing q/k/v and
        gate/up shards into the merged qkv_proj / gate_up_proj parameters.

        Returns the set of (renamed) parameter names that were loaded.
        """
        # (fused param suffix, checkpoint param suffix, shard id)
        stacked_params_mapping = [
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                # Fused params carry a shard-aware loader.
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Non-fused parameter: load directly.
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)  # type: ignore[name-defined]
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class StepDecoderModel(nn.Module):
    """Stack of Step decoder layers with pipeline-parallel support."""

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        # Need embed_tokens on first rank, and also on last rank if tie_word_embeddings
        if get_pp_group().is_first_rank or (
            config.tie_word_embeddings and get_pp_group().is_last_rank
        ):
            self.embed_tokens = VocabParallelEmbedding(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
            )
        else:
            self.embed_tokens = PPMissingLayer()
        # Only this PP stage's slice of layers is materialized.
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: StepDecoderLayer(vllm_config=vllm_config, prefix=prefix),
            prefix=maybe_prefix(prefix, "layers"),
        )
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        else:
            self.norm = PPMissingLayer()
        # Indices of layers whose hidden states are additionally returned
        # (e.g. for speculative decoding heads).
        self.aux_hidden_state_layers: tuple[int, ...] = getattr(
            config, "aux_hidden_state_layers", ()
        )
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"],
            config.hidden_size,
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings for the given ids."""
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
        """Run the decoder stack.

        Returns IntermediateTensors on non-final PP ranks, otherwise the
        final hidden states (plus aux hidden states when configured).
        """
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                assert input_ids is not None
                hidden_states = self.embed_input_ids(input_ids)
            residual = None
        else:
            # Mid-pipeline rank: resume from the previous stage's outputs.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]

        aux_hidden_states = []
        # NOTE(review): idx is relative to this PP stage's layer slice —
        # confirm aux_hidden_state_layers uses the same (local) indexing.
        for idx, layer in enumerate(self.layers[self.start_layer : self.end_layer]):
            if idx in self.aux_hidden_state_layers:
                if residual is None:
                    aux_hidden_states.append(hidden_states)
                else:
                    # Capture the full (pre-norm) residual-stream value.
                    aux_hidden_states.append(hidden_states + residual)
            hidden_states, residual = layer(positions, hidden_states, residual)

        if not get_pp_group().is_last_rank:
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )

        hidden_states, _ = self.norm(hidden_states, residual)

        if aux_hidden_states:
            return hidden_states, aux_hidden_states
        return hidden_states
class Step1ForCausalLM(nn.Module, SupportsPP):
    """Step1 causal language model: StepDecoderModel plus an LM head."""

    # Checkpoint-name fusion map shared across Step models.
    packed_modules_mapping = STEP_PACKED_MODULES_MAPPING

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.quant_config = quant_config
        self.model = StepDecoderModel(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "model"),
        )
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                config.vocab_size,
                config.hidden_size,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "lm_head"),
            )
            if getattr(config, "tie_word_embeddings", True):
                # Share weights with the input embedding table.
                self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)
            self.logits_processor = LogitsProcessor(config.vocab_size)
        else:
            # Non-final PP ranks never compute logits.
            self.lm_head = PPMissingLayer()
            self.logits_processor = None  # type: ignore[assignment]
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Look up token embeddings via the underlying decoder model."""
        return self.model.embed_input_ids(input_ids)

    def forward(
        self,
        input_ids: torch.LongTensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None,
        inputs_embeds: torch.Tensor | None = None,
    ) -> torch.Tensor | IntermediateTensors | tuple[torch.Tensor, list[torch.Tensor]]:
        """Delegate straight to the decoder stack."""
        return self.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        """Project hidden states to vocab logits (last PP rank only)."""
        if not get_pp_group().is_last_rank:
            return None
        return self.logits_processor(self.lm_head, hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights by recursing into submodules."""
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/step1.py",
"license": "Apache License 2.0",
"lines": 374,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tools/generate_versions_json.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Generate docker/versions.json from Dockerfile ARG defaults.
This script parses the Dockerfile and extracts ARG defaults to create
a bake-native versions.json file that can be used directly with:
docker buildx bake -f docker/docker-bake.hcl -f docker/versions.json
Usage:
python tools/generate_versions_json.py [--check]
Options:
--check Verify versions.json matches Dockerfile (for CI validation)
Requirements:
pip install dockerfile-parse
"""
import json
import sys
from pathlib import Path
from dockerfile_parse import DockerfileParser
# Paths resolved relative to this script so it works from any CWD.
REPO_ROOT = Path(__file__).resolve().parent.parent
DOCKERFILE = REPO_ROOT / "docker" / "Dockerfile"
VERSIONS_JSON = REPO_ROOT / "docker" / "versions.json"

# Map Dockerfile ARG names (lowercase) to bake variable names (uppercase)
# This matches docker-bake.hcl variable naming convention
BAKE_VAR_NAMES = {
    "torch_cuda_arch_list": "TORCH_CUDA_ARCH_LIST",
    "max_jobs": "MAX_JOBS",
    "nvcc_threads": "NVCC_THREADS",
}
def parse_dockerfile_args(dockerfile_path: Path) -> dict[str, str]:
    """Extract all ARG defaults from Dockerfile using dockerfile-parse."""
    structure = DockerfileParser(path=str(dockerfile_path)).structure

    # Collect ARG defaults from the instruction structure (more reliable
    # than line parsing for multi-stage Dockerfiles).
    raw: dict[str, str] = {}
    for entry in structure:
        if entry["instruction"] != "ARG":
            continue
        body = entry["value"]
        if "=" not in body:
            continue
        # ARG NAME=value — the first definition of a name wins.
        name, _, default = body.partition("=")
        name = name.strip()
        if name in raw:
            continue
        default = default.strip()
        # Drop a single layer of matching surrounding quotes, if any.
        for quote in ('"', "'"):
            if default.startswith(quote) and default.endswith(quote):
                default = default[1:-1]
                break
        if default:
            raw[name] = default

    # Resolve ${VAR} interpolation (e.g. ${CUDA_VERSION} -> 12.9.1) in a
    # single substitution pass; values that still contain unresolved
    # references afterwards are dropped.
    resolved: dict[str, str] = {}
    for name, value in raw.items():
        if "${" in value:
            for ref_name, ref_value in raw.items():
                value = value.replace(f"${{{ref_name}}}", ref_value)
        if "${" not in value:
            resolved[name] = value
    return resolved
def generate_bake_native_json(args: dict[str, str]) -> dict:
    """Generate bake-native JSON structure."""
    # Translate each ARG to its bake variable name (uppercase when a
    # mapping exists, unchanged otherwise).
    variables = {
        BAKE_VAR_NAMES.get(name, name): {"default": value}
        for name, value in args.items()
    }
    return {
        "_comment": (
            "Auto-generated from Dockerfile ARGs. "
            "Do not edit manually. Run: python tools/generate_versions_json.py"
        ),
        "variable": variables,
    }
def main():
    """Regenerate docker/versions.json, or verify it with --check."""
    check_mode = "--check" in sys.argv

    # Extract ARG defaults and render the bake-native JSON document.
    args = parse_dockerfile_args(DOCKERFILE)
    new_content = json.dumps(generate_bake_native_json(args), indent=2) + "\n"

    if check_mode:
        # CI validation: fail when versions.json drifts from the Dockerfile.
        if not VERSIONS_JSON.exists():
            print(f"ERROR: {VERSIONS_JSON} does not exist")
            sys.exit(1)
        if VERSIONS_JSON.read_text() != new_content:
            print("ERROR: docker/versions.json is out of sync with Dockerfile")
            print("Run: python tools/generate_versions_json.py")
            sys.exit(1)
        print("✅ docker/versions.json is in sync with Dockerfile")
        sys.exit(0)

    VERSIONS_JSON.write_text(new_content)
    print(f"✅ Generated {VERSIONS_JSON}")

    print("\nExtracted versions:")
    for name, value in args.items():
        print(f"  {name}: {value}")
if __name__ == "__main__":
main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tools/generate_versions_json.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:examples/pooling/classify/classification_online.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example Python client for classification API using vLLM API server
NOTE:
start a supported classification model server with `vllm serve`, e.g.
vllm serve jason9693/Qwen2.5-1.5B-apeach
"""
import argparse
import pprint
import requests
headers = {"accept": "application/json", "Content-Type": "application/json"}
def parse_args():
    """Parse the example client's --host/--port command-line options."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8000)
    return parser.parse_args()
def main(args):
    """Query /classify with string prompts and then with token-id inputs."""
    base_url = f"http://{args.host}:{args.port}"
    models_url = base_url + "/v1/models"
    classify_url = base_url + "/classify"
    tokenize_url = base_url + "/tokenize"

    # Discover the first model served by this endpoint.
    model = requests.get(models_url, headers=headers).json()["data"][0]["id"]

    prompts = [
        "Hello, my name is",
        "The president of the United States is",
        "The capital of France is",
        "The future of AI is",
    ]

    # /classify can accept str as input
    str_payload = {"model": model, "input": prompts}
    response = requests.post(classify_url, headers=headers, json=str_payload)
    pprint.pprint(response.json())

    # /classify can accept token ids as input
    token_ids = []
    for prompt in prompts:
        tokenized = requests.post(
            tokenize_url,
            json={"model": model, "prompt": prompt},
        )
        token_ids.append(tokenized.json()["tokens"])

    ids_payload = {"model": model, "input": token_ids}
    response = requests.post(classify_url, headers=headers, json=ids_payload)
    pprint.pprint(response.json())
if __name__ == "__main__":
args = parse_args()
main(args)
| {
"repo_id": "vllm-project/vllm",
"file_path": "examples/pooling/classify/classification_online.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/step_vl.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""This is basically a copy from perception_models/core/vision_encoder/pe.py"""
from collections.abc import Callable
from functools import partial
import torch
from einops import rearrange, repeat
from torch import nn
from torch.nn import functional as F
from vllm.config import VllmConfig
from vllm.distributed import get_tensor_model_parallel_world_size
from vllm.model_executor.layers.activation import get_act_fn
from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention
from vllm.model_executor.layers.conv import Conv2dLayer
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.quantization import QuantizationConfig
from .step3_vl import Step3VLForConditionalGeneration
from .utils import WeightsMapper, init_vllm_registered_model, maybe_prefix
from .vision import is_vit_use_data_parallel, run_dp_sharded_vision_model
_DEFAULT_NORM_LAYER = partial(nn.LayerNorm, eps=1e-5)
def rotate_half(x):
    """Rotate adjacent channel pairs along the last dim: (x1, x2) -> (-x2, x1).

    This is the pairwise rotation used by rotary position embeddings.
    Implemented with native torch view ops instead of einops.rearrange —
    the dependency was unnecessary for this trivial regroup/flatten.
    """
    # (..., 2d) -> (..., d, 2): group the last dim into consecutive pairs.
    pairs = x.unflatten(-1, (-1, 2))
    first, second = pairs.unbind(dim=-1)
    rotated = torch.stack((-second, first), dim=-1)
    # (..., d, 2) -> (..., 2d): restore the original layout.
    return rotated.flatten(-2)
def apply_rotary_emb(freqs, t, start_index=0, scale=1.0, seq_dim=-2):
    """Apply rotary embeddings to t[..., start_index:start_index+rot_dim].

    Channels outside that slice pass through unchanged; the result is cast
    back to t's original dtype.
    """
    orig_dtype = t.dtype
    if t.ndim == 3:
        # Align the cached frequencies with the (possibly shorter) sequence.
        seq_len = t.shape[seq_dim]
        freqs = freqs[-seq_len:]
    rot_dim = freqs.shape[-1]
    end_index = start_index + rot_dim
    assert rot_dim <= t.shape[-1], (
        "feature dimension {} is not of sufficient size to rotate in all the "
        "positions {}".format(t.shape[-1], rot_dim)
    )
    left = t[..., :start_index]
    mid = t[..., start_index:end_index]
    right = t[..., end_index:]
    mid = (mid * freqs.cos() * scale) + (rotate_half(mid) * freqs.sin() * scale)
    return torch.cat((left, mid, right), dim=-1).type(orig_dtype)
class PerceptionEncoderRope2D(nn.Module):
    """2D rotary position embedding with a precomputed frequency cache.

    Frequencies for every (row, col) cell of the maximum grid are cached at
    construction; smaller grids are served by gathering the relevant
    positions from the cache. When ``use_cls_token`` is set, position 0 is a
    zero-frequency slot for the class token.
    """

    def __init__(
        self,
        dim: int,
        max_grid_height: int,
        max_grid_width: int,
        use_cls_token: bool = False,
        theta=10000,
        max_freq=10,
        num_freqs=1,
        theta_rescale_factor=1.0,
    ):
        super().__init__()
        self.dim = dim
        self.max_grid_height = max_grid_height
        self.max_grid_width = max_grid_width
        self.use_cls_token = use_cls_token
        # NOTE(review): presumably an NTK-style theta rescale; with the
        # default factor of 1.0 this leaves theta unchanged.
        self.theta = theta * theta_rescale_factor ** (dim / (dim - 2))
        # max_freq / num_freqs are stored but not read anywhere in this
        # class body — kept for parity with the reference implementation.
        self.max_freq = max_freq
        self.num_freqs = num_freqs
        cache = self._compute_2d_freqs()
        # Not persisted: the cache is deterministic and rebuilt on init.
        self.register_buffer("freqs_cache", cache, persistent=False)

    def _compute_inv_freq(self, base: int | float, dim: int) -> torch.Tensor:
        """Standard RoPE inverse-frequency ladder of length dim // 2."""
        freqs = 1.0 / (base ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
        return freqs

    def _compute_freqs(self, t: torch.Tensor, inv_freq: torch.Tensor):
        """Outer product of positions and inverse frequencies, with each
        frequency duplicated for the paired-channel rotation."""
        freqs = torch.einsum("..., f -> ... f", t.type(inv_freq.dtype), inv_freq)
        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
        return freqs

    def _compute_2d_freqs(self) -> torch.Tensor:
        """Build the (1, 1, H*W[+1], dim) frequency cache for the max grid."""
        grid_h_range = torch.arange(self.max_grid_height, dtype=torch.float)
        grid_w_range = torch.arange(self.max_grid_width, dtype=torch.float)
        if self.use_cls_token:
            # Shift patch positions by one so index 0 stays for the cls token.
            grid_h_range += 1
            grid_w_range += 1
        inv_freq = self._compute_inv_freq(self.theta, self.dim // 2)
        # Broadcast row/column frequencies over the full grid.
        freqs_h = self._compute_freqs(grid_h_range, inv_freq)[:, None].expand(
            self.max_grid_height, self.max_grid_width, -1
        )
        freqs_w = self._compute_freqs(grid_w_range, inv_freq)[None, :].expand(
            self.max_grid_height, self.max_grid_width, -1
        )
        # First half of channels encodes width, second half height.
        freqs = torch.cat([freqs_w, freqs_h], dim=-1).reshape(
            self.max_grid_height * self.max_grid_width, -1
        )
        if self.use_cls_token:
            # Zero frequencies leave the cls token unrotated.
            freqs = torch.cat([torch.zeros(1, freqs.shape[-1]), freqs], dim=0)
        freqs = freqs[None, None, ...]
        return freqs

    def forward(self, q: torch.Tensor, k: torch.Tensor, grid_hw: tuple[int, int]):
        """Rotate q and k for a (height, width) patch grid."""
        if grid_hw[0] != self.max_grid_height or grid_hw[1] != self.max_grid_width:
            # Smaller grid: gather the flattened positions of its cells
            # from the max-grid cache.
            rows = torch.arange(grid_hw[0], device=q.device).view(-1, 1)
            cols = torch.arange(grid_hw[1], device=q.device).view(1, -1)
            positions = (rows * self.max_grid_width + cols).reshape(-1).to(torch.long)
            if self.use_cls_token:
                positions = torch.cat(
                    [torch.zeros(1, device=q.device), positions + 1], dim=0
                )
                positions = positions.to(torch.long)
            freqs = self.freqs_cache.index_select(2, positions)
        else:
            freqs = self.freqs_cache
        q = apply_rotary_emb(freqs, q)
        k = apply_rotary_emb(freqs, k)
        return q, k
class PerceptionEncoderLayerScale(nn.Module):
    """Learnable per-channel scaling (LayerScale) for residual branches."""

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        # When inplace is True, forward mutates its input tensor.
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
class PerceptionEncoderMLP(nn.Module):
    """Two-layer MLP (fc1 -> activation -> fc2), optionally TP-sharded."""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        act_layer: Callable[[], nn.Module],
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        # Under ViT data parallelism the linears run unsharded.
        dp_mode = is_vit_use_data_parallel()
        self.fc1 = ColumnParallelLinear(
            input_dim,
            hidden_dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
            disable_tp=dp_mode,
        )
        # NOTE(review): the annotation suggests a module factory, but the
        # callable is applied directly to tensors in forward — confirm.
        self.activation = act_layer
        self.fc2 = RowParallelLinear(
            hidden_dim,
            input_dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
            disable_tp=dp_mode,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, _ = self.fc1(x)
        hidden = self.activation(hidden)
        out, _ = self.fc2(hidden)
        return out
class PerceptionEncoderVisionAttention(nn.Module):
    """ViT self-attention with 2D rotary position embeddings.

    Heads are sharded across TP ranks unless ViT data parallelism is
    enabled, in which case the projections run unsharded.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        max_grid_height: int,
        max_grid_width: int,
        use_cls_token: bool = False,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.total_num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.scale = self.head_dim**-0.5
        use_data_parallel = is_vit_use_data_parallel()
        tp_size = 1 if use_data_parallel else get_tensor_model_parallel_world_size()
        assert self.total_num_heads % tp_size == 0, (
            "embed_dim must be divisible by num_heads"
        )
        # Heads handled by this rank.
        self.num_heads = self.total_num_heads // tp_size
        self.qkv_proj = QKVParallelLinear(
            embed_dim,
            self.head_dim,
            self.total_num_heads,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
            disable_tp=use_data_parallel,
        )
        self.out_proj = RowParallelLinear(
            embed_dim,
            embed_dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.out_proj",
            disable_tp=use_data_parallel,
        )
        self.attn = MMEncoderAttention(
            self.num_heads,
            self.head_dim,
            self.scale,
            prefix=f"{prefix}.attn",
        )
        # RoPE operates per-head, so its dim is head_dim.
        self.rope = PerceptionEncoderRope2D(
            dim=self.head_dim,
            max_grid_height=max_grid_height,
            max_grid_width=max_grid_width,
            use_cls_token=use_cls_token,
        )

    def forward(self, x: torch.Tensor, grid_hw: tuple[int, int]) -> torch.Tensor:
        """Attend over a (batch, seq, embed) input for the given patch grid."""
        bsz, seq_len, _ = x.shape
        qkv, _ = self.qkv_proj(x)
        # q/k/v segments have equal width, so an even chunk works here.
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        # Reshape to (batch, heads, seq, head_dim) for rotary application.
        q = q.view(bsz, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        k = k.view(bsz, seq_len, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        q, k = self.rope(q, k, grid_hw=grid_hw)
        # Back to the flat (batch, seq, heads * head_dim) layout the
        # attention backend expects.
        q = q.permute(0, 2, 1, 3).reshape(bsz, seq_len, self.num_heads * self.head_dim)
        k = k.permute(0, 2, 1, 3).reshape(bsz, seq_len, self.num_heads * self.head_dim)
        attn_output = self.attn(q, k, v)
        attn_output, _ = self.out_proj(attn_output)
        return attn_output
class PerceptionEncoderVisionBlock(nn.Module):
    """Pre-norm transformer block (attention + MLP) with optional LayerScale."""

    def __init__(
        self,
        d_model: int,
        n_head: int,
        max_grid_height: int,
        max_grid_width: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float | None = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = nn.LayerNorm,
        use_cls_token: bool = False,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.attn = PerceptionEncoderVisionAttention(
            d_model,
            n_head,
            max_grid_height=max_grid_height,
            max_grid_width=max_grid_width,
            use_cls_token=use_cls_token,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        # LayerScale is enabled only when an init value is supplied.
        self.ls_1 = (
            PerceptionEncoderLayerScale(d_model, ls_init_value)
            if ls_init_value is not None
            else nn.Identity()
        )
        self.ls_2 = (
            PerceptionEncoderLayerScale(d_model, ls_init_value)
            if ls_init_value is not None
            else nn.Identity()
        )
        self.ln_1 = norm_layer(d_model)
        self.ln_2 = norm_layer(d_model)
        hidden_dim = int(d_model * mlp_ratio)
        self.mlp = PerceptionEncoderMLP(
            d_model,
            hidden_dim,
            act_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )

    def forward(self, x: torch.Tensor, grid_hw: tuple[int, int]):
        """Standard pre-norm residual layout: x + ls(attn(ln(x))), then MLP."""
        x = x + self.ls_1(self.attn(self.ln_1(x), grid_hw=grid_hw))
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x
class PerceptionEncoderVisionTransformer(nn.Module):
    """A stack of ``layers`` PerceptionEncoderVisionBlock modules."""

    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        max_grid_height: int,
        max_grid_width: int,
        mlp_ratio: float = 4.0,
        ls_init_value: float | None = None,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = nn.LayerNorm,
        use_cls_token: bool = False,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.resblocks = nn.ModuleList(
            [
                PerceptionEncoderVisionBlock(
                    d_model=width,
                    n_head=heads,
                    max_grid_height=max_grid_height,
                    max_grid_width=max_grid_width,
                    mlp_ratio=mlp_ratio,
                    ls_init_value=ls_init_value,
                    act_layer=act_layer,
                    norm_layer=norm_layer,
                    use_cls_token=use_cls_token,
                    quant_config=quant_config,
                    prefix=f"{prefix}.resblocks.{i}",
                )
                for i in range(layers)
            ]
        )

    def forward(self, x: torch.Tensor, grid_hw: tuple[int, int]):
        """Run every residual block in sequence on (batch, seq, width) input."""
        for block in self.resblocks:
            x = block(x, grid_hw=grid_hw)
        return x
class PerceptionEncoder(nn.Module):
    """Step-VL vision tower: a patchifying conv stem, a RoPE-2D vision
    transformer, and two stride-2 conv downsamplers that emit 4x-width
    features. Only ``use_rope2d=True`` configurations are supported.
    """

    def __init__(
        self,
        config,
        act_layer: Callable,
        norm_layer: Callable = _DEFAULT_NORM_LAYER,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ):
        super().__init__()
        self.patch_size = config.patch_size
        # Fall back to the transformer width when output_dim is unset/falsy.
        self.output_dim = config.output_dim or config.width
        self.heads = config.heads
        self.width = config.width
        self.layers = config.layers
        self.use_abs_posemb = config.use_abs_posemb
        self.use_cls_token = config.use_cls_token
        self.use_rope2d = config.use_rope2d
        # Only the RoPE-2D variant of the encoder is implemented here.
        if not self.use_rope2d:
            raise ValueError("use_rope2d must be True")
        self.image_size = config.image_size
        # Non-overlapping patch embedding (kernel == stride == patch_size).
        self.conv1 = Conv2dLayer(
            in_channels=3,
            out_channels=config.width,
            kernel_size=config.patch_size,
            stride=config.patch_size,
            bias=False,
        )
        self.ln_pre = norm_layer(config.width) if config.use_ln_pre else nn.Identity()
        self.ln_post = norm_layer(self.width) if config.use_ln_post else nn.Identity()
        self.transformer = PerceptionEncoderVisionTransformer(
            config.width,
            config.layers,
            config.heads,
            max_grid_height=self.image_size // self.patch_size,
            max_grid_width=self.image_size // self.patch_size,
            mlp_ratio=config.mlp_ratio,
            ls_init_value=config.ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            use_cls_token=self.use_cls_token,
            quant_config=quant_config,
            prefix=f"{prefix}.transformer",
        )
        # Two stride-2 convs: 4x spatial reduction, 4x channel expansion.
        self.vit_downsampler1 = Conv2dLayer(
            config.width, config.width * 2, kernel_size=3, stride=2, padding=1
        )
        self.vit_downsampler2 = Conv2dLayer(
            config.width * 2, config.width * 4, kernel_size=3, stride=2, padding=1
        )
        if self.use_cls_token:
            self.class_embedding = nn.Parameter(
                (self.width**-0.5) * torch.randn(self.width)
            )
        if self.use_abs_posemb:
            self.posemb_grid_size = self.image_size // self.patch_size
            # One extra position is reserved for the CLS token when enabled.
            self.positional_embedding = nn.Parameter(
                (self.width**-0.5)
                * torch.randn(
                    int(self.use_cls_token) + self.posemb_grid_size**2,
                    self.width,
                )
            )

    def sample_abs_posemb(self, grid_h: int, grid_w: int):
        """Return absolute position embeddings resized to (grid_h, grid_w).

        When the requested grid differs from the training grid, the learned
        embeddings are bilinearly interpolated; the CLS embedding (if any) is
        kept unchanged and re-prepended.
        """
        if self.posemb_grid_size == grid_h and self.posemb_grid_size == grid_w:
            return self.positional_embedding[None, ...]
        pos_embed = self.positional_embedding
        if self.use_cls_token:
            cls_token_embed, pos_embed = pos_embed[:1], pos_embed[1:]
        # (N, width) -> (1, width, grid, grid) so F.interpolate can resize.
        pos_embed = (
            pos_embed.reshape(1, self.posemb_grid_size, self.posemb_grid_size, -1)
            .permute(0, 3, 1, 2)
            .contiguous()
        )
        pos_embed = F.interpolate(
            pos_embed, size=(grid_h, grid_w), mode="bilinear", align_corners=False
        )
        pos_embed = pos_embed.permute(0, 2, 3, 1).reshape(-1, self.width)
        if self.use_cls_token:
            pos_embed = torch.cat([cls_token_embed, pos_embed], dim=0)
        return pos_embed[None, ...]

    def forward_features(self, x: torch.Tensor):
        """Patchify, add CLS/pos-emb, run the ViT.

        Returns (batch, grid_h*grid_w, width) patch tokens with the CLS
        token stripped (downstream conv layers need a pure grid).
        """
        batch, _, h, w = x.shape
        grid_h, grid_w = h // self.patch_size, w // self.patch_size
        x = self.conv1(x)
        # (B, C, gh, gw) -> (B, gh*gw, C) token sequence.
        x = x.permute(0, 2, 3, 1).reshape(batch, -1, self.width)
        if self.use_cls_token:
            x = torch.cat(
                [self.class_embedding.view(1, 1, -1).expand(batch, -1, -1), x], dim=1
            )
        if self.use_abs_posemb:
            x = x + self.sample_abs_posemb(grid_h, grid_w)
        x = self.ln_pre(x)
        x = self.transformer(x, grid_hw=(grid_h, grid_w))
        x = self.ln_post(x)
        if self.use_cls_token:
            x = x[:, 1:, :]
        return x

    def forward(self, x: torch.Tensor):
        """Encode images to downsampled patch features.

        Assumes a square patch grid (P must be a perfect square); output is
        (B, (T')**2, 4*width) where T' is the post-downsampling side length.
        """
        x = self.forward_features(x)
        B, P, C = x.shape
        # Recover the square grid side from the token count.
        T = int(P**0.5)
        x = x.transpose(2, 1).contiguous()
        x = x.view(B, C, T, T)
        x = self.vit_downsampler1(x)
        x = self.vit_downsampler2(x)
        # NOTE: T is deliberately re-bound to the downsampled side length;
        # both spatial dims unpack into the same name (square grid).
        B, C, T, T = x.shape
        return x.view(B, -1, T * T).transpose(1, 2)
class StepVLForConditionalGeneration(Step3VLForConditionalGeneration):
    """Step-VL: a Step3-VL variant that replaces the vision tower with a
    PerceptionEncoder and uses a single linear projector for image features.
    """

    # Remaps HF checkpoint names onto vLLM module paths (prefixes) and maps
    # the ViT's fused in_proj / MLP naming onto the parallel-linear layout.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "model.": "language_model.model.",
            "lm_head.": "language_model.lm_head.",
        },
        orig_to_new_substr={
            ".attn.in_proj_weight": ".attn.qkv_proj.weight",
            ".attn.in_proj_bias": ".attn.qkv_proj.bias",
            ".mlp.c_fc": ".mlp.fc1",
            ".mlp.c_proj": ".mlp.fc2",
        },
    )

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        # Deliberately bypasses Step3VLForConditionalGeneration.__init__ and
        # runs its base class' initializer instead: this class builds its own
        # vision tower and projector below.
        super(Step3VLForConditionalGeneration, self).__init__()
        config = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.multimodal_config = multimodal_config
        # "data" TP mode replicates the encoder per DP rank and disables TP.
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        with self._mark_tower_model(vllm_config, "image"):
            self.vision_model = PerceptionEncoder(
                config.vision_config,
                get_act_fn(config.vision_config.hidden_act),
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "vision_model"),
            )
            # Projects 4x-width ViT features into the LM hidden size.
            self.vit_large_projector = ColumnParallelLinear(
                config.vision_config.width * 4,
                config.text_config.hidden_size,
                bias=config.projector_bias,
                gather_output=True,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "vit_large_projector"),
                disable_tp=self.use_data_parallel,
            )
        with self._mark_language_model(vllm_config):
            self.language_model = init_vllm_registered_model(
                vllm_config=vllm_config,
                hf_config=config.text_config,
                prefix=maybe_prefix(prefix, "language_model"),
            )
        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors
        )

    def _get_vision_model_output(
        self, input_tensor: torch.Tensor | None
    ) -> torch.Tensor | None:
        """Run the vision tower (DP-sharded if configured); None passes through."""
        if input_tensor is None:
            return None
        if self.use_data_parallel:
            return run_dp_sharded_vision_model(input_tensor, self.vision_model)
        return self.vision_model(input_tensor)

    def _process_image_features(self, image_features: torch.Tensor) -> torch.Tensor:
        """Project ViT features into the language-model embedding space."""
        image_features, _ = self.vit_large_projector(image_features)
        return image_features
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/step_vl.py",
"license": "Apache License 2.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/layers/fused_moe/oracle/unquantized.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from enum import Enum
import torch
from torch.nn import Module
import vllm.envs as envs
import vllm.model_executor.layers.fused_moe.modular_kernel as mk
from vllm._aiter_ops import rocm_aiter_ops
from vllm.config.kernel import MoEBackend
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.config import (
FusedMoEConfig,
FusedMoEQuantConfig,
)
from vllm.model_executor.layers.fused_moe.flashinfer_trtllm_moe import (
is_supported_config_trtllm_bf16,
)
from vllm.model_executor.layers.fused_moe.prepare_finalize import (
MoEPrepareAndFinalizeNoEP,
)
from vllm.model_executor.layers.quantization.utils.flashinfer_utils import (
swap_w13_to_w31,
)
from vllm.platforms import current_platform
from vllm.utils.flashinfer import has_flashinfer, has_flashinfer_cutlass_fused_moe
logger = init_logger(__name__)
class UnquantizedMoeBackend(Enum):
    """Kernel backends available for unquantized fused-MoE execution.

    Values are the human-readable names used in log messages.
    """

    FLASHINFER_TRTLLM = "FlashInfer TRTLLM"
    FLASHINFER_CUTLASS = "FlashInfer CUTLASS"
    AITER = "ROCm AITER"
    TRITON = "TRITON"
    CPU = "CPU"
    XPU = "XPU"
    TPU = "TPU"
    OOT = "OOT"  # out-of-tree platform
# NOTE(zyongye): "Unsupported" backends are those that do not conform to the
# modular kernel format. For these backends the kernel is invoked directly
# instead of being wrapped in a FusedMoEModularKernel.
UNSUPPORTED_BACKEND = [
    UnquantizedMoeBackend.FLASHINFER_TRTLLM,
    UnquantizedMoeBackend.CPU,
    UnquantizedMoeBackend.TPU,
    UnquantizedMoeBackend.OOT,
]
def map_unquantized_backend(runner_backend: MoEBackend) -> UnquantizedMoeBackend:
    """Map user's MoEBackend to UnquantizedMoeBackend.

    Raises:
        ValueError: if the requested backend has no unquantized equivalent.
    """
    supported = {
        "triton": UnquantizedMoeBackend.TRITON,
        "flashinfer_trtllm": UnquantizedMoeBackend.FLASHINFER_TRTLLM,
        "flashinfer_cutlass": UnquantizedMoeBackend.FLASHINFER_CUTLASS,
        "aiter": UnquantizedMoeBackend.AITER,
    }
    resolved = supported.get(runner_backend)
    if resolved is None:
        raise ValueError(
            f"moe_backend='{runner_backend}' is not supported for unquantized MoE. "
            f"Expected one of {list(supported.keys())}."
        )
    return resolved
def select_unquantized_moe_backend(
    moe_config: FusedMoEConfig,
    use_ep: bool,
    use_dp: bool,
) -> UnquantizedMoeBackend:
    """
    Select the primary Unquantized MoE backend
    Note: Shape-specific fallbacks may still occur at runtime.
    """

    def _make_log_backend(backend: UnquantizedMoeBackend):
        # Uniform log line for whichever backend ends up selected.
        return f"Using {backend.value} backend for Unquantized MoE"

    activation_format = (
        mk.FusedMoEActivationFormat.BatchedExperts
        if moe_config.moe_parallel_config.use_batched_activation_format
        else mk.FusedMoEActivationFormat.Standard
    )
    # Check if FlashInfer TRTLLM BF16 MoE is supported
    trtllm_supported, _ = is_supported_config_trtllm_bf16(
        moe_config=moe_config,
        activation_format=activation_format,
    )
    flashinfer_trtllm_available = has_flashinfer() and trtllm_supported
    # FlashInfer CUTLASS MoE is only supported on Hopper and later GPUS
    flashinfer_cutlass_available = (
        has_flashinfer_cutlass_fused_moe()
        and use_ep
        and (not use_dp)
        and current_platform.has_device_capability(90)
    )
    # "available" = usable on this config; "enabled" additionally requires the
    # user's env opt-in (VLLM_USE_FLASHINFER_MOE_FP16 / ..._MOE_BACKEND).
    flashinfer_trtllm_moe_enabled = (
        flashinfer_trtllm_available
        and envs.VLLM_USE_FLASHINFER_MOE_FP16
        and envs.VLLM_FLASHINFER_MOE_BACKEND == "latency"
    )
    flashinfer_cutlass_moe_enabled = (
        flashinfer_cutlass_available and envs.VLLM_USE_FLASHINFER_MOE_FP16
    )
    rocm_aiter_moe_enabled = rocm_aiter_ops.is_fused_moe_enabled()
    # Handle explicit moe_backend from user.
    runner_backend = moe_config.moe_backend
    if runner_backend != "auto":
        # Explicit request: validate availability and fail fast otherwise.
        requested_backend = map_unquantized_backend(runner_backend)
        if requested_backend == UnquantizedMoeBackend.FLASHINFER_TRTLLM:
            if not flashinfer_trtllm_available:
                raise ValueError(
                    "FlashInfer TRTLLM MoE backend is not available for this "
                    "configuration."
                )
        elif requested_backend == UnquantizedMoeBackend.FLASHINFER_CUTLASS:
            if not flashinfer_cutlass_available:
                raise ValueError(
                    "FlashInfer CUTLASS MoE backend is not available for this "
                    "configuration."
                )
        elif requested_backend == UnquantizedMoeBackend.AITER and not (
            current_platform.is_rocm() and rocm_aiter_moe_enabled
        ):
            raise ValueError(
                "ROCm AITer MoE backend is not available for this configuration."
            )
        logger.info_once(_make_log_backend(requested_backend), scope="local")
        return requested_backend
    # Automatic selection by platform.
    # NOTE(review): if no platform branch below matches, `backend` is unbound
    # and the final log/return raises UnboundLocalError — confirm the platform
    # checks are exhaustive.
    if current_platform.is_rocm():
        if rocm_aiter_moe_enabled:
            backend = UnquantizedMoeBackend.AITER
        else:
            backend = UnquantizedMoeBackend.TRITON
    if current_platform.is_cuda():
        if flashinfer_trtllm_moe_enabled:
            backend = UnquantizedMoeBackend.FLASHINFER_TRTLLM
        elif flashinfer_cutlass_moe_enabled:
            backend = UnquantizedMoeBackend.FLASHINFER_CUTLASS
            if trtllm_supported:
                # CUTLASS chosen, but TRTLLM would also work — hint the user.
                logger.info_once(
                    "FlashInfer TRTLLM MoE is available but not enabled, "
                    "consider setting VLLM_FLASHINFER_MOE_BACKEND=latency "
                    "to enable it for better performance.",
                    scope="local",
                )
        else:
            # Neither FlashInfer path is enabled: log the most relevant hint.
            if not envs.VLLM_USE_FLASHINFER_MOE_FP16 and trtllm_supported:
                logger.info_once(
                    "FlashInfer TRTLLM MoE is available but not enabled, "
                    "consider setting VLLM_USE_FLASHINFER_MOE_FP16=1 "
                    "and VLLM_FLASHINFER_MOE_BACKEND=latency "
                    "to enable it for better performance.",
                    scope="local",
                )
            elif use_ep and (not use_dp):
                logger.info_once(
                    "FlashInfer MoE is available for EP"
                    " but not enabled, consider setting"
                    " VLLM_USE_FLASHINFER_MOE_FP16=1 to enable it.",
                    scope="local",
                )
            elif use_dp:
                logger.info_once(
                    "FlashInfer CUTLASS MoE is currently not available for DP.",
                    scope="local",
                )
            backend = UnquantizedMoeBackend.TRITON
    if current_platform.is_xpu():
        backend = UnquantizedMoeBackend.XPU
    if current_platform.is_cpu():
        backend = UnquantizedMoeBackend.CPU
    if current_platform.is_tpu():
        backend = UnquantizedMoeBackend.TPU
    if current_platform.is_out_of_tree():
        backend = UnquantizedMoeBackend.OOT
    logger.info_once(_make_log_backend(backend), scope="local")
    return backend
def convert_to_unquantized_kernel_format(
    unquantized_backend: UnquantizedMoeBackend,
    layer: Module,
    w13_weight: torch.Tensor | None = None,
    w2_weight: torch.Tensor | None = None,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Re-layout the layer's weights into the format the backend expects.

    Backends other than AITER / FlashInfer CUTLASS receive the inputs back
    unchanged.
    """
    if unquantized_backend is UnquantizedMoeBackend.AITER:
        # AITER kernels consume shuffled weight layouts.
        w13_weight, w2_weight = rocm_aiter_ops.shuffle_weights(
            layer.w13_weight.data, layer.w2_weight.data
        )
    elif unquantized_backend is UnquantizedMoeBackend.FLASHINFER_CUTLASS:
        # Swap halves to arrange as [w3; w1] (kernel expectation)
        w13_weight = swap_w13_to_w31(layer.w13_weight.data)
    return w13_weight, w2_weight
def make_unquantized_moe_kernel(
    backend: UnquantizedMoeBackend,
    quant_config: FusedMoEQuantConfig,
    moe_config: FusedMoEConfig,
) -> mk.FusedMoEModularKernel | None:
    """Build the modular-kernel pipeline for *backend*.

    Returns None for backends that do not conform to the modular kernel
    format (they are invoked directly instead).
    """
    if backend in UNSUPPORTED_BACKEND:
        return None
    # Resolve the experts implementation lazily (these imports may pull in
    # heavy optional dependencies) together with the inplace policy.
    if backend == UnquantizedMoeBackend.FLASHINFER_CUTLASS:
        from vllm.model_executor.layers.fused_moe.flashinfer_cutlass_moe import (
            FlashInferExperts,
        )

        experts_cls = FlashInferExperts
        use_inplace = False  # this path always runs out-of-place
    elif backend == UnquantizedMoeBackend.AITER:
        from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import (
            AiterExperts,
        )

        experts_cls = AiterExperts
        use_inplace = not moe_config.disable_inplace
    elif backend == UnquantizedMoeBackend.TRITON:
        from vllm.model_executor.layers.fused_moe import TritonExperts

        experts_cls = TritonExperts
        use_inplace = not moe_config.disable_inplace
    elif backend == UnquantizedMoeBackend.XPU:
        from vllm.model_executor.layers.fused_moe import XPUExperts

        experts_cls = XPUExperts
        use_inplace = not moe_config.disable_inplace
    return mk.FusedMoEModularKernel(
        MoEPrepareAndFinalizeNoEP(),
        experts_cls(
            moe_config=moe_config,
            quant_config=quant_config,
        ),
        inplace=use_inplace,
    )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/layers/fused_moe/oracle/unquantized.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tools/vllm-rocm/pin_rocm_dependencies.py | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Pin vLLM dependencies to exact versions of custom ROCm wheels.
This script modifies vLLM's requirements files to replace version constraints
with exact versions of custom-built ROCm wheels (torch, triton, torchvision, amdsmi).
This ensures that 'pip install vllm' automatically installs the correct custom wheels
instead of allowing pip to download different versions from PyPI.
"""
import sys
from pathlib import Path
import regex as re
def extract_version_from_wheel(wheel_name: str) -> str:
    """
    Extract version from wheel filename.

    Example:
        torch-2.9.0a0+git1c57644-cp312-cp312-linux_x86_64.whl -> 2.9.0a0+git1c57644
        triton-3.4.0-cp312-cp312-linux_x86_64.whl -> 3.4.0

    Raises:
        ValueError: if the filename does not look like a valid wheel name.
    """
    # Wheel format (PEP 427):
    # {distribution}-{version}(-{build tag})?-{python}-{abi}-{platform}.whl
    # Use removesuffix so ".whl" is only stripped from the end of the name;
    # str.replace would also delete the substring if it appeared mid-name.
    parts = wheel_name.removesuffix(".whl").split("-")
    # A valid wheel has at least distribution, version, python, abi, platform.
    if len(parts) < 5:
        raise ValueError(f"Invalid wheel filename format: {wheel_name}")
    # Version is always the second dash-separated field.
    return parts[1]
def get_custom_wheel_versions(install_dir: str) -> dict[str, str]:
    """
    Read /install directory and extract versions of custom wheels.

    Returns:
        Dict mapping package names to exact versions
    """
    install_path = Path(install_dir)
    if not install_path.exists():
        print(f"ERROR: Install directory not found: {install_dir}", file=sys.stderr)
        sys.exit(1)

    # Wheel-filename prefix -> canonical package name. The trailing dash in
    # each prefix prevents substring collisions (e.g. 'torch' matching
    # 'torchvision'); the order of this list also fixes the order of the
    # returned dict, which is preserved when pinning requirements files.
    prefix_to_package = [
        ("torch-", "torch"),  # Match torch- (not torchvision)
        ("triton-", "triton"),  # Match triton- (not triton_kernels)
        ("triton_kernels-", "triton-kernels"),  # Match triton_kernels-
        ("torchvision-", "torchvision"),  # Match torchvision-
        ("torchaudio-", "torchaudio"),  # Match torchaudio-
        ("amdsmi-", "amdsmi"),  # Match amdsmi-
        ("flash_attn-", "flash-attn"),  # Match flash_attn-
        ("aiter-", "aiter"),  # Match aiter-
    ]

    found: dict[str, str] = {}
    for wheel_path in install_path.glob("*.whl"):
        fname = wheel_path.name
        for prefix, package in prefix_to_package:
            if not fname.startswith(prefix):
                continue
            try:
                version = extract_version_from_wheel(fname)
                found[package] = version
                print(f"Found {package}=={version}", file=sys.stderr)
            except Exception as err:
                print(
                    f"WARNING: Could not extract version from {fname}: {err}",
                    file=sys.stderr,
                )
            break

    # Emit packages in mapping order so pinning is deterministic.
    return {pkg: found[pkg] for _, pkg in prefix_to_package if pkg in found}
def pin_dependencies_in_requirements(requirements_path: str, versions: dict[str, str]):
"""
Insert custom wheel pins at the TOP of requirements file.
This ensures that when setup.py processes the file line-by-line,
custom wheels (torch, triton, etc.) are encountered FIRST, before
any `-r common.txt` includes that might pull in other dependencies.
Creates:
# Custom ROCm wheel pins (auto-generated)
torch==2.9.0a0+git1c57644
triton==3.4.0
torchvision==0.23.0a0+824e8c8
amdsmi==26.1.0+5df6c765
-r common.txt
... rest of file ...
"""
requirements_file = Path(requirements_path)
if not requirements_file.exists():
print(
f"ERROR: Requirements file not found: {requirements_path}", file=sys.stderr
)
sys.exit(1)
# Backup original file
backup_file = requirements_file.with_suffix(requirements_file.suffix + ".bak")
with open(requirements_file) as f:
original_lines = f.readlines()
# Write backup
with open(backup_file, "w") as f:
f.writelines(original_lines)
# Build header with pinned custom wheels
header_lines = [
"# Custom ROCm wheel pins (auto-generated by pin_rocm_dependencies.py)\n",
"# These must come FIRST to ensure correct dependency resolution\n",
]
for package_name, exact_version in versions.items():
header_lines.append(f"{package_name}=={exact_version}\n")
header_lines.append("\n") # Blank line separator
# Filter out any existing entries for custom packages from original file
filtered_lines = []
removed_packages = []
for line in original_lines:
stripped = line.strip()
should_keep = True
# Check if this line is for one of our custom packages
if stripped and not stripped.startswith("#") and not stripped.startswith("-"):
for package_name in versions:
# Handle both hyphen and underscore variations
pattern_name = package_name.replace("-", "[-_]")
pattern = rf"^{pattern_name}\s*[=<>]=?\s*[\d.a-zA-Z+]+"
if re.match(pattern, stripped, re.IGNORECASE):
removed_packages.append(f"{package_name}: {stripped}")
should_keep = False
break
if should_keep:
filtered_lines.append(line)
# Combine: header + filtered original content
final_lines = header_lines + filtered_lines
# Write modified content
with open(requirements_file, "w") as f:
f.writelines(final_lines)
# Print summary
print("\n✓ Inserted custom wheel pins at TOP of requirements:", file=sys.stderr)
for package_name, exact_version in versions.items():
print(f" - {package_name}=={exact_version}", file=sys.stderr)
if removed_packages:
print("\n✓ Removed old package entries:", file=sys.stderr)
for pkg in removed_packages:
print(f" - {pkg}", file=sys.stderr)
print(f"\n✓ Patched requirements file: {requirements_path}", file=sys.stderr)
print(f" Backup saved: {backup_file}", file=sys.stderr)
def main():
    """CLI entry point: ``pin_rocm_dependencies.py <install_dir> <requirements_file>``.

    Scans install_dir for custom ROCm wheels and patches requirements_file so
    pip installs exactly those versions. Exits non-zero on usage errors or
    when no custom wheels are found.
    """
    if len(sys.argv) != 3:
        print(
            f"Usage: {sys.argv[0]} <install_dir> <requirements_file>", file=sys.stderr
        )
        print(
            f"Example: {sys.argv[0]} /install /app/vllm/requirements/rocm.txt",
            file=sys.stderr,
        )
        sys.exit(1)
    install_dir = sys.argv[1]
    requirements_path = sys.argv[2]
    print("=" * 70, file=sys.stderr)
    print("Pinning vLLM dependencies to custom ROCm wheel versions", file=sys.stderr)
    print("=" * 70, file=sys.stderr)
    # Get versions from custom wheels
    print(f"\nScanning {install_dir} for custom wheels...", file=sys.stderr)
    versions = get_custom_wheel_versions(install_dir)
    if not versions:
        print("\nERROR: No custom wheels found in /install!", file=sys.stderr)
        sys.exit(1)
    # Pin dependencies in requirements file
    print(f"\nPatching {requirements_path}...", file=sys.stderr)
    pin_dependencies_in_requirements(requirements_path, versions)
    print("\n" + "=" * 70, file=sys.stderr)
    print("✓ Dependency pinning complete!", file=sys.stderr)
    print("=" * 70, file=sys.stderr)
    sys.exit(0)


if __name__ == "__main__":
    main()
| {
"repo_id": "vllm-project/vllm",
"file_path": "tools/vllm-rocm/pin_rocm_dependencies.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:tests/multimodal/media/test_audio.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
from vllm.multimodal.media import AudioMediaIO
pytestmark = pytest.mark.cpu_test
ASSETS_DIR = Path(__file__).parent.parent / "assets"
assert ASSETS_DIR.exists()
@pytest.fixture
def dummy_audio():
    """Five increasing float samples standing in for a decoded waveform."""
    samples = [0.0, 0.1, 0.2, 0.3, 0.4]
    return np.asarray(samples, dtype=float)
@pytest.fixture
def dummy_audio_bytes():
    """Arbitrary bytes; the decoder is patched in tests, so content is unused."""
    payload = b"FAKEAUDIOBYTES"
    return payload
def test_audio_media_io_load_bytes(dummy_audio_bytes):
    """load_bytes should delegate decoding to librosa and pass its result through."""
    media_io = AudioMediaIO()
    with patch("librosa.load") as mocked:
        mocked.return_value = (np.array([0.1, 0.2]), 16000)
        waveform, sample_rate = media_io.load_bytes(dummy_audio_bytes)
    mocked.assert_called_once()
    assert isinstance(waveform, np.ndarray)
    assert sample_rate == 16000
def test_audio_media_io_load_base64(dummy_audio_bytes):
    """load_base64 should decode the payload and delegate to load_bytes."""
    media_io = AudioMediaIO()
    payload = base64.b64encode(dummy_audio_bytes).decode("utf-8")
    with patch.object(AudioMediaIO, "load_bytes") as mocked:
        mocked.return_value = (np.array([0.1, 0.2]), 16000)
        waveform, sample_rate = media_io.load_base64("audio/wav", payload)
    mocked.assert_called_once()
    assert isinstance(waveform, np.ndarray)
    assert sample_rate == 16000
def test_audio_media_io_load_file():
    """load_file should call librosa.load with sr=None (native sampling rate)."""
    media_io = AudioMediaIO()
    fake_path = Path("/fake/path.wav")
    with patch("librosa.load") as mocked:
        mocked.return_value = (np.array([0.1, 0.2]), 16000)
        waveform, sample_rate = media_io.load_file(fake_path)
    mocked.assert_called_once_with(fake_path, sr=None)
    assert isinstance(waveform, np.ndarray)
    assert sample_rate == 16000
def test_audio_media_io_encode_base64(dummy_audio):
    """encode_base64 should return base64 of whatever soundfile writes."""
    media_io = AudioMediaIO()
    with patch("soundfile.write") as mocked_write:
        # Stand in for the WAV encoder: just write a known marker.
        mocked_write.side_effect = lambda buffer, *_a, **_kw: buffer.write(
            b"dummy_wav_data"
        )
        encoded = media_io.encode_base64((dummy_audio, 16000))
    mocked_write.assert_called_once()
    assert base64.b64decode(encoded) == b"dummy_wav_data"
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/media/test_audio.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/multimodal/media/test_base.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pickle
from pathlib import Path
import pytest
from PIL import Image
from vllm.multimodal.media import MediaWithBytes
pytestmark = pytest.mark.cpu_test
ASSETS_DIR = Path(__file__).parent.parent / "assets"
assert ASSETS_DIR.exists()
def test_media_with_bytes_pickle_roundtrip():
    """Regression test for pickle/unpickle of MediaWithBytes.

    Verifies that MediaWithBytes can be pickled and unpickled without
    RecursionError. See: https://github.com/vllm-project/vllm/issues/30818
    """
    image = Image.open(ASSETS_DIR / "image1.png").convert("RGB")
    raw = b"test_bytes_data"
    wrapped = MediaWithBytes(media=image, original_bytes=raw)

    # Attribute access must delegate to the wrapped image before pickling.
    for attr in ("width", "height", "mode"):
        assert getattr(wrapped, attr) == getattr(image, attr)

    # Round-trip (this raised RecursionError before the fix).
    restored = pickle.loads(pickle.dumps(wrapped))

    # The restored wrapper must carry the same payload and media.
    assert restored.original_bytes == raw
    assert restored.media.width == image.width
    assert restored.media.height == image.height

    # Delegation must keep working after unpickling.
    for attr in ("width", "height", "mode"):
        assert getattr(restored, attr) == getattr(image, attr)
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/media/test_base.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/multimodal/media/test_image.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
import numpy as np
import pytest
from PIL import Image
from vllm.multimodal.media import ImageMediaIO
pytestmark = pytest.mark.cpu_test
ASSETS_DIR = Path(__file__).parent.parent / "assets"
assert ASSETS_DIR.exists()
def test_image_media_io_rgba_custom_background(tmp_path):
    """Test RGBA to RGB conversion with custom background colors."""
    # Red, fully opaque image with a fully transparent top-left 5x5 quadrant.
    rgba_image = Image.new("RGBA", (10, 10), (255, 0, 0, 255))
    for i in range(5):
        for j in range(5):
            rgba_image.putpixel((i, j), (0, 0, 0, 0))
    test_image_path = tmp_path / "test_rgba.png"
    rgba_image.save(test_image_path)

    def rgb_at(converted, row, col):
        """Return the (R, G, B) triple at a pixel of the converted image."""
        arr = np.array(converted)
        return tuple(int(v) for v in arr[row][col][:3])

    # Default white background (backward compatibility).
    converted = ImageMediaIO().load_file(test_image_path)
    assert rgb_at(converted, 0, 0) == (255, 255, 255)  # transparent -> white
    assert rgb_at(converted, 5, 5) == (255, 0, 0)  # opaque stays red

    # Custom black background via kwargs.
    converted = ImageMediaIO(rgba_background_color=(0, 0, 0)).load_file(
        test_image_path
    )
    assert rgb_at(converted, 0, 0) == (0, 0, 0)  # transparent -> black
    assert rgb_at(converted, 5, 5) == (255, 0, 0)  # opaque stays red

    # Custom blue background supplied as a list.
    converted = ImageMediaIO(rgba_background_color=[0, 0, 255]).load_file(
        test_image_path
    )
    assert rgb_at(converted, 0, 0) == (0, 0, 255)  # transparent -> blue

    # load_bytes honours the background color as well.
    with open(test_image_path, "rb") as f:
        image_data = f.read()
    converted = ImageMediaIO(rgba_background_color=(0, 255, 0)).load_bytes(image_data)
    assert rgb_at(converted, 0, 0) == (0, 255, 0)  # transparent -> green
def test_image_media_io_rgba_background_color_validation():
    """Test that invalid rgba_background_color values are properly rejected."""
    invalid_values = [
        "255,255,255",  # string instead of a sequence of ints
        255,  # bare int
        (255, 255),  # too few elements
        (255, 255, 255, 255),  # too many elements
        (255.0, 255.0, 255.0),  # floats are not accepted
        (255, "255", 255),  # mixed element types
        (256, 255, 255),  # channel value above 255
        (255, -1, 255),  # negative channel value
    ]
    for value in invalid_values:
        with pytest.raises(
            ValueError, match="rgba_background_color must be a list or tuple"
        ):
            ImageMediaIO(rgba_background_color=value)

    # Valid specifications must be accepted without raising.
    ImageMediaIO(rgba_background_color=(0, 0, 0))
    ImageMediaIO(rgba_background_color=[255, 255, 255])
    ImageMediaIO(rgba_background_color=(128, 128, 128))
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/media/test_image.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:tests/multimodal/media/test_video.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pathlib import Path
import numpy as np
import numpy.typing as npt
import pytest
from PIL import Image
from vllm.assets.base import get_vllm_public_assets
from vllm.assets.video import video_to_ndarrays, video_to_pil_images_list
from vllm.multimodal.media import ImageMediaIO, VideoMediaIO
from vllm.multimodal.video import VIDEO_LOADER_REGISTRY, VideoLoader
from ..utils import cosine_similarity, create_video_from_image, normalize_image
pytestmark = pytest.mark.cpu_test
ASSETS_DIR = Path(__file__).parent.parent / "assets"
assert ASSETS_DIR.exists()
@VIDEO_LOADER_REGISTRY.register("assert_10_frames_1_fps")
class Assert10Frames1FPSVideoLoader(VideoLoader):
    # Test-only loader: asserts that the num_frames/fps kwargs forwarded by
    # VideoMediaIO arrive with the expected values, then returns fake frames.
    @classmethod
    def load_bytes(
        cls, data: bytes, num_frames: int = -1, fps: float = -1.0, **kwargs
    ) -> npt.NDArray:
        # These messages are matched by pytest.raises in
        # test_video_media_io_kwargs below.
        assert num_frames == 10, "bad num_frames"
        assert fps == 1.0, "bad fps"
        # NOTE(review): FAKE_OUTPUT_2 is defined later in the module; it is
        # resolved at call time, so the forward reference is safe.
        return FAKE_OUTPUT_2
def test_video_media_io_kwargs(monkeypatch: pytest.MonkeyPatch):
    """Check that VideoMediaIO forwards num_frames/fps kwargs to the loader.

    The "assert_10_frames_1_fps" test loader asserts that it receives
    num_frames=10 and fps=1.0, so constructing VideoMediaIO with any other
    values must trip the corresponding assertion.
    """
    with monkeypatch.context() as ctx:
        ctx.setenv("VLLM_VIDEO_LOADER_BACKEND", "assert_10_frames_1_fps")
        image_io = ImageMediaIO()

        # Matching kwargs pass; unknown extras are forwarded and ignored.
        _ = VideoMediaIO(image_io, num_frames=10, fps=1.0).load_bytes(b"test")
        _ = VideoMediaIO(
            image_io, num_frames=10, fps=1.0, not_used="not_used"
        ).load_bytes(b"test")

        # Missing or wrong values trip the loader's assertions.
        with pytest.raises(AssertionError, match="bad num_frames"):
            _ = VideoMediaIO(image_io).load_bytes(b"test")
        with pytest.raises(AssertionError, match="bad num_frames"):
            _ = VideoMediaIO(image_io, num_frames=9, fps=1.0).load_bytes(b"test")
        with pytest.raises(AssertionError, match="bad fps"):
            _ = VideoMediaIO(image_io, num_frames=10, fps=2.0).load_bytes(b"test")
@pytest.mark.parametrize("is_color", [True, False])
@pytest.mark.parametrize("fourcc, ext", [("mp4v", "mp4"), ("XVID", "avi")])
def test_opencv_video_io_colorspace(tmp_path, is_color: bool, fourcc: str, ext: str):
    """
    Test all functions that use OpenCV for video I/O return RGB format.
    Both RGB and grayscale videos are tested.
    """
    image_path = get_vllm_public_assets(
        filename="stop_sign.jpg", s3_prefix="vision_model_images"
    )
    image = Image.open(image_path)
    if not is_color:
        image_path = f"{tmp_path}/test_grayscale_image.png"
        image = image.convert("L")
        image.save(image_path)
        # Convert to gray RGB for comparison
        image = image.convert("RGB")
    video_path = f"{tmp_path}/test_RGB_video.{ext}"
    create_video_from_image(
        image_path,
        video_path,
        num_frames=2,
        is_color=is_color,
        fourcc=fourcc,
    )

    reference = normalize_image(np.array(image))

    def assert_frames_match_reference(decoded_frames):
        # Every decoded frame should closely match the source image after
        # normalization; a small NaN fraction is tolerated.
        for decoded in decoded_frames:
            sim = cosine_similarity(normalize_image(np.array(decoded)), reference)
            assert np.sum(np.isnan(sim)) / sim.size < 0.001
            assert np.nanmean(sim) > 0.99

    assert_frames_match_reference(video_to_ndarrays(video_path))
    assert_frames_match_reference(video_to_pil_images_list(video_path))
    io_frames, _ = VideoMediaIO(ImageMediaIO()).load_file(Path(video_path))
    assert_frames_match_reference(io_frames)
# Shared fixtures for the backend-override tests below: two distinguishable
# fake frame stacks of shape (NUM_FRAMES, 1280, 720, 3).
NUM_FRAMES = 10
FAKE_OUTPUT_1 = np.random.rand(NUM_FRAMES, 1280, 720, 3)
FAKE_OUTPUT_2 = np.random.rand(NUM_FRAMES, 1280, 720, 3)
@VIDEO_LOADER_REGISTRY.register("test_video_backend_override_1")
class TestVideoBackendOverride1(VideoLoader):
    """Test loader that returns FAKE_OUTPUT_1 to verify backend selection."""

    @classmethod
    def load_bytes(
        cls, data: bytes, num_frames: int = -1, **kwargs
    ) -> tuple[npt.NDArray, dict]:
        # The metadata records which backend actually ran so tests can
        # assert on the selected loader.
        return FAKE_OUTPUT_1, {"video_backend": "test_video_backend_override_1"}
@VIDEO_LOADER_REGISTRY.register("test_video_backend_override_2")
class TestVideoBackendOverride2(VideoLoader):
    """Test loader that returns FAKE_OUTPUT_2 to verify backend selection."""

    @classmethod
    def load_bytes(
        cls, data: bytes, num_frames: int = -1, **kwargs
    ) -> tuple[npt.NDArray, dict]:
        # The metadata records which backend actually ran so tests can
        # assert on the selected loader.
        return FAKE_OUTPUT_2, {"video_backend": "test_video_backend_override_2"}
def test_video_media_io_backend_kwarg_override(monkeypatch: pytest.MonkeyPatch):
    """
    Test that video_backend kwarg can override the VLLM_VIDEO_LOADER_BACKEND
    environment variable.

    This allows users to dynamically select a different video backend
    via --media-io-kwargs without changing the global env var, which is
    useful when plugins set a default backend but a specific request
    needs a different one.
    """
    with monkeypatch.context() as ctx:
        # Point the env var at backend 1.
        ctx.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_video_backend_override_1")
        image_io = ImageMediaIO()

        # No kwarg: the env var backend (1) is selected.
        frames, metadata = VideoMediaIO(image_io, num_frames=10).load_bytes(b"test")
        np.testing.assert_array_equal(frames, FAKE_OUTPUT_1)
        assert metadata["video_backend"] == "test_video_backend_override_1"

        # Explicit kwarg: backend 2 wins over the env var.
        frames, metadata = VideoMediaIO(
            image_io, num_frames=10, video_backend="test_video_backend_override_2"
        ).load_bytes(b"test")
        np.testing.assert_array_equal(frames, FAKE_OUTPUT_2)
        assert metadata["video_backend"] == "test_video_backend_override_2"
def test_video_media_io_backend_kwarg_not_passed_to_loader(
    monkeypatch: pytest.MonkeyPatch,
):
    """
    Test that video_backend kwarg is consumed by VideoMediaIO and NOT passed
    through to the underlying video loader's load_bytes method.
    This ensures the kwarg is properly popped from kwargs before forwarding.
    """

    @VIDEO_LOADER_REGISTRY.register("test_reject_video_backend_kwarg")
    class RejectVideoBackendKwargLoader(VideoLoader):
        """Test loader that fails if video_backend is passed through."""

        @classmethod
        def load_bytes(
            cls, data: bytes, num_frames: int = -1, **kwargs
        ) -> tuple[npt.NDArray, dict]:
            # This should never receive video_backend in kwargs
            if "video_backend" in kwargs:
                raise AssertionError(
                    "video_backend should be consumed by VideoMediaIO, "
                    "not passed to loader"
                )
            # Echo back the forwarded kwarg names so the test can verify
            # exactly which kwargs reached the loader.
            return FAKE_OUTPUT_1, {"received_kwargs": list(kwargs.keys())}

    with monkeypatch.context() as m:
        m.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_reject_video_backend_kwarg")
        imageio = ImageMediaIO()
        # Even when video_backend is provided, it should NOT be passed to loader
        videoio = VideoMediaIO(
            imageio,
            num_frames=10,
            video_backend="test_reject_video_backend_kwarg",
            other_kwarg="should_pass_through",
        )
        # This should NOT raise AssertionError
        frames, metadata = videoio.load_bytes(b"test")
        np.testing.assert_array_equal(frames, FAKE_OUTPUT_1)
        # Verify other kwargs are still passed through
        assert "other_kwarg" in metadata["received_kwargs"]
def test_video_media_io_backend_env_var_fallback(monkeypatch: pytest.MonkeyPatch):
    """
    Test that when video_backend kwarg is None or not provided,
    VideoMediaIO falls back to VLLM_VIDEO_LOADER_BACKEND env var.
    """
    with monkeypatch.context() as ctx:
        ctx.setenv("VLLM_VIDEO_LOADER_BACKEND", "test_video_backend_override_2")
        image_io = ImageMediaIO()

        def check_uses_env_backend(video_io: VideoMediaIO) -> None:
            # Backend 2 (from the env var) must have produced the frames.
            frames, metadata = video_io.load_bytes(b"test")
            np.testing.assert_array_equal(frames, FAKE_OUTPUT_2)
            assert metadata["video_backend"] == "test_video_backend_override_2"

        # Explicit None falls back to the env var.
        check_uses_env_backend(
            VideoMediaIO(image_io, num_frames=10, video_backend=None)
        )
        # Omitting the kwarg entirely also falls back to the env var.
        check_uses_env_backend(VideoMediaIO(image_io, num_frames=10))
| {
"repo_id": "vllm-project/vllm",
"file_path": "tests/multimodal/media/test_video.py",
"license": "Apache License 2.0",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
vllm-project/vllm:vllm/multimodal/media/audio.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
from io import BytesIO
from pathlib import Path
import numpy.typing as npt
import pybase64
import torch
from vllm.utils.import_utils import PlaceholderModule
from vllm.utils.serial_utils import tensor2base64
from .base import MediaIO
try:
import librosa
except ImportError:
librosa = PlaceholderModule("librosa") # type: ignore[assignment]
try:
import soundfile
except ImportError:
soundfile = PlaceholderModule("soundfile") # type: ignore[assignment]
class AudioMediaIO(MediaIO[tuple[npt.NDArray, float]]):
    """Loads and encodes audio as an ``(samples, sample_rate)`` tuple."""

    def __init__(self, **kwargs) -> None:
        super().__init__()
        # `kwargs` contains custom arguments from
        # --media-io-kwargs for this modality.
        # They can be passed to the underlying
        # media loaders (e.g. custom implementations)
        # for flexible control.
        self.kwargs = kwargs

    def load_bytes(self, data: bytes) -> tuple[npt.NDArray, float]:
        # sr=None preserves the file's native sampling rate.
        return librosa.load(BytesIO(data), sr=None)

    def load_base64(
        self,
        media_type: str,
        data: str,
    ) -> tuple[npt.NDArray, float]:
        return self.load_bytes(base64.b64decode(data))

    def load_file(self, filepath: Path) -> tuple[npt.NDArray, float]:
        return librosa.load(filepath, sr=None)

    def encode_base64(
        self,
        media: tuple[npt.NDArray, float],
        *,
        audio_format: str = "WAV",
    ) -> str:
        """Encode ``(samples, sample_rate)`` to base64 in `audio_format`.

        The `media` annotation matches the media type declared by this
        class and returned by the ``load_*`` methods (it previously and
        inconsistently said ``int`` for the sample rate).
        """
        audio, sr = media
        with BytesIO() as buffer:
            soundfile.write(buffer, audio, sr, format=audio_format)
            data = buffer.getvalue()
        return base64.b64encode(data).decode("utf-8")
class AudioEmbeddingMediaIO(MediaIO[torch.Tensor]):
    """Decodes precomputed audio embeddings serialized with ``torch.save``."""

    def __init__(self) -> None:
        super().__init__()

    @staticmethod
    def _safe_torch_load(source) -> torch.Tensor:
        # Enable sparse tensor integrity checks to prevent out-of-bounds
        # writes from maliciously crafted tensors
        with torch.sparse.check_sparse_tensor_invariants():
            loaded = torch.load(source, weights_only=True)
        return loaded.to_dense()

    def load_bytes(self, data: bytes) -> torch.Tensor:
        return self._safe_torch_load(BytesIO(data))

    def load_base64(self, media_type: str, data: str) -> torch.Tensor:
        return self.load_bytes(pybase64.b64decode(data, validate=True))

    def load_file(self, filepath: Path) -> torch.Tensor:
        return self._safe_torch_load(filepath)

    def encode_base64(self, media: torch.Tensor) -> str:
        return tensor2base64(media)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/multimodal/media/audio.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/multimodal/media/image.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from io import BytesIO
from pathlib import Path
import pybase64
import torch
from PIL import Image
from vllm.utils.serial_utils import tensor2base64
from ..image import convert_image_mode, rgba_to_rgb
from .base import MediaIO, MediaWithBytes
class ImageMediaIO(MediaIO[Image.Image]):
    """Loads and encodes PIL images, converting them to a target mode."""

    # Background used to flatten RGBA onto RGB when no override is given.
    _DEFAULT_RGBA_BACKGROUND = (255, 255, 255)

    def __init__(self, image_mode: str = "RGB", **kwargs) -> None:
        super().__init__()
        self.image_mode = image_mode
        # `kwargs` contains custom arguments from
        # --media-io-kwargs for this modality.
        # They can be passed to the underlying
        # media loaders (e.g. custom implementations)
        # for flexible control.
        self.kwargs = kwargs
        self.rgba_background_color = self._parse_rgba_background(
            kwargs.get("rgba_background_color", self._DEFAULT_RGBA_BACKGROUND)
        )

    @staticmethod
    def _parse_rgba_background(value) -> tuple[int, int, int]:
        """Normalize and validate an RGB background color specification."""
        # Lists are accepted for convenience; normalize to a tuple.
        if isinstance(value, list):
            value = tuple(value)
        is_valid = (
            isinstance(value, tuple)
            and len(value) == 3
            and all(isinstance(c, int) and 0 <= c <= 255 for c in value)
        )
        if not is_valid:
            raise ValueError(
                "rgba_background_color must be a list or tuple of 3 integers "
                "in the range [0, 255]."
            )
        return value

    def _convert_image_mode(
        self, image: Image.Image | MediaWithBytes[Image.Image]
    ) -> Image.Image:
        """Convert image mode with custom background color."""
        if isinstance(image, MediaWithBytes):
            image = image.media
        if image.mode == self.image_mode:
            return image
        if image.mode == "RGBA" and self.image_mode == "RGB":
            # Flatten transparency onto the configured background color.
            return rgba_to_rgb(image, self.rgba_background_color)
        return convert_image_mode(image, self.image_mode)

    def load_bytes(self, data: bytes) -> MediaWithBytes[Image.Image]:
        converted = self._convert_image_mode(Image.open(BytesIO(data)))
        return MediaWithBytes(converted, data)

    def load_base64(self, media_type: str, data: str) -> MediaWithBytes[Image.Image]:
        return self.load_bytes(pybase64.b64decode(data, validate=True))

    def load_file(self, filepath: Path) -> MediaWithBytes[Image.Image]:
        data = filepath.read_bytes()
        converted = self._convert_image_mode(Image.open(BytesIO(data)))
        return MediaWithBytes(converted, data)

    def encode_base64(
        self,
        media: Image.Image,
        *,
        image_format: str = "PNG",
    ) -> str:
        with BytesIO() as buffer:
            self._convert_image_mode(media).save(buffer, image_format)
            encoded = buffer.getvalue()
        return pybase64.b64encode(encoded).decode("utf-8")
class ImageEmbeddingMediaIO(MediaIO[torch.Tensor]):
    """Decodes precomputed image embeddings serialized with ``torch.save``."""

    def __init__(self) -> None:
        super().__init__()

    @staticmethod
    def _safe_torch_load(source) -> torch.Tensor:
        # Enable sparse tensor integrity checks to prevent out-of-bounds
        # writes from maliciously crafted tensors
        with torch.sparse.check_sparse_tensor_invariants():
            loaded = torch.load(source, weights_only=True)
        return loaded.to_dense()

    def load_bytes(self, data: bytes) -> torch.Tensor:
        return self._safe_torch_load(BytesIO(data))

    def load_base64(self, media_type: str, data: str) -> torch.Tensor:
        return self.load_bytes(pybase64.b64decode(data, validate=True))

    def load_file(self, filepath: Path) -> torch.Tensor:
        return self._safe_torch_load(filepath)

    def encode_base64(self, media: torch.Tensor) -> str:
        return tensor2base64(media)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/multimodal/media/image.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/multimodal/media/video.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
from functools import partial
from pathlib import Path
from typing import Any
import numpy as np
import numpy.typing as npt
from PIL import Image
from vllm import envs
from ..video import VIDEO_LOADER_REGISTRY
from .base import MediaIO
from .image import ImageMediaIO
class VideoMediaIO(MediaIO[tuple[npt.NDArray, dict[str, Any]]]):
    """Loads video as ``(frames, metadata)`` via a pluggable backend loader."""

    def __init__(
        self,
        image_io: ImageMediaIO,
        num_frames: int = 32,
        **kwargs,
    ) -> None:
        super().__init__()
        self.image_io = image_io
        self.num_frames = num_frames
        # `kwargs` contains custom arguments from
        # --media-io-kwargs for this modality.
        # They can be passed to the underlying
        # media loaders (e.g. custom implementations)
        # for flexible control.
        # Allow per-request override of video backend via kwargs.
        # This enables users to specify a different backend than the
        # global VLLM_VIDEO_LOADER_BACKEND env var, e.g.:
        # --media-io-kwargs '{"video": {"video_backend": "torchcodec"}}'
        backend_name = (
            kwargs.pop("video_backend", None) or envs.VLLM_VIDEO_LOADER_BACKEND
        )
        self.kwargs = kwargs
        self.video_loader = VIDEO_LOADER_REGISTRY.load(backend_name)

    def load_bytes(self, data: bytes) -> tuple[npt.NDArray, dict[str, Any]]:
        return self.video_loader.load_bytes(
            data, num_frames=self.num_frames, **self.kwargs
        )

    def load_base64(
        self, media_type: str, data: str
    ) -> tuple[npt.NDArray, dict[str, Any]]:
        if media_type.lower() == "video/jpeg":
            # A "video/jpeg" payload is a comma-separated list of
            # base64-encoded JPEG frames.
            frames = [
                np.asarray(self.image_io.load_base64("image/jpeg", frame_b64))
                for frame_b64 in data.split(",")
            ]
            return np.stack(frames), {}
        return self.load_bytes(base64.b64decode(data))

    def load_file(self, filepath: Path) -> tuple[npt.NDArray, dict[str, Any]]:
        return self.load_bytes(filepath.read_bytes())

    def encode_base64(
        self,
        media: npt.NDArray,
        *,
        video_format: str = "JPEG",
    ) -> str:
        if video_format != "JPEG":
            raise NotImplementedError("Only JPEG format is supported for now.")
        # Emit one base64 JPEG per frame, comma-separated (the inverse of
        # the "video/jpeg" branch in load_base64).
        return ",".join(
            self.image_io.encode_base64(
                Image.fromarray(frame), image_format=video_format
            )
            for frame in media
        )
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/multimodal/media/video.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/benchmarks/sweep/startup.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import argparse
import json
import shlex
import subprocess
from contextlib import contextmanager
from dataclasses import dataclass
from datetime import datetime
from functools import lru_cache
from pathlib import Path
from typing import ClassVar
from vllm.benchmarks.startup import add_cli_args as add_startup_cli_args
from vllm.utils.argparse_utils import FlexibleArgumentParser
from vllm.utils.import_utils import PlaceholderModule
from .param_sweep import ParameterSweep, ParameterSweepItem
from .utils import sanitize_filename
try:
import pandas as pd
except ImportError:
pd = PlaceholderModule("pandas")
@lru_cache(maxsize=1)
def _get_supported_startup_keys() -> set[str]:
    """Collect the parameter names accepted by `vllm bench startup`.

    Builds a throwaway parser with the startup CLI args and harvests every
    action's `dest` plus the underscore-normalized form of each long
    option, so sweep files may use either dashed or underscored spellings.
    Cached because the result is constant for the process lifetime.
    """
    parser = FlexibleArgumentParser(add_help=False)
    add_startup_cli_args(parser)
    # "config" is accepted even though it is not a regular parser action.
    supported: set[str] = {"config"}
    # NOTE(review): relies on argparse's private `_actions`; there is no
    # public API for enumerating registered actions.
    for action in parser._actions:
        if action.dest and action.dest is not argparse.SUPPRESS:
            supported.add(action.dest)
        for option in action.option_strings:
            if option.startswith("--"):
                supported.add(option.lstrip("-").replace("-", "_"))
    return supported
def _is_supported_param(param_key: str, supported: set[str]) -> bool:
if param_key == "_benchmark_name":
return True
prefix = param_key.split(".", 1)[0]
normalized = prefix.replace("-", "_")
return normalized in supported
def _filter_params(
    params: ParameterSweep, *, supported: set[str], strict: bool
) -> ParameterSweep:
    """Drop (or, when `strict`, reject) unsupported keys in each sweep item.

    Each item is rebuilt with only the keys `vllm bench startup`
    understands. Dropped keys are reported once per item; in strict mode
    they raise ValueError instead of being printed.
    """
    filtered = []
    for item in params:
        kept: dict[str, object] = {}
        dropped: list[str] = []
        for key, value in item.items():
            if _is_supported_param(key, supported):
                kept[key] = value
            else:
                dropped.append(key)
        if dropped:
            # Prefer the human-given benchmark name for the message label.
            label = item.get("_benchmark_name") or item.as_text()
            message = (
                "Ignoring unsupported startup params"
                f"{' for ' + str(label) if label else ''}: "
                f"{', '.join(sorted(dropped))}"
            )
            if strict:
                raise ValueError(message)
            print(message)
        filtered.append(ParameterSweepItem.from_record(kept))
    return ParameterSweep(filtered)
def _update_run_data(
    run_data: dict[str, object],
    serve_overrides: ParameterSweepItem,
    startup_overrides: ParameterSweepItem,
    run_number: int,
) -> dict[str, object]:
    """Annotate a benchmark result dict in place with its sweep context.

    Startup overrides are applied last so they win over serve overrides on
    key collisions. Returns the same (mutated) dict for convenience.
    """
    run_data["run_number"] = run_number
    for overrides in (serve_overrides, startup_overrides):
        run_data.update(overrides)
    return run_data
def _strip_arg(cmd: list[str], keys: tuple[str, ...]) -> list[str]:
stripped: list[str] = []
skip_next = False
for arg in cmd:
if skip_next:
skip_next = False
continue
if arg in keys:
skip_next = True
continue
if any(arg.startswith(f"{key}=") for key in keys):
continue
stripped.append(arg)
return stripped
def _apply_output_json(cmd: list[str], output_path: Path) -> list[str]:
    """Force the benchmark command to write its JSON results to `output_path`.

    Any pre-existing --output-json/--output_json option is stripped first so
    the sweep-controlled path always wins.
    """
    keys = ("--output-json", "--output_json")
    return [*_strip_arg(cmd, keys), "--output-json", str(output_path)]
def _get_comb_base_path(
    experiment_dir: Path,
    serve_comb: ParameterSweepItem,
    startup_comb: ParameterSweepItem,
) -> Path:
    """Directory under `experiment_dir` for one parameter combination.

    Empty combinations contribute nothing; when both are empty, joining
    with "" makes this the experiment directory itself.
    """
    parts = list[str]()
    if serve_comb:
        # NOTE(review): joining "SERVE-" and the name with "-" produces a
        # double dash ("SERVE--<name>"). Paths are produced and consumed
        # consistently, so this is cosmetic — confirm before "fixing", as a
        # change would break `--resume` against existing result trees.
        parts.extend(("SERVE-", serve_comb.name))
    if startup_comb:
        parts.extend(("STARTUP-", startup_comb.name))
    return experiment_dir / sanitize_filename("-".join(parts))
def _get_comb_run_path(base_path: Path, run_number: int | None) -> Path:
if run_number is None:
return base_path / "summary.json"
return base_path / f"run={run_number}.json"
def run_benchmark(
    startup_cmd: list[str],
    *,
    serve_overrides: ParameterSweepItem,
    startup_overrides: ParameterSweepItem,
    run_number: int,
    output_path: Path,
    show_stdout: bool,
    dry_run: bool,
) -> dict[str, object] | None:
    """Execute one `vllm bench startup` invocation and return its results.

    The parameter overrides are spliced into `startup_cmd` and the output
    path is forced via --output-json. If `output_path` already exists, the
    run is skipped and the stored results are reused (this is what makes
    `--resume` work). Returns None on a dry run.
    """
    cmd = serve_overrides.apply_to_cmd(startup_cmd)
    cmd = startup_overrides.apply_to_cmd(cmd)
    cmd = _apply_output_json(cmd, output_path)
    print("[BEGIN BENCHMARK]")
    print(f"Serve overrides: {serve_overrides}")
    print(f"Startup overrides: {startup_overrides}")
    print(f"Run Number: {run_number}")
    print(f"Benchmark command: {cmd}")
    print(f"Output file: {output_path}")
    if output_path.exists():
        # Checkpointing: reuse existing results instead of re-running.
        print("Found existing results.")
        print("[SKIPPED BENCHMARK]")
        with output_path.open("r", encoding="utf-8") as f:
            run_data = json.load(f)
        return _update_run_data(
            run_data, serve_overrides, startup_overrides, run_number
        )
    if dry_run:
        print("[END BENCHMARK]")
        return None
    output_path.parent.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        cmd,
        stdout=None if show_stdout else subprocess.DEVNULL,
        check=True,
    )
    # Re-read the benchmark's own JSON output, enrich it with the sweep
    # context, and write it back in place.
    with output_path.open("r", encoding="utf-8") as f:
        run_data = json.load(f)
    run_data = _update_run_data(
        run_data, serve_overrides, startup_overrides, run_number
    )
    with output_path.open("w", encoding="utf-8") as f:
        json.dump(run_data, f, indent=4)
    print("[END BENCHMARK]")
    return run_data
def run_comb(
    startup_cmd: list[str],
    *,
    serve_comb: ParameterSweepItem,
    startup_comb: ParameterSweepItem,
    base_path: Path,
    num_runs: int,
    show_stdout: bool,
    dry_run: bool,
) -> list[dict[str, object]] | None:
    """Run all repetitions of one parameter combination.

    Aggregates the per-run results into `base_path/summary.json`.
    Returns None on a dry run.
    """
    comb_data = list[dict[str, object]]()
    for run_number in range(num_runs):
        run_data = run_benchmark(
            startup_cmd,
            serve_overrides=serve_comb,
            startup_overrides=startup_comb,
            run_number=run_number,
            output_path=_get_comb_run_path(base_path, run_number),
            show_stdout=show_stdout,
            dry_run=dry_run,
        )
        if run_data is not None:
            comb_data.append(run_data)
    if dry_run:
        return None
    with _get_comb_run_path(base_path, run_number=None).open(
        "w", encoding="utf-8"
    ) as f:
        json.dump(comb_data, f, indent=4)
    return comb_data
def run_combs(
    startup_cmd: list[str],
    *,
    serve_params: ParameterSweep,
    startup_params: ParameterSweep,
    experiment_dir: Path,
    num_runs: int,
    show_stdout: bool,
    dry_run: bool,
) -> "pd.DataFrame | None":
    """Run the full cartesian product of serve and startup combinations.

    Writes the combined results to `experiment_dir/summary.csv` and returns
    them as a DataFrame. Returns None on a dry run.
    """
    all_data = list[dict[str, object]]()
    for serve_comb in serve_params:
        for startup_comb in startup_params:
            base_path = _get_comb_base_path(experiment_dir, serve_comb, startup_comb)
            comb_data = run_comb(
                startup_cmd,
                serve_comb=serve_comb,
                startup_comb=startup_comb,
                base_path=base_path,
                num_runs=num_runs,
                show_stdout=show_stdout,
                dry_run=dry_run,
            )
            if comb_data is not None:
                all_data.extend(comb_data)
    if dry_run:
        return None
    combined_df = pd.DataFrame.from_records(all_data)
    combined_df.to_csv(experiment_dir / "summary.csv")
    return combined_df
@dataclass
class SweepStartupArgs:
    """Parsed CLI configuration for the `sweep startup` subcommand."""

    startup_cmd: list[str]
    serve_params: ParameterSweep
    startup_params: ParameterSweep
    output_dir: Path
    experiment_name: str
    num_runs: int
    show_stdout: bool
    dry_run: bool
    resume: bool
    parser_name: ClassVar[str] = "startup"
    parser_help: ClassVar[str] = (
        "Benchmark vLLM startup time over parameter combinations."
    )

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace):
        """Build a SweepStartupArgs from a parsed argparse namespace.

        Missing sweep files default to a single empty combination, and
        both sweeps are filtered down to parameters `vllm bench startup`
        understands.
        """
        startup_cmd = shlex.split(args.startup_cmd)
        if args.serve_params:
            serve_params = ParameterSweep.read_json(args.serve_params)
        else:
            serve_params = ParameterSweep.from_records([{}])
        if args.startup_params:
            startup_params = ParameterSweep.read_json(args.startup_params)
        else:
            startup_params = ParameterSweep.from_records([{}])
        supported = _get_supported_startup_keys()
        strict_params = args.strict_params
        serve_params = _filter_params(
            serve_params, supported=supported, strict=strict_params
        )
        startup_params = _filter_params(
            startup_params, supported=supported, strict=strict_params
        )
        if args.experiment_name:
            experiment_name = args.experiment_name
        else:
            # Default to a sortable timestamp, e.g. 20250101_120000.
            experiment_name = datetime.now().strftime("%Y%m%d_%H%M%S")
        if args.num_runs < 1:
            raise ValueError("`num_runs` should be at least 1.")
        return cls(
            startup_cmd=startup_cmd,
            serve_params=serve_params,
            startup_params=startup_params,
            output_dir=Path(args.output_dir),
            experiment_name=experiment_name,
            num_runs=args.num_runs,
            show_stdout=args.show_stdout,
            dry_run=args.dry_run,
            resume=args.resume,
        )

    @classmethod
    def add_cli_args(cls, parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register this subcommand's CLI options on `parser`."""
        parser.add_argument(
            "--startup-cmd",
            type=str,
            default="vllm bench startup",
            help="The command used to run the startup benchmark.",
        )
        parser.add_argument(
            "--serve-params",
            type=str,
            default=None,
            help="Path to JSON file containing parameter combinations "
            "for the `vllm serve` command. Only parameters supported by "
            "`vllm bench startup` will be applied.",
        )
        parser.add_argument(
            "--startup-params",
            type=str,
            default=None,
            help="Path to JSON file containing parameter combinations "
            "for the `vllm bench startup` command.",
        )
        parser.add_argument(
            "--strict-params",
            action="store_true",
            help="If set, unknown parameters in sweep files raise an error "
            "instead of being ignored.",
        )
        parser.add_argument(
            "-o",
            "--output-dir",
            type=str,
            default="results",
            help="The main directory to which results are written.",
        )
        parser.add_argument(
            "-e",
            "--experiment-name",
            type=str,
            default=None,
            help="The name of this experiment (defaults to current timestamp). "
            "Results will be stored under `output_dir/experiment_name`.",
        )
        parser.add_argument(
            "--num-runs",
            type=int,
            default=1,
            help="Number of runs per parameter combination.",
        )
        parser.add_argument(
            "--show-stdout",
            action="store_true",
            help="If set, logs the standard output of subcommands.",
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            help="If set, prints the commands to run, "
            "then exits without executing them.",
        )
        parser.add_argument(
            "--resume",
            action="store_true",
            help="Resume a previous execution of this script, i.e., only run "
            "parameter combinations for which there are still no output files "
            "under `output_dir/experiment_name`.",
        )
        return parser

    def resolve_experiment_dir(self) -> Path:
        """Return `output_dir/experiment_name`, validating resume/overwrite."""
        experiment_dir = self.output_dir / self.experiment_name
        if self.resume:
            if not experiment_dir.exists():
                raise ValueError(f"Cannot resume from non-existent {experiment_dir=}")
        else:
            if experiment_dir.exists():
                raise ValueError(f"Cannot overwrite existing {experiment_dir=}")
        return experiment_dir

    @contextmanager
    def run_ctx(self, experiment_dir: Path):
        """Context manager wrapping a sweep run with status reporting.

        On failure, points the user at `--resume` so the sweep can continue
        from its last checkpoint.
        """
        if self.dry_run:
            yield
            print(f"Experiment will be saved at: {experiment_dir}")
            return
        try:
            yield
            print(f"Experiment has been saved at: {experiment_dir}")
        except BaseException as exc:
            raise RuntimeError(
                "The script was terminated early. Use `--resume` "
                "to continue the script from its last checkpoint."
            ) from exc
def run_main(args: SweepStartupArgs):
    """Resolve the experiment directory and run the full parameter sweep."""
    experiment_dir = args.resolve_experiment_dir()
    with args.run_ctx(experiment_dir):
        return run_combs(
            startup_cmd=args.startup_cmd,
            serve_params=args.serve_params,
            startup_params=args.startup_params,
            experiment_dir=experiment_dir,
            num_runs=args.num_runs,
            show_stdout=args.show_stdout,
            dry_run=args.dry_run,
        )
def main(args: argparse.Namespace):
    """CLI entry point: convert the parsed namespace and run the sweep."""
    run_main(SweepStartupArgs.from_cli_args(args))
if __name__ == "__main__":
    # Allow running this module directly as a standalone script.
    parser = argparse.ArgumentParser(description=SweepStartupArgs.parser_help)
    SweepStartupArgs.add_cli_args(parser)
    main(parser.parse_args())
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/benchmarks/sweep/startup.py",
"license": "Apache License 2.0",
"lines": 372,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/anthropic/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from http import HTTPStatus
from fastapi import APIRouter, Depends, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from vllm.entrypoints.anthropic.protocol import (
AnthropicCountTokensRequest,
AnthropicCountTokensResponse,
AnthropicError,
AnthropicErrorResponse,
AnthropicMessagesRequest,
AnthropicMessagesResponse,
)
from vllm.entrypoints.anthropic.serving import AnthropicServingMessages
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.utils import (
load_aware_call,
with_cancellation,
)
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
def messages(request: Request) -> AnthropicServingMessages | None:
    """Fetch the Anthropic Messages handler from app state.

    Returns None when the server was started without Messages support;
    both endpoints below check for that case explicitly, so the return
    annotation includes `| None` (matching the analogous accessors in the
    other API routers, e.g. `completion()` in the completions router).
    """
    return request.app.state.anthropic_serving_messages
def translate_error_response(response: ErrorResponse) -> JSONResponse:
    """Convert an OpenAI-style ErrorResponse into Anthropic's error format."""
    anthropic_error = AnthropicErrorResponse(
        error=AnthropicError(
            type=response.error.type,
            message=response.error.message,
        )
    )
    # Preserve the HTTP status code carried by the OpenAI-style error.
    return JSONResponse(
        status_code=response.error.code, content=anthropic_error.model_dump()
    )
@router.post(
    "/v1/messages",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": AnthropicErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": AnthropicErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": AnthropicErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_messages(request: AnthropicMessagesRequest, raw_request: Request):
    """Handle the Anthropic Messages API (streaming or non-streaming)."""
    handler = messages(raw_request)
    if handler is None:
        # No Messages handler configured; build an OpenAI-style error and
        # translate it into Anthropic's error envelope.
        base_server = raw_request.app.state.openai_serving_tokenization
        error = base_server.create_error_response(
            message="The model does not support Messages API"
        )
        return translate_error_response(error)
    try:
        generator = await handler.create_messages(request, raw_request)
    except Exception as e:
        logger.exception("Error in create_messages: %s", e)
        return JSONResponse(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
            content=AnthropicErrorResponse(
                error=AnthropicError(
                    type="internal_error",
                    message=str(e),
                )
            ).model_dump(),
        )
    if isinstance(generator, ErrorResponse):
        return translate_error_response(generator)
    elif isinstance(generator, AnthropicMessagesResponse):
        # Non-streaming: return the complete response as JSON.
        resp = generator.model_dump(exclude_none=True)
        logger.debug("Anthropic Messages Response: %s", resp)
        return JSONResponse(content=resp)
    # Streaming: `generator` yields server-sent events.
    return StreamingResponse(content=generator, media_type="text/event-stream")
@router.post(
    "/v1/messages/count_tokens",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"model": AnthropicCountTokensResponse},
        HTTPStatus.BAD_REQUEST.value: {"model": AnthropicErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": AnthropicErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": AnthropicErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def count_tokens(request: AnthropicCountTokensRequest, raw_request: Request):
    """Count the tokens a Messages request would consume, without generating.

    Decorator order now matches `create_messages` (and the other API
    routers): `with_cancellation` is outermost, with `load_aware_call`
    applied inside it; this endpoint previously had the order inverted,
    inconsistently with every sibling endpoint.
    """
    handler = messages(raw_request)
    if handler is None:
        # No Messages handler configured; build an OpenAI-style error and
        # translate it into Anthropic's error envelope.
        base_server = raw_request.app.state.openai_serving_tokenization
        error = base_server.create_error_response(
            message="The model does not support Messages API"
        )
        return translate_error_response(error)
    try:
        response = await handler.count_tokens(request, raw_request)
    except Exception as e:
        logger.exception("Error in count_tokens: %s", e)
        return JSONResponse(
            status_code=HTTPStatus.INTERNAL_SERVER_ERROR.value,
            content=AnthropicErrorResponse(
                error=AnthropicError(
                    type="internal_error",
                    message=str(e),
                )
            ).model_dump(),
        )
    if isinstance(response, ErrorResponse):
        return translate_error_response(response)
    return JSONResponse(content=response.model_dump(exclude_none=True))
def attach_router(app: FastAPI):
    """Mount this module's routes onto the FastAPI application."""
    app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/anthropic/api_router.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/completion/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from http import HTTPStatus
from fastapi import APIRouter, Depends, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from vllm.entrypoints.openai.completion.protocol import (
CompletionRequest,
CompletionResponse,
)
from vllm.entrypoints.openai.completion.serving import OpenAIServingCompletion
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.orca_metrics import metrics_header
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.utils import (
load_aware_call,
with_cancellation,
)
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = "endpoint-load-metrics-format"
def completion(request: Request) -> OpenAIServingCompletion | None:
    """Fetch the completion-serving handler stored on the app state (may be None)."""
    return request.app.state.openai_serving_completion
@router.post(
    "/v1/completions",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_completion(request: CompletionRequest, raw_request: Request):
    """Handle POST /v1/completions: non-streaming JSON or an SSE stream."""
    serving = completion(raw_request)
    if serving is None:
        fallback = raw_request.app.state.openai_serving_tokenization
        return fallback.create_error_response(
            message="The model does not support Completions API"
        )
    # Caller-requested format for the ORCA load-metrics response header.
    header_format = raw_request.headers.get(
        ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
    )
    try:
        result = await serving.create_completion(request, raw_request)
    except Exception as e:
        result = serving.create_error_response(e)
    if isinstance(result, ErrorResponse):
        return JSONResponse(
            content=result.model_dump(), status_code=result.error.code
        )
    if isinstance(result, CompletionResponse):
        return JSONResponse(
            content=result.model_dump(),
            headers=metrics_header(header_format),
        )
    # Anything else is an async generator of SSE events.
    return StreamingResponse(content=result, media_type="text/event-stream")
@router.post(
    "/v1/completions/render",
    dependencies=[Depends(validate_json_request)],
    response_model=list,
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
async def render_completion(request: CompletionRequest, raw_request: Request):
    """Render a completion request into engine prompts without generating."""
    serving = completion(raw_request)
    if serving is None:
        fallback = raw_request.app.state.openai_serving_tokenization
        return fallback.create_error_response(
            message="The model does not support Completions API"
        )
    try:
        rendered = await serving.render_completion_request(request)
    except Exception as e:
        rendered = serving.create_error_response(e)
    if isinstance(rendered, ErrorResponse):
        return JSONResponse(
            content=rendered.model_dump(), status_code=rendered.error.code
        )
    return JSONResponse(content=rendered)
def attach_router(app: FastAPI):
    """Mount this module's routes onto the FastAPI application."""
    app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/completion/api_router.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/completion/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
import json
import time
from typing import Annotated, Any, Literal
import torch
from pydantic import Field, model_validator
from vllm.config import ModelConfig
from vllm.config.utils import replace
from vllm.entrypoints.openai.engine.protocol import (
AnyResponseFormat,
LegacyStructuralTagResponseFormat,
OpenAIBaseModel,
StreamOptions,
StructuralTagResponseFormat,
UsageInfo,
)
from vllm.exceptions import VLLMValidationError
from vllm.logger import init_logger
from vllm.logprobs import Logprob
from vllm.renderers import TokenizeParams
from vllm.sampling_params import (
BeamSearchParams,
RequestOutputKind,
SamplingParams,
StructuredOutputsParams,
)
from vllm.utils import random_uuid
logger = init_logger(__name__)
_LONG_INFO = torch.iinfo(torch.long)
class CompletionRequest(OpenAIBaseModel):
    """Request body for the OpenAI-compatible `/v1/completions` endpoint.

    Converts itself into engine-level `SamplingParams`/`BeamSearchParams`
    and validates mutually-exclusive or stream-dependent options via the
    `model_validator`s below.
    """

    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/completions/create
    model: str | None = None
    prompt: (
        list[Annotated[int, Field(ge=0)]]
        | list[list[Annotated[int, Field(ge=0)]]]
        | str
        | list[str]
        | None
    ) = None
    echo: bool | None = False
    frequency_penalty: float | None = 0.0
    logit_bias: dict[str, float] | None = None
    logprobs: int | None = None
    max_tokens: int | None = 16
    n: int = 1
    presence_penalty: float | None = 0.0
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    stop: str | list[str] | None = []
    stream: bool | None = False
    stream_options: StreamOptions | None = None
    suffix: str | None = None
    temperature: float | None = None
    top_p: float | None = None
    user: str | None = None
    # --8<-- [start:completion-sampling-params]
    use_beam_search: bool = False
    top_k: int | None = None
    min_p: float | None = None
    repetition_penalty: float | None = None
    length_penalty: float = 1.0
    stop_token_ids: list[int] | None = []
    include_stop_str_in_output: bool = False
    ignore_eos: bool = False
    min_tokens: int = 0
    skip_special_tokens: bool = True
    spaces_between_special_tokens: bool = True
    truncate_prompt_tokens: Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None = (
        None
    )
    allowed_token_ids: list[int] | None = None
    prompt_logprobs: int | None = None
    # --8<-- [end:completion-sampling-params]
    # --8<-- [start:completion-extra-params]
    prompt_embeds: bytes | list[bytes] | None = None
    add_special_tokens: bool = Field(
        default=True,
        description=(
            "If true (the default), special tokens (e.g. BOS) will be added to "
            "the prompt."
        ),
    )
    response_format: AnyResponseFormat | None = Field(
        default=None,
        description=(
            "Similar to chat completion, this parameter specifies the format "
            "of output. Only {'type': 'json_object'}, {'type': 'json_schema'}"
            ", {'type': 'structural_tag'}, or {'type': 'text' } is supported."
        ),
    )
    structured_outputs: StructuredOutputsParams | None = Field(
        default=None,
        description="Additional kwargs for structured outputs",
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    return_tokens_as_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified with 'logprobs', tokens are represented "
            " as strings of the form 'token_id:{token_id}' so that tokens "
            "that are not JSON-encodable can be identified."
        ),
    )
    return_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified, the result will include token IDs alongside the "
            "generated text. In streaming mode, prompt_token_ids is included "
            "only in the first chunk, and token_ids contains the delta tokens "
            "for each chunk. This is useful for debugging or when you "
            "need to map generated text back to input tokens."
        ),
    )
    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None,
        description="KVTransfer parameters used for disaggregated serving.",
    )
    vllm_xargs: dict[str, str | int | float] | None = Field(
        default=None,
        description=(
            "Additional request parameters with string or "
            "numeric values, used by custom extensions."
        ),
    )
    # --8<-- [end:completion-extra-params]

    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        """Derive tokenization parameters for this request from the model config.

        Detokenization is only needed when echoing the prompt without
        returning raw token IDs.
        """
        return TokenizeParams(
            max_total_tokens=model_config.max_model_len,
            max_output_tokens=self.max_tokens or 0,
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            add_special_tokens=self.add_special_tokens,
            needs_detokenization=bool(self.echo and not self.return_token_ids),
            max_total_tokens_param="max_model_len",
            max_output_tokens_param="max_tokens",
        )

    # Default sampling parameters for completion requests
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "repetition_penalty": 1.0,
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
        "min_p": 0.0,
    }

    def to_beam_search_params(
        self,
        max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> BeamSearchParams:
        """Convert this request into engine `BeamSearchParams`.

        `default_sampling_params` holds server-side defaults (e.g. from the
        model's generation config) that fill in unset request fields.
        """
        if default_sampling_params is None:
            default_sampling_params = {}
        n = self.n if self.n is not None else 1
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get("temperature", 1.0)
        return BeamSearchParams(
            beam_width=n,
            max_tokens=max_tokens,
            ignore_eos=self.ignore_eos,
            temperature=temperature,
            length_penalty=self.length_penalty,
            include_stop_str_in_output=self.include_stop_str_in_output,
        )

    def to_sampling_params(
        self,
        max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> SamplingParams:
        """Convert this request into engine `SamplingParams`.

        Resolution order for each knob: request value, then
        `default_sampling_params` (server defaults), then
        `_DEFAULT_SAMPLING_PARAMS`. Also folds `response_format` into
        `self.structured_outputs` as a side effect.
        """
        if default_sampling_params is None:
            default_sampling_params = {}
        # Default parameters
        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get(
                "repetition_penalty",
                self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
            )
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (min_p := self.min_p) is None:
            min_p = default_sampling_params.get(
                "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
            )
        # Echo implies prompt logprobs at the same detail as `logprobs`.
        prompt_logprobs = self.prompt_logprobs
        if prompt_logprobs is None and self.echo:
            prompt_logprobs = self.logprobs
        # NOTE(review): echo with max_tokens == 0 still requests 1 token below —
        # presumably so the engine runs and can return the prompt; confirm.
        echo_without_generation = self.echo and self.max_tokens == 0
        response_format = self.response_format
        if response_format is not None:
            structured_outputs_kwargs = dict[str, Any]()
            # Set structured output params for response format
            if response_format.type == "json_object":
                structured_outputs_kwargs["json_object"] = True
            elif response_format.type == "json_schema":
                json_schema = response_format.json_schema
                assert json_schema is not None
                structured_outputs_kwargs["json"] = json_schema.json_schema
            elif response_format.type == "structural_tag":
                structural_tag = response_format
                assert isinstance(
                    structural_tag,
                    (
                        LegacyStructuralTagResponseFormat,
                        StructuralTagResponseFormat,
                    ),
                )
                s_tag_obj = structural_tag.model_dump(by_alias=True)
                structured_outputs_kwargs["structural_tag"] = json.dumps(s_tag_obj)
            # If structured outputs wasn't already enabled,
            # we must enable it for these features to work
            if len(structured_outputs_kwargs) > 0:
                self.structured_outputs = (
                    StructuredOutputsParams(**structured_outputs_kwargs)
                    if self.structured_outputs is None
                    else replace(self.structured_outputs, **structured_outputs_kwargs)
                )
        extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
        if self.kv_transfer_params:
            # Pass in kv_transfer_params via extra_args
            extra_args["kv_transfer_params"] = self.kv_transfer_params
        return SamplingParams.from_optional(
            n=self.n,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=repetition_penalty,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            min_p=min_p,
            seed=self.seed,
            stop=self.stop,
            stop_token_ids=self.stop_token_ids,
            logprobs=self.logprobs,
            ignore_eos=self.ignore_eos,
            max_tokens=max_tokens if not echo_without_generation else 1,
            min_tokens=self.min_tokens,
            prompt_logprobs=prompt_logprobs,
            skip_special_tokens=self.skip_special_tokens,
            spaces_between_special_tokens=self.spaces_between_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            structured_outputs=self.structured_outputs,
            logit_bias=self.logit_bias,
            allowed_token_ids=self.allowed_token_ids,
            extra_args=extra_args or None,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def validate_response_format(cls, data):
        """Require a `json_schema` payload when the response format is json_schema."""
        response_format = data.get("response_format")
        if response_format is None:
            return data
        # `data` may carry a raw dict or an already-built model instance.
        rf_type = (
            response_format.get("type")
            if isinstance(response_format, dict)
            else getattr(response_format, "type", None)
        )
        if rf_type == "json_schema":
            json_schema = (
                response_format.get("json_schema")
                if isinstance(response_format, dict)
                else getattr(response_format, "json_schema", None)
            )
            if json_schema is None:
                raise VLLMValidationError(
                    "When response_format type is 'json_schema', the "
                    "'json_schema' field must be provided.",
                    parameter="response_format",
                )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_structured_outputs_count(cls, data):
        """Reject requests combining more than one structured-output constraint."""
        if data.get("structured_outputs", None) is None:
            return data
        structured_outputs_kwargs = data["structured_outputs"]
        # structured_outputs may arrive as a dict (from JSON/raw kwargs) or
        # as a StructuredOutputsParams dataclass instance.
        is_dataclass = isinstance(structured_outputs_kwargs, StructuredOutputsParams)
        count = sum(
            (
                getattr(structured_outputs_kwargs, k, None)
                if is_dataclass
                else structured_outputs_kwargs.get(k)
            )
            is not None
            for k in ("json", "regex", "choice")
        )
        if count > 1:
            raise VLLMValidationError(
                "You can only use one kind of constraints for structured "
                "outputs ('json', 'regex' or 'choice').",
                parameter="structured_outputs",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_logprobs(cls, data):
        """Validate `logprobs`/`prompt_logprobs` values and their stream interaction."""
        if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
            # -1 is a sentinel meaning "all"; it is still disallowed with streaming.
            if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
                raise VLLMValidationError(
                    "`prompt_logprobs` are not available when `stream=True`.",
                    parameter="prompt_logprobs",
                )
            if prompt_logprobs < 0 and prompt_logprobs != -1:
                raise VLLMValidationError(
                    "`prompt_logprobs` must be a positive value or -1.",
                    parameter="prompt_logprobs",
                    value=prompt_logprobs,
                )
        if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
            raise VLLMValidationError(
                "`logprobs` must be a positive value.",
                parameter="logprobs",
                value=logprobs,
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def validate_stream_options(cls, data):
        """Only allow `stream_options` when streaming is requested."""
        if data.get("stream_options") and not data.get("stream"):
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter="stream_options",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def validate_prompt_and_prompt_embeds(cls, data):
        """Require at least one non-empty input: `prompt` or `prompt_embeds`."""
        prompt = data.get("prompt")
        prompt_embeds = data.get("prompt_embeds")
        prompt_is_empty = prompt is None or (isinstance(prompt, str) and prompt == "")
        embeds_is_empty = prompt_embeds is None or (
            isinstance(prompt_embeds, list) and len(prompt_embeds) == 0
        )
        if prompt_is_empty and embeds_is_empty:
            raise ValueError(
                "Either prompt or prompt_embeds must be provided and non-empty."
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_cache_salt_support(cls, data):
        """If `cache_salt` is given, require it to be a non-empty string."""
        if data.get("cache_salt") is not None and (
            not isinstance(data["cache_salt"], str) or not data["cache_salt"]
        ):
            raise ValueError(
                "Parameter 'cache_salt' must be a non-empty string if provided."
            )
        return data
class CompletionLogProbs(OpenAIBaseModel):
    """Per-token logprob data for one choice, in the OpenAI completions format."""

    text_offset: list[int] = Field(default_factory=list)
    token_logprobs: list[float | None] = Field(default_factory=list)
    tokens: list[str] = Field(default_factory=list)
    top_logprobs: list[dict[str, float] | None] = Field(default_factory=list)
class CompletionResponseChoice(OpenAIBaseModel):
    """One generated completion in a non-streaming response."""

    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    # vLLM extensions: only populated when `return_token_ids`/`prompt_logprobs`
    # were requested.
    token_ids: list[int] | None = None  # For response
    prompt_logprobs: list[dict[int, Logprob] | None] | None = None
    prompt_token_ids: list[int] | None = None  # For prompt
class CompletionResponse(OpenAIBaseModel):
    """Full (non-streaming) response body for `/v1/completions`."""

    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: Literal["text_completion"] = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseChoice]
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    system_fingerprint: str | None = None
    usage: UsageInfo
    # vLLM-specific fields that are not in OpenAI spec
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None, description="KVTransfer parameters."
    )
class CompletionResponseStreamChoice(OpenAIBaseModel):
    """One delta chunk of a choice in a streaming completion response."""

    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    # not part of the OpenAI spec but for tracing the tokens
    # prompt tokens is put into choice to align with CompletionResponseChoice
    prompt_token_ids: list[int] | None = None
    token_ids: list[int] | None = None
class CompletionStreamResponse(OpenAIBaseModel):
    """One SSE event body in a streaming `/v1/completions` response."""

    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseStreamChoice]
    usage: UsageInfo | None = Field(default=None)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/completion/protocol.py",
"license": "Apache License 2.0",
"lines": 447,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/models/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from fastapi import APIRouter, FastAPI, Request
from fastapi.responses import JSONResponse
from vllm.entrypoints.openai.models.serving import OpenAIServingModels
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
def models(request: Request) -> OpenAIServingModels:
    """Fetch the models-serving handler stored on the app state."""
    return request.app.state.openai_serving_models
@router.get("/v1/models")
async def show_available_models(raw_request: Request):
handler = models(raw_request)
models_ = await handler.show_available_models()
return JSONResponse(content=models_.model_dump())
def attach_router(app: FastAPI):
    """Mount this module's routes onto the FastAPI application."""
    app.include_router(router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/models/api_router.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/models/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from dataclasses import dataclass
@dataclass
class BaseModelPath:
    """Maps a served model name to the model it resolves to."""

    # Name the model is served under.
    name: str
    # Path/identifier of the underlying model.
    model_path: str
@dataclass
class LoRAModulePath:
    """Describes a LoRA adapter to serve: its name, location, and base model."""

    # Name the adapter is served under.
    name: str
    # Path/identifier of the LoRA adapter itself.
    path: str
    # Served name of the base model this adapter applies to, if any
    # (presumably falls back to the default base model when None — confirm).
    base_model_name: str | None = None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/models/protocol.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/multimodal/processing/context.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import time
from abc import abstractmethod
from collections.abc import Mapping
from contextlib import contextmanager
from dataclasses import dataclass, field
from functools import cached_property
from typing import TYPE_CHECKING, Any, overload
import torch
from typing_extensions import TypeVar
from vllm.logger import init_logger
from vllm.multimodal.inputs import MultiModalDataDict
from vllm.multimodal.parse import (
DictEmbeddingItems,
EmbeddingItems,
MultiModalDataItems,
MultiModalDataParser,
)
from vllm.renderers import TokenizeParams
from vllm.tokenizers import TokenizerLike
from vllm.transformers_utils.processor import cached_processor_from_config
from vllm.utils.func_utils import get_allowed_kwarg_only_overrides
from vllm.utils.jsontree import JSONTree, json_map_leaves
from vllm.utils.mistral import is_mistral_tokenizer
if TYPE_CHECKING:
from transformers.configuration_utils import PretrainedConfig
from transformers.feature_extraction_utils import BatchFeature
from transformers.processing_utils import ProcessorMixin
from vllm.config import ModelConfig
else:
PretrainedConfig = object
BatchFeature = object
ProcessorMixin = object
ModelConfig = object
logger = init_logger(__name__)
@dataclass
class TimingContext:
    """Accumulates wall-clock durations for named multi-modal processing stages."""

    # When False, `record` is a no-op context manager.
    enabled: bool = True
    # Accumulated execution time (seconds) per stage name.
    stage_secs: dict[str, float] = field(default_factory=dict)

    @property
    def total_secs(self) -> float:
        """Sum of the recorded durations across all stages."""
        return sum(self.stage_secs.values())

    @contextmanager
    def record(self, stage: str):
        """Time the enclosed block and add the elapsed seconds to `stage`."""
        if not self.enabled:
            yield
            return
        started = time.perf_counter()
        try:
            yield
        finally:
            # Accumulate even if the timed block raised.
            duration = time.perf_counter() - started
            self.stage_secs[stage] = self.stage_secs.get(stage, 0.0) + duration

    def get_stats_dict(self):
        """Return per-stage timings keyed as `<stage>_secs`, plus the grand total."""
        stats = {f"{name}_secs": secs for name, secs in self.stage_secs.items()}
        stats["preprocessor_total_secs"] = self.total_secs
        return stats
_T = TypeVar("_T")
_C = TypeVar("_C", bound=PretrainedConfig, default=PretrainedConfig)
_P = TypeVar("_P", bound=ProcessorMixin, default=ProcessorMixin)
@dataclass(frozen=True)
class InputProcessingContext:
    """
    Contains information about the model which may be used to
    modify the inputs.
    """
    model_config: ModelConfig
    """The configuration of the model."""
    tokenizer: TokenizerLike | None
    """The tokenizer used to tokenize the inputs."""
    def get_tokenizer(self) -> TokenizerLike:
        """
        Return the tokenizer.

        Raises:
            ValueError: If the tokenizer was not initialized
                (`skip_tokenizer_init=True`).
        """
        if self.tokenizer is None:
            raise ValueError(
                "You cannot pass text prompts when `skip_tokenizer_init=True`"
            )
        return self.tokenizer
    @overload
    def get_hf_config(self, /) -> PretrainedConfig: ...
    @overload
    def get_hf_config(
        self,
        typ: type[_C] | tuple[type[_C], ...],
        /,
    ) -> _C: ...
    def get_hf_config(
        self,
        typ: type[Any] | tuple[type[Any], ...] | None = None,
        /,
    ) -> Any:
        """
        Get the HuggingFace configuration
        (`transformers.PretrainedConfig`) of the model,
        additionally checking its type.
        Raises:
            TypeError: If the configuration is not of the specified type.
        """
        if typ is None:
            # Imported lazily to keep transformers off the module import path.
            from transformers.configuration_utils import PretrainedConfig
            typ = PretrainedConfig
        hf_config = self.model_config.hf_config
        if not isinstance(hf_config, typ):
            raise TypeError(
                "Invalid type of HuggingFace config. "
                f"Expected type: {typ}, but "
                f"found type: {type(hf_config)}"
            )
        return hf_config
    def get_hf_image_processor_config(self) -> dict[str, Any]:
        """
        Get the HuggingFace image processor configuration of the model.
        """
        return self.model_config.hf_image_processor_config
    def get_mm_config(self):
        """
        Get the multimodal config of the model.
        Raises:
            RuntimeError: If the model is not a multimodal model.
        """
        mm_config = self.model_config.multimodal_config
        if mm_config is None:
            raise RuntimeError("Not a multimodal model")
        return mm_config
    @overload
    def get_hf_processor(self, /, **kwargs: object) -> ProcessorMixin: ...
    @overload
    def get_hf_processor(
        self,
        typ: type[_P] | tuple[type[_P], ...],
        /,
        **kwargs: object,
    ) -> _P: ...
    def get_hf_processor(
        self,
        typ: type[Any] | tuple[type[Any], ...] | None = None,
        /,
        **kwargs: object,
    ) -> Any:
        """
        Get the HuggingFace processor
        (`transformers.ProcessorMixin`) of the model,
        additionally checking its type.
        Raises:
            TypeError: If the processor is not of the specified type.
        """
        if typ is None:
            from transformers.processing_utils import ProcessorMixin
            typ = ProcessorMixin
        tokenizer = self.tokenizer
        # Mistral tokenizers wrap an HF tokenizer; HF processors need the
        # underlying one.
        if is_mistral_tokenizer(tokenizer):
            tokenizer = tokenizer.transformers_tokenizer
        merged_kwargs = self.get_merged_mm_kwargs(kwargs)
        # The tokenizer is passed explicitly below; drop any conflicting kwarg.
        merged_kwargs.pop("tokenizer", None)
        return cached_processor_from_config(
            self.model_config,
            processor_cls=typ,
            tokenizer=tokenizer,
            **merged_kwargs,
        )
    def init_processor(
        self,
        typ: type[_T],
        /,
        **kwargs: object,
    ) -> _T:
        """
        Initialize a HuggingFace-like processor class, merging the
        keyword arguments with those in the model's configuration.
        """
        merged_kwargs = self.get_merged_mm_kwargs(kwargs)
        return typ(**merged_kwargs)
    def _postprocess_output(
        self,
        output: JSONTree,
    ) -> JSONTree:
        """Cast floating-point tensor leaves to the model dtype."""
        def _postprocess_one(x: object):
            if isinstance(x, torch.Tensor):  # noqa: SIM102
                # This mimics the behavior of transformers.BatchFeature
                if x.is_floating_point():
                    x = x.to(dtype=self.model_config.dtype)
            return x
        return json_map_leaves(_postprocess_one, output)
    def get_merged_mm_kwargs(self, kwargs: Mapping[str, object]):
        """Merge caller kwargs with the multimodal processor kwargs from config."""
        mm_config = self.model_config.get_multimodal_config()
        return mm_config.merge_mm_processor_kwargs(kwargs)
    def call_hf_processor(
        self,
        hf_processor: ProcessorMixin,
        data: Mapping[str, object],
        kwargs: Mapping[str, object] = {},
        *,
        num_tries: int = 1,
        max_tries: int = 5,
    ) -> BatchFeature | JSONTree:
        """
        Call `hf_processor` on the prompt `data`
        (text, image, audio...) with configurable options `kwargs`.

        Retries up to `max_tries` times on the tokenizers "Already borrowed"
        race; any other failure is re-raised as `ValueError`.
        """
        assert callable(hf_processor)
        merged_kwargs = self.get_merged_mm_kwargs(kwargs)
        # Only forward kwargs that the processor's signature actually accepts.
        allowed_kwargs = get_allowed_kwarg_only_overrides(
            hf_processor,
            merged_kwargs,
            requires_kw_only=False,
            allow_var_kwargs=True,
        )
        try:
            output = hf_processor(**data, **allowed_kwargs, return_tensors="pt")
        except Exception as exc:
            # See https://github.com/huggingface/tokenizers/issues/537
            if (
                isinstance(exc, RuntimeError)
                and exc
                and exc.args[0] == "Already borrowed"
                and num_tries < max_tries
            ):
                logger.warning(
                    "Failed to acquire tokenizer in current thread. "
                    "Retrying (%d/%d)...",
                    num_tries,
                    max_tries,
                )
                time.sleep(0.5)
                return self.call_hf_processor(
                    hf_processor,
                    data,
                    kwargs,
                    num_tries=num_tries + 1,
                    max_tries=max_tries,
                )
            msg = (
                f"Failed to apply {type(hf_processor).__name__} "
                f"on data={data} with kwargs={allowed_kwargs}"
            )
            raise ValueError(msg) from exc
        # this emulates output.to(dtype=self.model_config.dtype)
        from transformers.feature_extraction_utils import BatchFeature
        if isinstance(output, BatchFeature):
            output_ = self._postprocess_output(output.data)
            return BatchFeature(output_)
        logger.warning_once(
            "%s did not return `BatchFeature`. "
            "Make sure to match the behaviour of `ProcessorMixin` when "
            "implementing custom processors.",
            type(hf_processor).__name__,
        )
        return self._postprocess_output(output)
class BaseProcessingInfo:
"""Base class to provide the information necessary for data processing."""
def __init__(self, ctx: InputProcessingContext) -> None:
super().__init__()
self.ctx = ctx
@property
def model_id(self) -> str:
return self.ctx.model_config.model
def get_tokenizer(self) -> TokenizerLike:
return self.ctx.get_tokenizer()
def get_hf_config(self) -> PretrainedConfig:
return self.ctx.get_hf_config()
def get_hf_processor(self, **kwargs: object) -> ProcessorMixin:
"""
Subclasses can override this method to handle
specific kwargs from model config or user inputs.
"""
return self.ctx.get_hf_processor(**kwargs)
def get_default_tok_params(self) -> TokenizeParams:
"""Construct the default parameters for tokenization."""
model_config = self.ctx.model_config
encoder_config = model_config.encoder_config or {}
return TokenizeParams(
max_total_tokens=model_config.max_model_len,
do_lower_case=encoder_config.get("do_lower_case", False),
add_special_tokens=True,
)
@cached_property
def default_tok_params(self) -> TokenizeParams:
return self.get_default_tok_params()
def _get_expected_hidden_size(self) -> int | None:
"""
Get expected hidden size for embedding validation if `mm_embeds` are enabled.
This validates hidden dimensions to prevent a vulnerability where embeddings
with correct `ndim` but wrong `shape` could cause crashes at inference time.
"""
model_config = self.ctx.model_config
mm_config = model_config.get_multimodal_config()
if mm_config.enable_mm_embeds:
return model_config.get_inputs_embeds_size()
return None
def get_data_parser(self) -> MultiModalDataParser:
"""
Constructs a parser to preprocess multi-modal data items
before passing them to
[`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data].
You can support additional modalities by creating a subclass
of [`MultiModalDataParser`][vllm.multimodal.parse.MultiModalDataParser]
that has additional subparsers.
"""
return MultiModalDataParser(
expected_hidden_size=self._get_expected_hidden_size(),
)
@cached_property
def data_parser(self) -> MultiModalDataParser:
return self.get_data_parser()
@property
def skip_prompt_length_check(self) -> bool:
return False
@abstractmethod
def get_supported_mm_limits(self) -> Mapping[str, int | None]:
"""
Return the maximum supported number of items for each modality.
A value of `None` means unlimited number of items.
Omitting a modality from the returned dictionary means that
it is not supported at all.
"""
raise NotImplementedError
@cached_property
def supported_mm_limits(self) -> Mapping[str, int | None]:
"""The maximum supported number of items for each modality."""
return self.get_supported_mm_limits()
@cached_property
def allowed_mm_limits(self) -> Mapping[str, int]:
"""The maximum allowed number of items for each modality."""
mm_config = self.ctx.get_mm_config()
allowed_limits = dict[str, int]()
for modality, supported_limit in self.supported_mm_limits.items():
user_limit = mm_config.get_limit_per_prompt(modality)
allowed_limits[modality] = (
user_limit
if supported_limit is None
else min(user_limit, supported_limit)
)
return allowed_limits
def validate_num_items(self, modality: str, num_items: int) -> None:
"""
Raise `ValueError` if the number of input items for the given modality
is invalid.
"""
supported_limit = self.supported_mm_limits.get(modality, 0)
allowed_limit = self.allowed_mm_limits.get(modality, 0)
if supported_limit is None:
supported_limit = allowed_limit
limit = min(supported_limit, allowed_limit)
if num_items > limit:
msg = f"At most {limit} {modality}(s) may be provided in one prompt."
if num_items <= supported_limit:
msg += " Set `--limit-mm-per-prompt` to increase this limit."
raise ValueError(msg)
def parse_mm_data(
    self,
    mm_data: MultiModalDataDict,
    *,
    validate: bool = True,
) -> MultiModalDataItems:
    """
    Parse a raw
    [`MultiModalDataDict`][vllm.multimodal.inputs.MultiModalDataDict] into
    [`MultiModalDataItems`][vllm.multimodal.parse.MultiModalDataItems]
    suitable for
    [`_get_hf_mm_data`][vllm.multimodal.processing.BaseMultiModalProcessor._get_hf_mm_data],
    optionally validating item counts and embedding inputs.
    """
    mm_items = self.data_parser.parse_mm_data(mm_data)
    if not validate:
        return mm_items

    mm_config = self.ctx.get_mm_config()
    for modality, items in mm_items.items():
        is_embeds = isinstance(items, (EmbeddingItems, DictEmbeddingItems))
        if is_embeds:
            # Embedding inputs are only accepted when explicitly enabled.
            if not mm_config.enable_mm_embeds:
                raise ValueError(
                    f"You must set `--enable-mm-embeds` to input "
                    f"`{modality}_embeds`"
                )
            # A limit of 0 with embedding inputs skips count validation.
            if mm_config.get_limit_per_prompt(modality) == 0:
                logger.debug(
                    "Skipping count validation for modality "
                    "'%s' (embeddings with limit=0)",
                    modality,
                )
                continue
        self.validate_num_items(modality, len(items))

    return mm_items
def get_mm_max_tokens_per_item(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> Mapping[str, int] | None:
    """
    Return the maximum number of tokens per item for each modality.

    When `None` (the default) is returned, vLLM will generate dummy inputs
    (images/videos) at maximum possible sizes and process them to determine
    the maximum token count per modality.

    This approach works but can be very slow for certain models (e.g.,
    Qwen2.5-VL), leading to very long startup time. For better performance,
    each model can override this method to return pre-computed maximum token
    counts, avoiding the need for dummy input generation and processing.

    Note:
        The maximum number of tokens per item of each modality returned
        from this function should respect the model's maximum sequence
        length and the maximum number of items of each modality allowed,
        and agree with dummy inputs (images/videos) at maximum possible
        sizes.
    """
    return None
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/multimodal/processing/context.py",
"license": "Apache License 2.0",
"lines": 408,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/model_executor/models/molmo2.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import math
from collections.abc import Iterable, Mapping, Sequence
from dataclasses import dataclass, fields
from functools import cached_property, partial
from itertools import islice
from typing import Annotated, Any
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import ImageOps
from PIL.Image import Image
from transformers import (
BatchFeature,
PretrainedConfig,
ProcessorMixin,
TensorType,
)
from transformers.image_utils import ImageInput
from transformers.tokenization_utils_base import TextInput
from transformers.video_utils import VideoInput, VideoMetadata
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.config.multimodal import BaseDummyOptions, VideoDummyOptions
from vllm.distributed import (
get_pp_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
split_tensor_along_last_dim,
tensor_model_parallel_all_gather,
)
from vllm.logger import init_logger
from vllm.model_executor.layers.activation import MulAndSilu, SiluAndMul, get_act_fn
from vllm.model_executor.layers.attention import Attention, MMEncoderAttention
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (
ColumnParallelLinear,
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear,
)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.vocab_parallel_embedding import (
ParallelLMHead,
VocabParallelEmbedding,
)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.module_mapping import MultiModelKeys
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.inputs import (
MultiModalDataDict,
MultiModalFieldConfig,
MultiModalKwargsItems,
VideoItem,
)
from vllm.multimodal.parse import (
ImageProcessorItems,
ImageSize,
MultiModalDataItems,
MultiModalDataParser,
)
from vllm.multimodal.processing import (
BaseMultiModalProcessor,
BaseProcessingInfo,
PromptReplacement,
PromptUpdate,
PromptUpdateDetails,
)
from vllm.multimodal.processing.dummy_inputs import BaseDummyInputsBuilder
from vllm.sequence import IntermediateTensors
from vllm.utils.math_utils import round_down
from vllm.utils.tensor_schema import TensorSchema, TensorShape
from .interfaces import (
MultiModalEmbeddings,
SupportsLoRA,
SupportsMultiModal,
SupportsPP,
SupportsQuant,
)
from .utils import (
AutoWeightsLoader,
WeightsMapper,
_merge_multimodal_embeddings,
extract_layer_index,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory,
make_layers,
maybe_prefix,
)
logger = init_logger(__name__)

# Special tokens. These should be present in any tokenizer we use
# because the preprocessor relies on them.
IMAGE_PROMPT = "<|image|>"
VIDEO_PROMPT = "<|video|>"

# Maximum video frames-per-second (presumably caps frame sampling;
# consumed by video preprocessing code later in the file).
_MAX_VIDEO_FPS = 8
class Molmo2ImageInputs(TensorSchema):
    """
    Dimensions:
        - nc: The total number of crops (dynamic)
        - np: The total number of patches per crop
        - cps: Number of channels * patch_size * patch_size
        - npp: Number of pooled patches (dynamic)
        - pp: pooling_size * pooling_size
        - ni: Number of images
        - nt: Number of image tokens (dynamic)
    """

    # Flattened crop patches fed to the vision transformer.
    pixel_values: Annotated[torch.Tensor, TensorShape("nc", "np", "cps")]

    token_pooling: Annotated[torch.Tensor, TensorShape("npp", "pp")]
    """
    An index tensor that maps image features to their corresponding
    patch tokens before pooling.
    """

    # Number of pooled patches contributed by each image.
    num_pooled_patches: Annotated[torch.Tensor, TensorShape("ni")]

    # Boolean flags over the image-token positions; NOTE(review): exact
    # semantics are established by the consumer outside this chunk — confirm.
    image_tokens: Annotated[torch.BoolTensor, TensorShape("nt")]

    # Number of image tokens contributed by each image.
    num_image_tokens: Annotated[torch.Tensor, TensorShape("ni")]
class Molmo2VideoInputs(TensorSchema):
    """
    Dimensions:
        - nc: The total number of frames (dynamic)
        - np: The total number of patches per frame
        - cps: Number of channels * patch_size * patch_size
        - npp: Number of pooled patches (dynamic)
        - pp: pooling_size * pooling_size
        - nv: Number of videos
        - nt: Number of video tokens (dynamic)
    """

    # Flattened frame patches fed to the vision transformer.
    pixel_values_videos: Annotated[torch.Tensor, TensorShape("nc", "np", "cps")]

    token_pooling: Annotated[torch.Tensor, TensorShape("npp", "pp")]
    """
    An index tensor that maps image features to their corresponding
    patch tokens before pooling.
    """

    # Number of pooled patches contributed by each video.
    num_pooled_patches: Annotated[torch.Tensor, TensorShape("nv")]

    # Boolean flags over the video-token positions; NOTE(review): exact
    # semantics are established by the consumer outside this chunk — confirm.
    video_tokens: Annotated[torch.BoolTensor, TensorShape("nt")]

    # Number of video tokens contributed by each video.
    num_video_tokens: Annotated[torch.Tensor, TensorShape("nv")]
@dataclass
class VitConfig:
    """Config for a vision transformer"""

    hidden_size: int = 1152
    intermediate_size: int = 4304
    num_hidden_layers: int = 27
    num_attention_heads: int = 16
    num_key_value_heads: int = 16
    head_dim: int = 72
    hidden_act: str = "gelu_pytorch_tanh"
    layer_norm_eps: float = 1e-6
    # Default (height, width) of the input image in pixels.
    image_default_input_size: tuple[int, int] = (378, 378)
    image_patch_size: int = 14
    # Number of positional embeddings in the learned table.
    image_num_pos: int = 577

    def __post_init__(self):
        # Normalize to a tuple (presumably the value arrives as a list when
        # the config is deserialized from JSON).
        self.image_default_input_size = tuple(self.image_default_input_size)  # type: ignore[assignment]

    @property
    def image_num_patch(self):
        # Patch-grid size (rows, cols) at the default input resolution.
        h, w = self.image_default_input_size
        return h // self.image_patch_size, w // self.image_patch_size
@dataclass
class AdapterConfig:
    """Config for a vit-llm adapter"""

    # ViT layers whose hidden states are concatenated as features
    # (negative values index from the last layer).
    vit_layers: tuple[int, int] = (-3, -9)
    # Use the masked SDPA path during feature pooling (see
    # ImagePoolingAttention.use_pytorch_sdpa).
    pooling_attention_mask: bool = False
    hidden_size: int = 1152
    num_attention_heads: int = 16
    num_key_value_heads: int = 16
    head_dim: int = 72
    hidden_act: str = "silu"
    intermediate_size: int = 18944
    # Hidden size of the language model the adapter projects into.
    text_hidden_size: int = 3584
@dataclass
class TextConfig:
    """Configuration for a text model transformer"""

    hidden_size: int = 3584
    """
    The hidden size of the model.
    """

    num_attention_heads: int = 28
    """
    The number of self-attention heads.
    """

    num_key_value_heads: int = 4
    """
    The number of heads to use for keys and values.
    """

    head_dim: int = 128
    """
    The head dimensionality for the attention mechanism.
    """

    vocab_size: int = 152064
    """Vocabulary size of the model."""

    additional_vocab_size: int = 128
    """Number of additional tokens to have the input embeddings for"""

    qkv_bias: bool = True
    """
    Whether the QKV projection uses a bias term.
    """

    num_hidden_layers: int = 48
    """
    The number of layers/blocks.
    """

    intermediate_size: int = 18944
    """
    The hidden size for the MLP.
    """

    hidden_act: str = "silu"
    """
    The activation function to use within the MLP layers.
    """

    max_position_embeddings: int = 4096
    """
    Max positional embeddings to use in RoPE cache
    """

    rope_theta: float = 1000000.0
    """
    RoPE theta parameter.
    """

    use_qk_norm: bool = False
    """
    Apply layer norm to the keys and queries within the attention mechanism.
    This can help stabilize training.
    """

    qk_norm_type: str = "olmo"
    """
    The type of layer norm to use for the keys and queries.
    Can be "olmo" (normalizes the full projected Q/K vectors, gathered
    across tensor-parallel ranks) or "qwen3" (normalizes each attention
    head independently). See Molmo2Attention.forward.
    """

    layer_norm_eps: float = 1e-6
    """
    epsilon for layer norms
    """

    norm_after: bool = False
    """
    Apply layer norm after (rather than before) the attention and MLP
    blocks; selects Molmo2DecoderNormAfterLayer instead of Molmo2DecoderLayer.
    """

    rope_scaling_layers: tuple[int, ...] | None = None
    """
    Layer indices that keep the configured RoPE scaling; all other layers
    fall back to default (unscaled) RoPE. `None` applies scaling everywhere.
    """
class ViTMLP(nn.Module):
    """Two-layer feed-forward block used inside the vision transformer."""

    def __init__(
        self,
        dim: int,
        hidden_dim: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Expand to the intermediate size (column-parallel across TP ranks).
        self.w1 = ColumnParallelLinear(
            dim,
            hidden_dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.w1",
        )
        # Non-linearity between the two projections.
        self.act = get_act_fn(hidden_act)
        # Project back to the model dimension (row-parallel).
        self.w2 = RowParallelLinear(
            hidden_dim,
            dim,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.w2",
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden, _ = self.w1(x)
        out, _ = self.w2(self.act(hidden))
        return out
class ViTMultiHeadDotProductAttention(nn.Module):
    """Multi-head attention used in Vision Transformer."""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_key_value_heads: int,
        head_dim: int,
        use_bias: bool = True,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        self.total_num_heads = num_heads
        tp_size = get_tensor_model_parallel_world_size()
        # Query heads must shard evenly across tensor-parallel ranks.
        assert self.hidden_size % self.total_num_heads == 0
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.head_dim = head_dim
        assert self.head_dim == self.hidden_size // self.total_num_heads
        self.total_num_kv_heads = num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Each rank holds a distinct subset of the KV heads.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Fewer KV heads than ranks: KV heads are replicated.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        # Fused Q/K/V projection.
        self.merged_qkv = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.merged_qkv",
        )
        # Output projection back to the hidden size.
        self.wo = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            self.hidden_size,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.wo",
        )
        self.scale = self.head_dim**-0.5
        self.attn = MMEncoderAttention(
            self.num_heads,
            self.head_dim,
            self.scale,
            num_kv_heads=self.num_kv_heads,
            prefix=f"{prefix}.attn",
        )

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Single fused projection, then split by role along the last dim.
        qkv, _ = self.merged_qkv(inputs)
        xq, xk, xv = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        output = self.attn(xq, xk, xv)
        output, _ = self.wo(output)
        return output
class Molmo2VisionBlock(nn.Module):
    """Pre-norm residual transformer block for the vision encoder."""

    def __init__(
        self,
        config: VitConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.attention = ViTMultiHeadDotProductAttention(
            hidden_size=config.hidden_size,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            head_dim=config.head_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.attention",
        )
        self.feed_forward = ViTMLP(
            dim=config.hidden_size,
            hidden_dim=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
            prefix=f"{prefix}.feed_forward",
        )
        self.attention_norm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_eps
        )
        self.ffn_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-norm residual connections around attention and MLP.
        attn_out = self.attention(self.attention_norm(x))
        x = x + attn_out
        ffn_out = self.feed_forward(self.ffn_norm(x))
        return x + ffn_out
class Molmo2VisionBlockCollection(nn.Module):
    """Stack of vision transformer blocks that records every block's output."""

    def __init__(
        self,
        config: VitConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.resblocks = nn.ModuleList(
            Molmo2VisionBlock(
                config,
                quant_config,
                prefix=f"{prefix}.resblocks.{block_idx}",
            )
            for block_idx in range(config.num_hidden_layers)
        )

    def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
        # Keep the hidden state after every block; callers select the
        # specific layers they need.
        outputs: list[torch.Tensor] = []
        for block in self.resblocks:
            x = block(x)
            outputs.append(x)
        return outputs
class Molmo2VisionTransformer(nn.Module):
    """Vision Transformer used in Vision Backbone."""

    def __init__(
        self,
        config: VitConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        scale = config.hidden_size**-0.5
        self.num_prefix_tokens: int = 0  # no class embeddings
        self.patch_num = config.image_num_patch
        # Learned absolute position embeddings (scaled random init).
        self.positional_embedding = nn.Parameter(
            torch.randn(config.image_num_pos, config.hidden_size) * scale,
        )
        image_patch_size = config.image_patch_size
        # Linear embedding of flattened RGB patches
        # (patch_size * patch_size * 3 values per patch).
        self.patch_embedding = nn.Linear(
            image_patch_size * image_patch_size * 3,
            config.hidden_size,
            bias=True,
        )
        self.transformer = Molmo2VisionBlockCollection(
            config,
            quant_config,
            prefix=f"{prefix}.transformer",
        )

    def add_pos_emb(
        self, x: torch.Tensor, patch_num: tuple[int, int]
    ) -> torch.Tensor:
        """Add position embeddings to ``x``, bicubically resizing the
        embedding grid when it does not match the actual patch grid."""
        pos_emb = self.positional_embedding
        # Reinterpret the flat table as a square grid.
        pos_emb = pos_emb.reshape(
            (
                int(math.sqrt(pos_emb.shape[0])),
                int(math.sqrt(pos_emb.shape[0])),
                pos_emb.shape[1],
            )
        )
        (patch_num_0, patch_num_1) = patch_num
        if pos_emb.shape[0] != patch_num_0 or pos_emb.shape[1] != patch_num_1:
            # from https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
            pos_emb = pos_emb.unsqueeze(0).permute(0, 3, 1, 2)
            pos_emb = F.interpolate(
                pos_emb,
                size=(patch_num_0, patch_num_1),
                mode="bicubic",
                align_corners=False,
                antialias=True,
            )
            pos_emb = pos_emb.permute(0, 2, 3, 1).squeeze(0)
        # Flatten back to (num_patches, hidden) and broadcast over the batch.
        pos_emb = pos_emb.reshape(-1, pos_emb.shape[-1])
        x = x + pos_emb[None, :, :].to(x.dtype)
        return x

    def forward(
        self,
        x: torch.Tensor,
        patch_num: tuple[int, int] | None = None,
    ) -> list[torch.Tensor]:
        """
        :param x: (batch_size, num_patch, n_pixels)
        :return: hidden states after every transformer block
        """
        if patch_num is None:
            patch_num = self.patch_num
        x = self.patch_embedding(x)
        x = self.add_pos_emb(x, patch_num)
        hidden_states = self.transformer(x)
        return hidden_states
class ImagePoolingAttention(nn.Module):
    """Multi-head attention used for image pooling"""

    def __init__(
        self,
        input_dim: int,
        hidden_size: int,
        num_heads: int,
        num_key_value_heads: int,
        head_dim: int,
        use_bias: bool = True,
        use_pytorch_sdpa: bool = False,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.hidden_size = hidden_size
        self.total_num_heads = num_heads
        tp_size = get_tensor_model_parallel_world_size()
        # Query heads must shard evenly across tensor-parallel ranks.
        assert self.hidden_size % self.total_num_heads == 0
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.head_dim = head_dim
        assert self.head_dim == self.hidden_size // self.total_num_heads
        self.total_num_kv_heads = num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Each rank holds a distinct subset of the KV heads.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Fewer KV heads than ranks: KV heads are replicated.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.kv_size = self.num_kv_heads * self.head_dim
        # Queries are projected from `inputs_q` (the pooled summary) ...
        self.q_proj = ColumnParallelLinear(
            self.input_dim,
            self.total_num_heads * self.head_dim,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.q_proj",
        )
        # ... while keys/values come from `inputs_kv` (the patch features).
        self.merged_kv = MergedColumnParallelLinear(
            self.input_dim,
            [self.total_num_kv_heads * self.head_dim] * 2,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.merged_kv",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            self.hidden_size,
            bias=use_bias,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.scale = self.head_dim**-0.5
        # The PyTorch-SDPA path is the one that accepts an attention mask;
        # the MMEncoderAttention path takes none (see forward()).
        self.use_pytorch_sdpa = use_pytorch_sdpa
        if use_pytorch_sdpa:
            self.attn = None
        else:
            self.attn = MMEncoderAttention(
                self.num_heads,
                self.head_dim,
                self.scale,
                num_kv_heads=self.num_kv_heads,
                prefix=f"{prefix}.attn",
            )

    def forward_sdpa(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attn_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Masked attention via ``F.scaled_dot_product_attention``.

        Inputs are flat per-token projections: query is
        (bsz, q_len, num_heads * head_dim); key/value are
        (bsz, kv_len, num_kv_heads * head_dim).
        """
        bsz, q_len, _ = query.size()
        kv_len = key.size(1)
        query = query.view(bsz, q_len, self.num_heads, self.head_dim)
        key = key.view(bsz, kv_len, self.num_kv_heads, self.head_dim)
        value = value.view(bsz, kv_len, self.num_kv_heads, self.head_dim)
        # SDPA expects (bsz, heads, seq, head_dim).
        query, key, value = (x.transpose(1, 2) for x in (query, key, value))
        out = F.scaled_dot_product_attention(
            query,
            key,
            value,
            attn_mask=attn_mask,
            is_causal=False,
            # Let SDPA broadcast KV heads when there are fewer of them.
            enable_gqa=self.num_heads > self.num_kv_heads,
        ).transpose(1, 2)
        return out.reshape(bsz, q_len, -1)

    def forward(
        self,
        inputs_q: torch.Tensor,
        inputs_kv: torch.Tensor,
        attn_mask: torch.Tensor | None = None,
    ) -> torch.Tensor:
        xq, _ = self.q_proj(inputs_q)
        kv, _ = self.merged_kv(inputs_kv)
        xk, xv = kv.split([self.kv_size, self.kv_size], dim=-1)
        if self.use_pytorch_sdpa:
            output = self.forward_sdpa(xq, xk, xv, attn_mask)
        else:
            output = self.attn(xq, xk, xv)
        output, _ = self.o_proj(output)
        return output
class ImageProjectorMLP(nn.Module):
    """Gated (SiLU) MLP that projects pooled image features."""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Fused gate/up projection.
        self.merged_linear = MergedColumnParallelLinear(
            input_dim,
            [hidden_dim] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.merged_linear",
        )
        # Only SiLU gating is supported.
        assert hidden_act == "silu"
        self.act_fn = SiluAndMul()
        # Feed-forward output projection.
        self.down_proj = RowParallelLinear(
            hidden_dim,
            output_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate_up, _ = self.merged_linear(x)
        projected, _ = self.down_proj(self.act_fn(gate_up))
        return projected
class Molmo2VisionBackbone(nn.Module, SupportsQuant):
    # Maps fused parameter names to their original checkpoint shard names
    # (consumed by quantization and by `load_weights` below).
    packed_modules_mapping = {
        "merged_qkv": ["wq", "wk", "wv"],  # vision backbone
        "merged_kv": ["k_proj", "v_proj"],  # image_pooling_2d
        "merged_linear": ["gate_proj", "up_proj"],
    }

    def __init__(
        self,
        vit_config: VitConfig,
        adapter_config: AdapterConfig,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.vit_config = vit_config
        self.adapter_config = adapter_config
        # Normalize negative feature-layer indices to absolute ViT indices.
        self.vit_layers = []
        for layer in adapter_config.vit_layers:
            if layer >= 0:
                self.vit_layers.append(layer)
            else:
                self.vit_layers.append(layer + vit_config.num_hidden_layers)
        # Drop trailing ViT layers no selected feature depends on.
        last_layer_needed = max(self.vit_layers) + 1
        if last_layer_needed < vit_config.num_hidden_layers:
            vit_config.num_hidden_layers = last_layer_needed
        self.image_vit = Molmo2VisionTransformer(
            vit_config,
            quant_config,
            prefix=f"{prefix}.image_vit",
        )
        self.num_prefix_tokens: int = self.image_vit.num_prefix_tokens
        # Features from the selected ViT layers are concatenated channel-wise.
        pool_dim = vit_config.hidden_size * len(adapter_config.vit_layers)
        self.image_pooling_2d = ImagePoolingAttention(
            input_dim=pool_dim,
            hidden_size=adapter_config.hidden_size,
            num_heads=adapter_config.num_attention_heads,
            num_key_value_heads=adapter_config.num_key_value_heads,
            head_dim=adapter_config.head_dim,
            use_pytorch_sdpa=adapter_config.pooling_attention_mask,
            quant_config=quant_config,
            prefix=f"{prefix}.image_pooling_2d",
        )
        # Projects pooled features into the language model's hidden space.
        self.image_projector = ImageProjectorMLP(
            input_dim=adapter_config.hidden_size,
            hidden_dim=adapter_config.intermediate_size,
            output_dim=adapter_config.text_hidden_size,
            hidden_act=adapter_config.hidden_act,
            quant_config=quant_config,
            prefix=f"{prefix}.image_projector",
        )

    @property
    def dtype(self) -> torch.dtype:
        # Mirror the patch-embedding weight so inputs can be cast to match.
        return self.image_vit.patch_embedding.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.image_vit.patch_embedding.weight.device

    def encode_image(self, images: torch.Tensor) -> torch.Tensor:
        """
        : param images: (batch_size, num_crops, num_patch, n_pixels)
        """
        B, T, N, D = images.shape
        # Fold crops into the batch dimension for the ViT forward pass.
        images = images.view(B * T, N, D)
        image_features = self.image_vit(images)
        # Concatenate hidden states of the selected ViT layers.
        features = []
        for layer in self.vit_layers:
            features.append(image_features[layer])
        image_features = torch.cat(features, dim=-1)
        if self.num_prefix_tokens > 0:
            # Strip the class/prefix token before pooling.
            image_features = image_features[:, 1:]
        image_features = image_features.view(B, T, N, -1)
        return image_features

    def forward(
        self,
        images: torch.Tensor,
        token_pooling: torch.Tensor,
    ) -> torch.Tensor:
        # image_features shape:
        # (batch_size, num_crops(=num_image), num_patch, nximage_emb_dim)
        batch_size, num_image = images.shape[:2]
        images = images.to(device=self.device, dtype=self.dtype)
        image_features = self.encode_image(images)
        dim = image_features.shape[-1]
        # Negative indices in `token_pooling` are padding.
        valid = token_pooling >= 0
        valid_token = torch.any(valid, -1)
        # Use `token_pooling` to arrange the features for image pooling
        batch_idx = torch.arange(
            token_pooling.shape[0],
            dtype=torch.long,
            device=token_pooling.device,
        )
        batch_idx = torch.tile(
            batch_idx.view(batch_size, 1, 1),
            [1, token_pooling.shape[1], token_pooling.shape[2]],
        )
        # Now [batch, num_features, num_pooled_patches, dim]
        to_pool = image_features.reshape(batch_size, -1, dim)[
            batch_idx, torch.clip(token_pooling, 0)
        ]
        # Zero out features gathered through padding (clipped) indices.
        to_pool = to_pool * valid.to(self.dtype)[:, :, :, None]
        to_pool = to_pool.reshape([-1, token_pooling.shape[-1], dim])
        if self.adapter_config.pooling_attention_mask:
            # Masked pooling: seed the query with a mean over valid patches.
            attn_mask = valid.reshape([-1, 1, 1, valid.shape[-1]])
            denom = valid.view(-1, to_pool.shape[-2]).float().sum(-1)
            # Guard against all-invalid windows (avoid division by zero).
            denom = torch.where(denom == 0, 1, denom)
            query = to_pool.sum(-2, keepdim=True) / denom[:, None, None].to(
                to_pool.dtype
            )
        else:
            attn_mask = None
            query = to_pool.mean(-2, keepdim=True)
        pooled_features = self.image_pooling_2d(query, to_pool, attn_mask=attn_mask)
        pooled_features = pooled_features.reshape(
            [batch_size, -1, pooled_features.shape[-1]]
        )
        # MLP layer to map the feature.
        pooled_features = self.image_projector(pooled_features)
        # Keep only positions that pooled at least one valid patch.
        return pooled_features.view(-1, pooled_features.shape[-1])[
            valid_token.flatten()
        ]

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        """Load checkpoint weights, folding sharded Q/K/V, K/V and
        gate/up weights into their fused parameters."""
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("merged_qkv", "wq", "q"),
            ("merged_qkv", "wk", "k"),
            ("merged_qkv", "wv", "v"),
            ("merged_kv", "k_proj", 0),
            ("merged_kv", "v_proj", 1),
            ("merged_linear", "gate_proj", 0),
            ("merged_linear", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Not part of a fused parameter: load directly by name.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
class Molmo2Attention(nn.Module):
    """Molmo2's LLM Attention."""

    def __init__(
        self,
        config: TextConfig,
        rope_parameters: dict[str, Any],
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        self.tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        # Query heads must shard evenly across tensor-parallel ranks.
        assert self.hidden_size % self.total_num_heads == 0
        assert self.total_num_heads % self.tp_size == 0
        self.num_heads = self.total_num_heads // self.tp_size
        self.total_num_kv_heads = config.num_key_value_heads
        if self.total_num_kv_heads >= self.tp_size:
            # Each rank holds a distinct subset of the KV heads.
            assert self.total_num_kv_heads % self.tp_size == 0
        else:
            # Fewer KV heads than ranks: KV heads are replicated.
            assert self.tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // self.tp_size)
        self.head_dim = config.head_dim
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        # Attention input projection. Projects x -> (q, k, v)
        self.qkv_proj = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=config.qkv_bias,
            quant_config=quant_config,
        )
        # Optional Q/K normalization. "olmo" normalizes the full projected
        # vectors (requires an all-gather under TP); "qwen3" normalizes
        # per head (see forward()).
        self.tp_rank: int | None = None
        self.k_norm: nn.Module | None = None
        self.q_norm: nn.Module | None = None
        self.qk_norm_type: str | None = None
        if config.use_qk_norm:
            k_norm_size = (
                self.head_dim
                if config.qk_norm_type == "qwen3"
                else self.total_num_kv_heads * self.head_dim
            )
            self.tp_rank = get_tensor_model_parallel_rank()
            self.k_norm = RMSNorm(k_norm_size, eps=config.layer_norm_eps)
            q_norm_size = (
                self.head_dim
                if config.qk_norm_type == "qwen3"
                else self.total_num_heads * self.head_dim
            )
            self.q_norm = RMSNorm(q_norm_size, eps=config.layer_norm_eps)
            self.qk_norm_type = config.qk_norm_type
        # Rotary embeddings. Rope scaling is only applied on full attention layers.
        layer_idx = extract_layer_index(prefix)
        if (
            config.rope_scaling_layers is not None
            and layer_idx not in config.rope_scaling_layers
        ):
            # This layer is excluded from RoPE scaling: fall back to the
            # default (unscaled) RoPE with the same theta.
            rope_theta = rope_parameters["rope_theta"]
            rope_parameters = {"rope_type": "default", "rope_theta": rope_theta}
        self.rotary_emb = get_rope(
            self.head_dim,
            max_position=self.max_position_embeddings,
            rope_parameters=rope_parameters,
        )
        self.scaling = self.head_dim**-0.5
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        # Attention output projection.
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            self.hidden_size,
            bias=False,
            quant_config=quant_config,
        )

    def _apply_qk_norm(
        self,
        q: torch.Tensor,
        k: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        # "olmo"-style norm acts on the full (unsharded) projection, so the
        # shards are gathered, normalized, then re-split per rank.
        if self.tp_size > 1:
            q = tensor_model_parallel_all_gather(q.contiguous())
            k = tensor_model_parallel_all_gather(k.contiguous())
        q = self.q_norm(q)
        k = self.k_norm(k)
        if self.tp_size > 1:
            splitter = partial(split_tensor_along_last_dim, num_partitions=self.tp_size)
            q = splitter(q)[self.tp_rank]
            k = splitter(k)[self.tp_rank]
        return q, k

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        **kwargs: object,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        if (
            self.q_norm is not None
            and self.k_norm is not None
            and self.qk_norm_type == "olmo"
        ):
            q, k = self._apply_qk_norm(q, k)
        elif self.q_norm is not None and self.k_norm is not None:
            # "qwen3"-style: normalize each head independently.
            q_by_head = q.view(
                *q.shape[:-1],
                q.shape[-1] // self.head_dim,
                self.head_dim,
            )
            q_by_head = self.q_norm(q_by_head)
            q = q_by_head.view(q.shape)
            k_by_head = k.view(
                *k.shape[:-1],
                k.shape[-1] // self.head_dim,
                self.head_dim,
            )
            k_by_head = self.k_norm(k_by_head)
            k = k_by_head.view(k.shape)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
class LanguageModelMLP(nn.Module):
    """Gated MLP block of the Molmo2 language model."""

    def __init__(
        self,
        input_dim: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: QuantizationConfig | None = None,
    ) -> None:
        super().__init__()
        # Fused up/gate projection.
        self.up_gate_proj = MergedColumnParallelLinear(
            input_dim,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
        )
        # Only SiLU gating is supported.
        assert hidden_act == "silu"
        self.act_fn = MulAndSilu()
        # Feed-forward output projection.
        self.down_proj = RowParallelLinear(
            intermediate_size,
            input_dim,
            bias=False,
            quant_config=quant_config,
        )

    def forward(
        self,
        x: torch.Tensor,
    ) -> torch.Tensor:
        fused, _ = self.up_gate_proj(x)
        out, _ = self.down_proj(self.act_fn(fused))
        return out
class Molmo2DecoderLayer(nn.Module):
    """Pre-norm decoder layer: RMSNorm -> attention -> RMSNorm -> MLP,
    using fused add-and-norm residual handling."""

    def __init__(
        self,
        config: TextConfig,
        rope_parameters: dict[str, Any],
        cache_config: CacheConfig | None = None,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Attention block.
        self.self_attn = Molmo2Attention(
            config,
            rope_parameters,
            cache_config,
            quant_config,
            prefix=f"{prefix}.self_attn",
        )
        # MLP block.
        self.mlp = LanguageModelMLP(
            config.hidden_size,
            config.intermediate_size,
            config.hidden_act,
            quant_config,
        )
        # LayerNorm
        self.input_layernorm = RMSNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = RMSNorm(
            config.hidden_size,
            eps=config.layer_norm_eps,
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs: object,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # Self Attention
        if residual is None:
            # First layer of the stage: start the residual stream.
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            # Fused add-and-norm: RMSNorm folds in the previous residual.
            hidden_states, residual = self.input_layernorm(hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            **kwargs,
        )
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
class Molmo2DecoderNormAfterLayer(Molmo2DecoderLayer):
    """Decoder layer variant applying layer norm *after* each sub-block
    (selected when ``TextConfig.norm_after`` is set)."""

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: torch.Tensor | None,
        **kwargs: object,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        # Attention sub-block: attn -> norm -> residual add.
        shortcut = hidden_states
        attn_out = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            **kwargs,
        )
        hidden_states = shortcut + self.input_layernorm(attn_out)

        # MLP sub-block: mlp -> norm -> residual add.
        shortcut = hidden_states
        hidden_states = shortcut + self.post_attention_layernorm(self.mlp(shortcut))

        # Residuals are already folded in; signal that to the caller.
        return hidden_states, None
@support_torch_compile
class Molmo2TextModel(nn.Module, SupportsQuant):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.config = config
        # The HF config names the text sub-config either `text_config`
        # or `llm_config`.
        if hasattr(config, "text_config"):
            hf_text_config = config.text_config
        else:
            hf_text_config = config.llm_config
        # Copy only the fields that TextConfig declares.
        kwargs = {}
        for field in fields(TextConfig):
            kwargs[field.name] = getattr(hf_text_config, field.name)
        text_config = TextConfig(**kwargs)
        # Embedding table covers the base vocab plus any additional tokens.
        self.embedding_size = text_config.vocab_size
        self.embedding_size += text_config.additional_vocab_size or 0
        self.embed_tokens = VocabParallelEmbedding(
            self.embedding_size,
            text_config.hidden_size,
            quant_config=quant_config,
        )
        # Choose the norm-after layer variant when configured.
        decoder_layer = (
            Molmo2DecoderNormAfterLayer
            if text_config.norm_after
            else Molmo2DecoderLayer
        )
        # Only this pipeline stage's layers are materialized.
        self.start_layer, self.end_layer, self.layers = make_layers(
            text_config.num_hidden_layers,
            lambda prefix: decoder_layer(
                text_config,
                hf_text_config.rope_parameters,
                cache_config=cache_config,
                quant_config=quant_config,
                prefix=prefix,
            ),
            prefix=f"{prefix}.layers",
        )
        self.norm = RMSNorm(text_config.hidden_size, eps=text_config.layer_norm_eps)
        self.make_empty_intermediate_tensors = make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"],
            text_config.hidden_size,
        )

    def embed_input_ids(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor | IntermediateTensors:
        if get_pp_group().is_first_rank:
            # Prefer precomputed embeddings (e.g. with multimodal features
            # merged in) over raw token ids.
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.embed_tokens(input_ids)
            residual = None
        else:
            # Later pipeline stages resume from the previous stage's tensors.
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]
        # Apply blocks one-by-one.
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            hidden_states, residual = layer(
                positions,
                hidden_states,
                residual,
                **kwargs,
            )
        if not get_pp_group().is_last_rank:
            # Hand off to the next pipeline stage.
            return IntermediateTensors(
                {"hidden_states": hidden_states, "residual": residual}
            )
        # Final fused add-and-norm; residual is None for norm-after layers.
        if residual is not None:
            hidden_states, _ = self.norm(hidden_states, residual)
        else:
            hidden_states = self.norm(hidden_states)
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            # Skip bias entries with no matching parameter.
            if name.endswith(".bias") and name not in params_dict:
                continue
            if is_pp_missing_parameter(name, self):
                continue
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
def get_patches_grid_size(
    *,
    image_h: int,
    image_w: int,
    patch_size: int,
    pool_h: int,
    pool_w: int,
) -> tuple[int, int]:
    """Return the pooled (rows, cols) patch grid for an image of this size.

    The image is split into ``patch_size`` patches, the patch grid is padded
    up to a multiple of the pooling window, and the pooled grid is returned.
    """
    raw_rows = image_h // patch_size
    raw_cols = image_w // patch_size
    # Pad each dimension up to the next multiple of the pooling window.
    padded_rows = round_down(raw_rows + pool_h - 1, pool_h)
    padded_cols = round_down(raw_cols + pool_w - 1, pool_w)
    return padded_rows // pool_h, padded_cols // pool_w
def get_candidate_tilings(max_num: int) -> list[tuple[int, int]]:
    """Enumerate all (rows, cols) tilings using at most ``max_num`` tiles.

    Results are ordered by total tile count, then by row count.
    """
    found: list[tuple[int, int]] = []
    for rows in range(1, max_num + 1):
        for cols in range(1, max_num + 1):
            if rows * cols <= max_num:
                found.append((rows, cols))
    found.sort(key=lambda t: (t[0] * t[1], t[0]))
    return found
def select_tiling(
    *,
    height: int,
    width: int,
    patch_size: int,
    max_num_patches: int,
):
    """Pick the candidate tiling whose resolution best matches the image.

    If every tiling would require upscaling (scale < 1), the one needing the
    least upscaling wins; otherwise the tiling with the smallest downscale
    factor wins.
    """
    options = np.array(get_candidate_tilings(max_num_patches), dtype=np.int32)
    resolutions = options * patch_size
    target = np.array([height, width], dtype=np.float32)
    # Per-tiling scale, limited by the more constrained dimension.
    scale_per_dim = resolutions.astype(np.float32) / target
    limiting_scale = scale_per_dim.min(axis=-1, keepdims=True)
    if (limiting_scale < 1).all():
        # Everything upscales; take the option closest to native resolution.
        best = limiting_scale.argmax()
    else:
        # Mask out upscaling options and take the smallest downscale factor.
        best = np.where(limiting_scale < 1.0, 10e9, limiting_scale).argmin()
    return options[best]
def get_image_size(image: ImageInput) -> ImageSize:
    """Return the size of a PIL image, ndarray, or tensor as an ImageSize.

    Array/tensor inputs are expected in HWC layout with 1 or 3 channels.
    """
    if isinstance(image, Image):
        # PIL's .size is already (width, height).
        return ImageSize(*image.size)
    if isinstance(image, (np.ndarray, torch.Tensor)):
        assert image.ndim == 3
        height, width, channels = image.shape
        assert channels in [1, 3]
        return ImageSize(width, height)
    raise ValueError(f"Unknown image type: {type(image)}")
def exif_tranpose(
    images: ImageInput | None,
) -> ImageInput | None:
    """Apply EXIF orientation to PIL images, recursing into lists/tuples.

    Non-PIL items pass through untouched and ``None`` stays ``None``.
    Note that a tuple input comes back as a list.
    """
    if images is None:
        return None
    if isinstance(images, (list, tuple)):
        return [
            exif_tranpose(item) if isinstance(item, Image) else item
            for item in images
        ]
    if isinstance(images, Image):
        return ImageOps.exif_transpose(images)
    return images
def build_flat_image_bool_length(
    image_grids: torch.LongTensor,
    image_patch_id: int,
    low_res_image_start_id: int,
    image_start_id: int,
    image_col_id: int,
    image_end_id: int,
) -> tuple[torch.LongTensor, torch.LongTensor]:
    """Expand per-image grids into one flat stream of placeholder token ids.

    Each row of ``image_grids`` is ``(resized_h, resized_w, h, w)``. The
    per-image layout is: low-res start token, resized_h*resized_w patch
    tokens, end token, start token, then h rows of (w patch tokens followed
    by one column token), and a final end token.

    Returns the concatenated token stream and the per-image lengths.
    """
    device = image_grids.device
    resized_h = image_grids[:, 0]
    resized_w = image_grids[:, 1]
    h = image_grids[:, 2]
    w = image_grids[:, 3]
    # 4 special tokens per image: low-res start/end plus high-res start/end.
    lengths = resized_h * resized_w + h * (w + 1) + 4  # [B]
    chunks = []
    for resized_h_i, resized_w_i, h_i, w_i in image_grids.tolist():
        tokens = [low_res_image_start_id]
        tokens.extend([image_patch_id] * (resized_h_i * resized_w_i))
        tokens.append(image_end_id)
        tokens.append(image_start_id)
        tokens.extend(([image_patch_id] * w_i + [image_col_id]) * h_i)
        tokens.append(image_end_id)
        chunks.append(torch.tensor(tokens, dtype=torch.long, device=device))
    if chunks:
        flat = torch.cat(chunks)
    else:
        flat = torch.empty(0, dtype=torch.long, device=device)
    return flat, lengths
def build_flat_video_bool_length(
    video_grids: torch.LongTensor,
    image_patch_id: int,
    frame_start_id: int,
    frame_end_id: int,
) -> tuple[torch.LongTensor, torch.LongTensor]:
    """Expand per-video grids into one flat stream of placeholder token ids.

    Each row of ``video_grids`` is ``(t, resized_h, resized_w)``. For each of
    the ``t`` frames the layout is: frame-start token, resized_h*resized_w
    patch tokens, frame-end token.

    Returns the concatenated token stream and the per-video lengths.
    """
    device = video_grids.device
    num_frames = video_grids[:, 0]
    patches_per_frame = video_grids[:, 1] * video_grids[:, 2]
    # Every frame carries its patches plus the two frame delimiters.
    lengths = num_frames * (patches_per_frame + 2)
    chunks = []
    for t_i, p_i in zip(num_frames.tolist(), patches_per_frame.tolist()):
        frame = [frame_start_id] + [image_patch_id] * p_i + [frame_end_id]
        chunks.append(torch.tensor(frame * t_i, dtype=torch.long, device=device))
    if chunks:
        flat = torch.cat(chunks)
    else:
        flat = torch.empty(0, dtype=torch.long, device=device)
    return flat, lengths
class Molmo2ProcessorWrapper:
    """
    Wraps :class:`Molmo2Processor` so that it can be called directly.

    Exposes the wrapped HF processor's tokenizer/image/video settings as
    cached properties, and post-processes its outputs into flat token
    streams plus per-item length tensors for vLLM's multimodal pipeline.
    """

    def __init__(self, processor: ProcessorMixin, hf_config: PretrainedConfig):
        super().__init__()
        self.processor = processor
        self.hf_config = hf_config

    @cached_property
    def vocab(self) -> dict[str, int]:
        return self.processor.tokenizer.vocab  # type: ignore

    @cached_property
    def max_crops(self) -> int:
        # Maximum number of crops the image processor may split an image into.
        image_processor = self.processor.image_processor  # type: ignore
        max_crops = image_processor.max_crops
        assert isinstance(max_crops, int)
        return max_crops

    @cached_property
    def image_pooling_h(self) -> int:
        image_processor = self.processor.image_processor  # type: ignore
        image_pooling_h = image_processor.pooling_size[0]
        assert isinstance(image_pooling_h, int)
        return image_pooling_h

    @cached_property
    def image_pooling_w(self) -> int:
        image_processor = self.processor.image_processor  # type: ignore
        image_pooling_w = image_processor.pooling_size[1]
        assert isinstance(image_pooling_w, int)
        return image_pooling_w

    @cached_property
    def video_pooling_h(self) -> int:
        video_processor = self.processor.video_processor  # type: ignore
        video_pooling_h = video_processor.pooling_size[0]
        assert isinstance(video_pooling_h, int)
        return video_pooling_h

    @cached_property
    def video_pooling_w(self) -> int:
        video_processor = self.processor.video_processor  # type: ignore
        video_pooling_w = video_processor.pooling_size[1]
        assert isinstance(video_pooling_w, int)
        return video_pooling_w

    @cached_property
    def base_image_input_size(self) -> tuple[int, int]:
        # (height, width) of one crop; fall back to the video processor when
        # the checkpoint ships no image processor.
        if getattr(self.processor, "image_processor", None) is not None:
            processor = self.processor.image_processor  # type: ignore
        else:
            processor = self.processor.video_processor  # type: ignore
        base_image_input_size = (processor.size["height"], processor.size["width"])
        return base_image_input_size

    @cached_property
    def image_patch_size(self) -> int:
        if getattr(self.processor, "image_processor", None) is not None:
            processor = self.processor.image_processor  # type: ignore
        else:
            processor = self.processor.video_processor  # type: ignore
        image_patch_size = processor.patch_size
        assert isinstance(image_patch_size, int)
        return image_patch_size

    @cached_property
    def overlap_margins(self) -> tuple[int, int]:
        # (left, right) overlap margins between adjacent crops.
        image_processor = self.processor.image_processor  # type: ignore
        left_margin, right_margin = image_processor.overlap_margins
        assert isinstance(left_margin, int)
        assert isinstance(right_margin, int)
        return left_margin, right_margin

    @cached_property
    def bos_token(self) -> str:
        # Some tokenizers have no bos token; fall back to eos.
        return self.processor.tokenizer.bos_token or self.processor.tokenizer.eos_token

    # Special token ids, read straight from the HF config.

    @cached_property
    def image_patch_id(self) -> int:
        return self.hf_config.image_patch_id

    @cached_property
    def im_col_id(self) -> int:
        return self.hf_config.image_col_id

    @cached_property
    def im_start_id(self) -> int:
        return self.hf_config.image_start_token_id

    @cached_property
    def im_end_id(self) -> int:
        return self.hf_config.image_end_token_id

    @cached_property
    def low_res_im_start_id(self) -> int:
        return self.hf_config.low_res_image_start_token_id

    @cached_property
    def frame_start_id(self) -> int:
        return self.hf_config.frame_start_token_id

    @cached_property
    def frame_end_id(self) -> int:
        return self.hf_config.frame_end_token_id

    @cached_property
    def im_low_res_id(self) -> int:
        return self.hf_config.image_low_res_id

    @cached_property
    def image_placeholder_id(self) -> int:
        return self.vocab[IMAGE_PROMPT]

    @cached_property
    def video_placeholder_id(self) -> int:
        return self.vocab[VIDEO_PROMPT]

    @cached_property
    def image_token_ids(self) -> list[int]:
        # All special ids that can appear inside an image/video placeholder
        # expansion.
        return [
            self.image_patch_id,
            self.im_col_id,
            self.im_start_id,
            self.low_res_im_start_id,
            self.frame_start_id,
            self.im_end_id,
            self.frame_end_id,
            self.im_low_res_id,
        ]

    def select_tiling(
        self,
        *,
        image_height: int,
        image_width: int,
    ) -> tuple[int, int]:
        """Choose the (rows, cols) crop tiling for an image of this size."""
        max_crops = self.max_crops
        left_margin, right_margin = self.overlap_margins
        base_image_input_size = self.base_image_input_size
        base_image_input_d = self.image_patch_size
        total_margin_pixels = base_image_input_d * (right_margin + left_margin)
        # NOTE(review): assumes square crops — only size[0] (height) is used
        # to derive the crop window; confirm crops are square.
        crop_patches = base_image_input_size[0] // base_image_input_d
        crop_window_patches = crop_patches - (right_margin + left_margin)
        crop_window_size = crop_window_patches * base_image_input_d
        tiling_h, tiling_w = select_tiling(
            height=image_height - total_margin_pixels,
            width=image_width - total_margin_pixels,
            patch_size=crop_window_size,
            max_num_patches=max_crops,
        )
        return tiling_h, tiling_w

    def get_base_grid_size(self, is_video: bool) -> tuple[int, int]:
        """Pooled patch grid (rows, cols) of a single base-resolution crop."""
        base_image_input_size = self.base_image_input_size
        return get_patches_grid_size(
            image_h=base_image_input_size[0],
            image_w=base_image_input_size[1],
            patch_size=self.image_patch_size,
            pool_h=self.video_pooling_h if is_video else self.image_pooling_h,
            pool_w=self.video_pooling_w if is_video else self.image_pooling_w,
        )

    def get_patches_grid_size(
        self,
        *,
        image_height: int,
        image_width: int,
    ) -> tuple[int, int]:
        """Pooled patch grid (rows, cols) of the full tiled high-res view."""
        left_margin, right_margin = self.overlap_margins
        base_image_input_size = self.base_image_input_size
        base_image_input_d = self.image_patch_size
        total_margin_pixels = base_image_input_d * (right_margin + left_margin)
        crop_patches = base_image_input_size[0] // base_image_input_d
        crop_window_patches = crop_patches - (right_margin + left_margin)
        crop_window_size = crop_window_patches * base_image_input_d
        tiling_h, tiling_w = self.select_tiling(
            image_height=image_height,
            image_width=image_width,
        )
        # Full tiled resolution, margins included.
        h, w = [
            tiling_h * crop_window_size + total_margin_pixels,
            tiling_w * crop_window_size + total_margin_pixels,
        ]
        nrows, ncols = get_patches_grid_size(
            image_h=h,
            image_w=w,
            patch_size=base_image_input_d,
            pool_h=self.image_pooling_h,
            pool_w=self.image_pooling_w,
        )
        return nrows, ncols

    def __call__(
        self,
        text: TextInput | list[TextInput] | None = None,
        images: ImageInput | None = None,
        videos: VideoInput | None = None,
        return_tensors: str | TensorType | None = None,
        **kwargs: object,
    ) -> BatchFeature:
        """Run the wrapped processor and post-process its outputs.

        Strips the auto-inserted bos token, then converts per-item grid
        metadata into flat placeholder-token streams and per-item length
        tensors consumed by `_get_mm_fields_config`.
        """
        inputs = [text]
        images = exif_tranpose(images)
        # Positional inputs depend on which sub-processors exist.
        if getattr(self.processor, "image_processor", None) is not None:
            inputs.append(images)
        if getattr(self.processor, "video_processor", None) is not None:
            inputs.append(videos)
        outputs = self.processor(  # type: ignore
            *inputs,
            return_tensors=return_tensors,
            **kwargs,
        )
        # revert insert bos token
        if outputs["input_ids"][0, 0] == self.vocab[self.bos_token]:
            outputs["input_ids"] = outputs["input_ids"][:, 1:]
        if images is None:
            images = []
        if not isinstance(images, list):
            images = [images]
        if videos is None:
            videos = []
        if not isinstance(videos, list):
            videos = [videos]
        assert len(videos) in {0, 1}, "At most one video is supported for Molmo2"
        # Popped and discarded; not used downstream.
        _attention_mask: torch.Tensor = outputs.pop("attention_mask")
        _token_type_ids: torch.Tensor | None = outputs.pop("token_type_ids", None)
        if len(images) > 0:
            # For each image: tiling_h * tiling_w + global view
            num_crops = []
            for image in images:
                image_size = get_image_size(image)
                tiling = self.select_tiling(
                    image_height=image_size.height,
                    image_width=image_size.width,
                )
                num_crops.append(np.prod(tiling) + 1)
            assert sum(num_crops) == len(outputs["pixel_values"])
            assert sum(num_crops) == outputs["image_num_crops"].sum().item()
            # Each grid row is (resized_h, resized_w, h, w); see
            # build_flat_image_bool_length.
            image_grids: torch.Tensor = outputs.pop("image_grids")
            image_num_pooled_patches: torch.Tensor = image_grids[:, :2].prod(
                dim=1
            ) + image_grids[:, 2:].prod(dim=1)
            outputs["image_num_pooled_patches"] = image_num_pooled_patches
            n_patches = outputs["pixel_values"].shape[1]
            outputs["image_num_patches"] = outputs["image_num_crops"] * n_patches
            image_tokens, num_image_tokens = build_flat_image_bool_length(
                image_grids,
                self.image_patch_id,
                self.low_res_im_start_id,
                self.im_start_id,
                self.im_col_id,
                self.im_end_id,
            )
            outputs["image_tokens"] = image_tokens
            outputs["num_image_tokens"] = num_image_tokens
        if len(videos) > 0:
            # Each grid row is (t, resized_h, resized_w); see
            # build_flat_video_bool_length.
            video_grids: torch.Tensor = outputs.pop("video_grids")
            assert video_grids[:, 0].sum() == len(outputs["pixel_values_videos"])
            outputs["video_num_crops"] = video_grids[:, 0]
            outputs["video_num_pooled_patches"] = video_grids.prod(dim=1)
            n_patches = outputs["pixel_values_videos"].shape[1]
            outputs["video_num_patches"] = outputs["video_num_crops"] * n_patches
            video_tokens, num_video_tokens = build_flat_video_bool_length(
                video_grids,
                self.image_patch_id,
                self.frame_start_id,
                self.frame_end_id,
            )
            outputs["video_tokens"] = video_tokens
            outputs["num_video_tokens"] = num_video_tokens
        return BatchFeature(outputs)
def get_candidate_target_fps(
video_fps: int | float,
sampling_fps: int | float,
max_fps: int | float = _MAX_VIDEO_FPS,
) -> list[float]:
"""
Return the subset of `video_fps` factors that remain multiples
of `sampling_fps`.
Examples:
>>> get_candidate_target_fps(video_fps=6, sampling_fps=2)
[2, 6]
>>> get_candidate_target_fps(video_fps=5, sampling_fps=1)
[1, 5]
>>> get_candidate_target_fps(video_fps=2, sampling_fps=2)
[2]
>>> get_candidate_target_fps(video_fps=5, sampling_fps=2)
Traceback (most recent call last):
...
ValueError: sampling_fps=2 must divide video_fps=5 to produce
consistent frame steps.
"""
video_fps = int(video_fps)
sampling_fps = int(sampling_fps)
max_fps = int(max_fps)
if sampling_fps is None:
raise ValueError("sampling_fps must be provided")
if video_fps <= 0 or sampling_fps <= 0:
raise ValueError(
"video_fps and sampling_fps must be positive "
f"(got {video_fps}, {sampling_fps})"
)
if video_fps % sampling_fps != 0:
raise ValueError(
f"sampling_fps={sampling_fps} must divide video_fps={video_fps}."
)
candidates = []
for candidate in range(sampling_fps, video_fps + 1, sampling_fps):
if candidate > max_fps:
break
if video_fps % candidate == 0:
candidates.append(float(candidate))
return candidates
def get_target_fps(
video_fps: float,
max_frames: int,
total_frames: int,
frame_sample_mode: str,
candidate_target_fps: list[float],
) -> float | None:
"""
Get the target fps that best spans the video and has the most frames sampled
"""
num_frames_sampled = 0
selected_target_fps = None
for target_fps in candidate_target_fps:
step_size = max(int(video_fps / target_fps), 1)
num_frames_sampled_at_fps = int(total_frames / step_size)
if num_frames_sampled == 0:
if (
"uniform" in frame_sample_mode
and num_frames_sampled_at_fps > max_frames
):
break
selected_target_fps = target_fps
num_frames_sampled = num_frames_sampled_at_fps
else:
# the candidate sampling fps increases so frame count can't decrease
assert num_frames_sampled <= num_frames_sampled_at_fps
if num_frames_sampled_at_fps > max_frames:
# choose the sampling fps that spans the video
continue
elif num_frames_sampled_at_fps > num_frames_sampled:
# both are less than max_frames; choose the one with higher
# density of frames sampled
selected_target_fps = target_fps
num_frames_sampled = num_frames_sampled_at_fps
return selected_target_fps
def get_frame_times_and_chosen_fps(
    selected_target_fps, total_frames, max_frames, video_fps
):
    """Return the chosen fps and the frame indices to sample at that fps.

    Falls back to uniform sampling across the whole video when no target fps
    was selected; otherwise samples at a fixed stride, truncated to at most
    ``max_frames`` indices.
    """
    if selected_target_fps is None:
        # Uniform fallback: max_frames evenly spaced indices in [0, total).
        uniform = np.linspace(0, total_frames, max_frames, endpoint=False, dtype=int)
        return selected_target_fps, uniform
    stride = max(int(video_fps / selected_target_fps), 1)
    frame_indices = np.arange(0, total_frames, stride)[:max_frames]
    return selected_target_fps, frame_indices
class Molmo2ProcessingInfo(BaseProcessingInfo):
    """Processing metadata for Molmo2: placeholder token counts, modality
    limits, and video frame-sampling logic mirroring the HF video processor."""

    def get_data_parser(self):
        return MultiModalDataParser(
            video_needs_metadata=True,
            expected_hidden_size=self._get_expected_hidden_size(),
        )

    def get_hf_processor(self, **kwargs: object) -> Molmo2ProcessorWrapper:
        processor = self.ctx.get_hf_processor(**kwargs)
        hf_config = self.ctx.get_hf_config()
        return Molmo2ProcessorWrapper(processor, hf_config)

    def get_supported_mm_limits(self) -> Mapping[str, int | None]:
        # Any number of images, at most one video per prompt.
        return {"image": None, "video": 1}

    def get_num_image_tokens(
        self,
        *,
        image_height: int,
        image_width: int,
        processor: Molmo2ProcessorWrapper,
    ) -> int:
        """Total placeholder tokens for one image: the low-res single-crop
        view plus the tiled high-res view."""
        hf_processor = processor.processor
        resize_nrows, resize_cols = processor.get_base_grid_size(is_video=False)
        # start/end tokens + image patch token + col tokens
        if hf_processor.use_single_crop_col_tokens is not None:
            use_col_tokens = hf_processor.use_single_crop_col_tokens
        else:
            use_col_tokens = hf_processor.image_use_col_tokens
        extra = 2 + resize_nrows * (resize_cols + int(use_col_tokens))
        overlap_nrows, overlap_ncols = processor.get_patches_grid_size(
            image_height=image_height,
            image_width=image_width,
        )
        joint = 2 + overlap_nrows * (
            overlap_ncols + int(hf_processor.image_use_col_tokens)
        )
        return extra + joint

    def get_num_video_tokens(
        self,
        *,
        num_frames: int,
        processor: Molmo2ProcessorWrapper,
    ) -> int:
        """Total placeholder tokens for a video with ``num_frames`` frames.

        NOTE(review): timestamp text tokens added per frame by the prompt
        replacement are not counted here — confirm that is intentional.
        """
        resize_nrows, resize_cols = processor.get_base_grid_size(is_video=True)
        # start/end tokens
        extra = 2 + resize_nrows * (
            resize_cols + int(processor.processor.video_use_col_tokens)
        )
        return num_frames * extra

    def get_image_size_with_most_features(self) -> ImageSize:
        """Image size that yields the most placeholder tokens (profiling).

        Raises:
            ValueError: if no candidate tiling produces a positive size.
        """
        processor = self.get_hf_processor()
        left_margin, right_margin = processor.overlap_margins
        base_image_input_size = processor.base_image_input_size
        base_image_input_d = processor.image_patch_size
        total_margin_pixels = base_image_input_d * (right_margin + left_margin)
        crop_patches = base_image_input_size[0] // base_image_input_d
        crop_window_patches = crop_patches - (right_margin + left_margin)
        crop_window_size = crop_window_patches * base_image_input_d
        tilings = get_candidate_tilings(processor.max_crops)
        largest_feature_size, largest_feature_pinpoint = 0, None
        for hr, wr in tilings:
            # Exact resolution covered by an (hr, wr) tiling.
            height = hr * crop_window_size + total_margin_pixels
            width = wr * crop_window_size + total_margin_pixels
            feat_size = self.get_num_image_tokens(
                image_height=height,
                image_width=width,
                processor=processor,
            )
            if feat_size > largest_feature_size:
                largest_feature_size = feat_size
                largest_feature_pinpoint = ImageSize(width=width, height=height)
        if largest_feature_size == 0 or largest_feature_pinpoint is None:
            raise ValueError("Cannot have a largest feature size of 0!")
        return largest_feature_pinpoint

    def _get_max_video_frames(
        self,
        max_tokens: int,
        processor: Molmo2ProcessorWrapper,
    ) -> int:
        # How many whole frames fit into max_tokens; always at least one.
        num_tokens_per_frame = self.get_num_video_tokens(
            num_frames=1,
            processor=processor,
        )
        max_frames = max_tokens // num_tokens_per_frame
        return max(max_frames, 1)

    def get_num_frames_with_most_features(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> int:
        """Frame count maximizing video features within ``seq_len`` tokens."""
        processor = self.get_hf_processor()
        video_processor = processor.processor.video_processor
        num_frames = video_processor.num_frames
        max_videos = mm_counts.get("video", 0)
        max_total_frames = self._get_max_video_frames(seq_len, processor)
        # Budget is shared by all videos and capped by the processor's limit.
        max_frames_per_video = min(
            max_total_frames // max(max_videos, 1),
            num_frames,
        )
        return max(max_frames_per_video, 1)

    def _sample_frames(
        self,
        total_num_frames: int,
        video_fps: float,
        duration: float,
        frame_sample_mode: str,
        num_frames: int,
        max_fps: int,
        sampling_fps: int,
    ) -> np.ndarray:
        """Pick frame indices according to the processor's sampling mode.

        Mirrors the HF video processor so placeholder counts stay in sync.

        Raises:
            NotImplementedError: for unknown ``frame_sample_mode`` values.
        """
        if frame_sample_mode == "uniform_last_frame" and max_fps is not None:
            if total_num_frames <= 2:
                # Trivially take everything.
                indices = np.arange(total_num_frames).astype(int)
            elif duration > (num_frames - 1) / max_fps:  # -1 to include the last frame
                # uniform fallback
                indices = np.linspace(
                    0,
                    total_num_frames - 1,
                    num=min(num_frames, total_num_frames),
                    endpoint=True,
                ).astype(int)
            else:
                # Sample at max_fps, always ending on the last frame.
                float_indices = np.arange(
                    0.0,
                    stop=total_num_frames - 1,
                    step=float(video_fps / max_fps),
                )
                if np.round(float_indices[-1]) != total_num_frames - 1:
                    float_indices = np.concatenate(
                        [float_indices, [total_num_frames - 1]], axis=0
                    )
                indices = np.round(float_indices).astype(int)
                assert indices[-1] < total_num_frames
                assert len(float_indices) <= num_frames
        elif frame_sample_mode == "uniform_last_frame":
            indices = np.linspace(
                0,
                total_num_frames - 1,
                num=min(num_frames, total_num_frames),
                endpoint=True,
            ).astype(int)
        elif frame_sample_mode == "fps":
            candidate_target_fps = get_candidate_target_fps(video_fps, sampling_fps)
            selected_target_fps = get_target_fps(
                video_fps,
                num_frames,
                total_num_frames,
                frame_sample_mode,
                candidate_target_fps,
            )
            _, indices = get_frame_times_and_chosen_fps(
                selected_target_fps,
                total_num_frames,
                num_frames,
                video_fps,
            )
        else:
            raise NotImplementedError(frame_sample_mode)
        return indices

    def _get_video_second_idx(
        self,
        metadata: dict[str, Any],
        do_sample_frames: bool | None = None,
    ) -> list[float]:
        """Return the timestamp (seconds) of each sampled video frame."""
        video_processor = self.get_hf_processor().processor.video_processor
        # metadata["fps"] refers to the true fps of the input video.
        video_fps = metadata["fps"]
        frames_indices = metadata.get("frames_indices")
        if do_sample_frames is None:
            do_sample_frames = metadata.get("do_sample_frames", False)
        if do_sample_frames:
            # Frame-based sampling is applied in HF video processor
            total_num_frames = metadata["total_num_frames"]
            duration = total_num_frames / video_fps
            frame_sample_mode = video_processor.frame_sample_mode
            num_frames = video_processor.num_frames
            max_fps = video_processor.max_fps
            sampling_fps = video_processor.sampling_fps
            frames_indices = self._sample_frames(
                total_num_frames,
                video_fps,
                duration,
                frame_sample_mode,
                num_frames,
                max_fps,
                sampling_fps,
            )
        else:
            # Time-based sampling is done in vllm molmo2 video loader or molmo_utils
            assert frames_indices is not None
        timestamps = [frame_idx / video_fps for frame_idx in frames_indices]
        return timestamps
class Molmo2DummyInputsBuilder(BaseDummyInputsBuilder[Molmo2ProcessingInfo]):
    """Builds worst-case dummy prompts and multimodal data for profiling."""

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)
        image_placeholder_token = IMAGE_PROMPT
        video_placeholder_token = VIDEO_PROMPT
        # Multiple images get "Image N" prefixes before each placeholder.
        if num_images == 1:
            image_string = image_placeholder_token
        else:
            image_string = "".join(
                [f"Image {i + 1}" + image_placeholder_token for i in range(num_images)]
            )
        return image_string + video_placeholder_token * num_videos

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions],
    ) -> MultiModalDataDict:
        """Create worst-case dummy images/videos, honoring override options."""
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)
        dummy_images = []
        dummy_videos = []
        if num_images > 0:
            # Use the image size that produces the most placeholder tokens.
            target_width, target_height = self.info.get_image_size_with_most_features()
            image_overrides = mm_options.get("image")
            dummy_images = self._get_dummy_images(
                width=target_width,
                height=target_height,
                num_images=num_images,
                overrides=image_overrides,
            )
        if num_videos > 0:
            processor = self.info.get_hf_processor()
            base_image_input_size = processor.base_image_input_size
            target_num_frames = self.info.get_num_frames_with_most_features(
                seq_len, mm_counts
            )
            video_overrides = mm_options.get("video")
            if video_overrides:
                assert isinstance(video_overrides, VideoDummyOptions)
                num_frames_override = video_overrides.num_frames
                if num_frames_override:
                    if num_frames_override > target_num_frames:
                        logger.warning(
                            "video.num_frames override (%d) exceeds model's "
                            "maximum number of frames (%d), will be ignored",
                            num_frames_override,
                            target_num_frames,
                        )
                    if num_frames_override < 2:
                        logger.warning(
                            "video.num_frames override (%d) cannot be less "
                            "than 2, will be ignored",
                            num_frames_override,
                        )
                    # NOTE(review): an override < 2 is warned as "ignored"
                    # above, yet min() still applies it here — confirm intent.
                    target_num_frames = min(target_num_frames, num_frames_override)
            dummy_videos = self._get_dummy_videos(
                width=base_image_input_size[1],
                height=base_image_input_size[0],
                num_frames=target_num_frames,
                num_videos=num_videos,
            )
        return {
            "image": dummy_images,
            "video": dummy_videos,
        }

    def _get_dummy_videos(
        self,
        *,
        width: int,
        height: int,
        num_frames: int,
        num_videos: int,
    ) -> list[VideoItem]:
        """Build white dummy videos plus the metadata the parser requires."""
        video = np.full((num_frames, height, width, 3), 255, dtype=np.uint8)
        video_items = []
        for i in range(num_videos):
            # Pretend the frames were pre-sampled at 2 fps so the processor
            # does not re-sample them (do_sample_frames=False).
            video_metadata = {
                "fps": 2.0,
                "duration": num_frames / 2.0,
                "total_num_frames": num_frames,
                "frames_indices": list(range(num_frames)),
                "video_backend": "decord",
                "do_sample_frames": False,
                "height": height,
                "width": width,
            }
            video_item = (video.copy(), video_metadata)
            video_items.append(video_item)
        return video_items
class Molmo2MultiModalProcessor(BaseMultiModalProcessor[Molmo2ProcessingInfo]):
    """Runs the wrapped HF processor and maps its outputs into vLLM
    multimodal kwargs, field configs, and prompt updates."""

    def _apply_hf_processor_tokens_only(
        self,
        prompt_tokens: list[int],
    ) -> list[int]:
        """Ensure token-only prompts start with the bos (or eos) token."""
        processor = self.info.get_hf_processor()
        tokenizer = processor.processor.tokenizer
        bos_token_id = tokenizer.bos_token_id or tokenizer.eos_token_id
        if len(prompt_tokens) > 0 and prompt_tokens[0] != bos_token_id:
            # Prepend the bos token to the prompt tokens
            prompt_tokens = [bos_token_id] + prompt_tokens
        return prompt_tokens

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        """Process each video separately, splicing its decoded token string
        into the prompt, then process the remaining inputs in one pass."""
        mm_data = dict(mm_data)
        processor = self.info.get_hf_processor(**mm_kwargs)
        if videos := mm_data.pop("videos", []):
            pixel_values_videos_lst = []
            video_token_pooling_lst = []
            video_num_crops_lst = []
            video_num_pooled_patches_lst = []
            video_num_patches_lst = []
            video_tokens_lst = []
            num_video_tokens_lst = []
            for item in videos:
                video_array, metadata = item
                # NOTE: metadata.frames_indices indicates
                # the sampled frames indices of pre-sampled videos, which is
                # used to calculate the timestamps. Make sure that
                # do_sample_frames in mm_kwargs is false for presampled videos.
                # NOTE: a copy of mm_kwargs is created to update do_sample_frames,
                # otherwise mm_hash for the object will be incorrect.
                video_mm_kwargs = dict(**mm_kwargs)
                if "do_sample_frames" not in video_mm_kwargs:
                    # molmo_utils already has "do_sample_frames" in
                    # mm_kwargs, don't overwrite it.
                    video_mm_kwargs["do_sample_frames"] = metadata.get(
                        "do_sample_frames", False
                    )
                # VideoMetadata does not accept do_sample_frames; drop it.
                metadata = VideoMetadata(
                    **{k: metadata[k] for k in metadata if k != "do_sample_frames"}
                )
                video_mm_data = dict()
                video_mm_data["videos"] = [[video_array]]
                video_mm_data["video_metadata"] = [[metadata]]
                video_outputs = super()._call_hf_processor(
                    prompt=VIDEO_PROMPT,
                    mm_data=video_mm_data,
                    mm_kwargs=video_mm_kwargs,
                    tok_kwargs=tok_kwargs,
                )
                input_ids = video_outputs.pop("input_ids")
                # Replace this video's placeholder in the prompt with the
                # decoded per-video token string.
                video_string = processor.processor.tokenizer.batch_decode(input_ids)[0]
                prompt = prompt.replace(
                    VIDEO_PROMPT,
                    video_string,
                    1,
                )
                pixel_values_videos_lst.append(video_outputs["pixel_values_videos"])
                video_token_pooling_lst.append(video_outputs["video_token_pooling"])
                video_num_crops_lst.append(video_outputs["video_num_crops"])
                video_num_pooled_patches_lst.append(
                    video_outputs["video_num_pooled_patches"]
                )
                video_num_patches_lst.append(video_outputs["video_num_patches"])
                video_tokens_lst.append(video_outputs["video_tokens"])
                num_video_tokens_lst.append(video_outputs["num_video_tokens"])
            # Concatenate the per-video tensors into flat batch tensors.
            video_outputs = dict(
                pixel_values_videos=torch.cat(pixel_values_videos_lst),
                video_token_pooling=torch.cat(video_token_pooling_lst),
                video_num_crops=torch.cat(video_num_crops_lst),
                video_num_pooled_patches=torch.cat(video_num_pooled_patches_lst),
                video_num_patches=torch.cat(video_num_patches_lst),
                video_tokens=torch.cat(video_tokens_lst),
                num_video_tokens=torch.cat(num_video_tokens_lst),
            )
        else:
            video_outputs = dict()
        processed_outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )
        bos_token_id = processor.vocab[processor.bos_token]
        input_ids = processed_outputs["input_ids"]
        # add bos token back to prompt start
        if input_ids.numel() > 0 and input_ids[0, 0] != bos_token_id:
            bos_token_id_tensor = torch.tensor(
                [[bos_token_id]], device=input_ids.device, dtype=input_ids.dtype
            )
            processed_outputs["input_ids"] = torch.concat(
                [bos_token_id_tensor, input_ids], dim=1
            )
        combined_outputs = dict(
            processed_outputs,
            **video_outputs,
        )
        return BatchFeature(combined_outputs)

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        """Describe how each HF output tensor is split across mm items."""
        # Per-item sizes used to slice the flat (concatenated) tensors.
        image_num_crops = hf_inputs.get("image_num_crops", torch.empty(0))
        image_num_pooled_patches = hf_inputs.get(
            "image_num_pooled_patches", torch.empty(0)
        )
        video_num_crops = hf_inputs.get("video_num_crops", torch.empty(0))
        video_num_pooled_patches = hf_inputs.get(
            "video_num_pooled_patches", torch.empty(0)
        )
        num_image_tokens = hf_inputs.get("num_image_tokens", torch.empty(0))
        num_video_tokens = hf_inputs.get("num_video_tokens", torch.empty(0))
        return dict(
            pixel_values=MultiModalFieldConfig.flat_from_sizes(
                "image", image_num_crops
            ),
            image_token_pooling=MultiModalFieldConfig.flat_from_sizes(
                "image", image_num_pooled_patches
            ),
            image_num_crops=MultiModalFieldConfig.batched("image"),
            image_num_pooled_patches=MultiModalFieldConfig.batched("image"),
            image_num_patches=MultiModalFieldConfig.batched("image"),
            image_tokens=MultiModalFieldConfig.flat_from_sizes(
                "image", num_image_tokens
            ),
            num_image_tokens=MultiModalFieldConfig.batched("image"),
            pixel_values_videos=MultiModalFieldConfig.flat_from_sizes(
                "video", video_num_crops
            ),
            video_token_pooling=MultiModalFieldConfig.flat_from_sizes(
                "video", video_num_pooled_patches
            ),
            video_num_crops=MultiModalFieldConfig.batched("video"),
            video_num_pooled_patches=MultiModalFieldConfig.batched("video"),
            video_num_patches=MultiModalFieldConfig.batched("video"),
            video_tokens=MultiModalFieldConfig.flat_from_sizes(
                "video", num_video_tokens
            ),
            num_video_tokens=MultiModalFieldConfig.batched("video"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargsItems,
    ) -> Sequence[PromptUpdate]:
        """Build replacement functions expanding image/video placeholders
        into their full placeholder-token sequences."""
        processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        img_patch_id = processor.image_patch_id
        img_col_id = processor.im_col_id
        img_start_id = processor.im_start_id
        img_end_id = processor.im_end_id
        image_use_col_tokens = processor.processor.image_use_col_tokens
        use_single_crop_col_tokens = processor.processor.use_single_crop_col_tokens
        use_single_crop_start_token = processor.processor.use_single_crop_start_token
        video_use_col_tokens = processor.processor.video_use_col_tokens
        use_frame_special_tokens = processor.processor.use_frame_special_tokens

        def get_image_replacement_molmo2(item_idx: int) -> PromptUpdateDetails:
            # Low-res single-crop view followed by the tiled high-res view;
            # mirrors the token layout of build_flat_image_bool_length.
            images = mm_items.get_items("image", ImageProcessorItems)
            image = images.get(item_idx)
            image = exif_tranpose(image)
            resize_nrows, resize_cols = processor.get_base_grid_size(is_video=False)
            if use_single_crop_col_tokens is not None:
                use_col_tokens = use_single_crop_col_tokens
            else:
                use_col_tokens = image_use_col_tokens
            if use_single_crop_start_token:
                start_id = processor.low_res_im_start_id
            else:
                start_id = img_start_id
            extra_row = [img_patch_id] * resize_cols + [img_col_id] * int(
                use_col_tokens
            )
            extra_joint = [start_id] + extra_row * resize_nrows + [img_end_id]
            image_size = get_image_size(image)
            nrows, ncols = processor.get_patches_grid_size(
                image_height=image_size.height,
                image_width=image_size.width,
            )
            joint_row = [img_patch_id] * ncols + [img_col_id] * int(
                image_use_col_tokens
            )
            joint = [img_start_id] + joint_row * nrows + [img_end_id]
            img_token_ids = extra_joint + joint
            return PromptUpdateDetails.select_token_ids(
                img_token_ids,
                processor.image_token_ids,
            )

        def get_video_replacement_molmo2(item_idx: int) -> PromptUpdateDetails:
            # Per frame: timestamp text tokens, then the frame's patch grid
            # wrapped in start/end (or frame-special) tokens.
            video, metadata = mm_items["video"][item_idx]
            do_sample_frames = hf_processor_mm_kwargs.get("do_sample_frames")
            timestamps = self.info._get_video_second_idx(metadata, do_sample_frames)
            nrows, ncols = processor.get_base_grid_size(is_video=True)
            if use_frame_special_tokens:
                start_id = processor.frame_start_id
                end_id = processor.frame_end_id
            else:
                start_id = img_start_id
                end_id = img_end_id
            img_token_ids = []
            for frame_idx, frame_time in enumerate(timestamps):
                prev_space = " " if frame_idx > 0 else ""
                frame_prefix = (
                    prev_space + f"{frame_time:.1f} "
                )  # explicit whitespace before/after image tokens
                img_token_ids += processor.processor.tokenizer.encode(
                    frame_prefix,
                    add_special_tokens=False,
                )
                joint_row = [img_patch_id] * ncols + [img_col_id] * int(
                    video_use_col_tokens
                )
                joint = [start_id] + nrows * joint_row + [end_id]
                img_token_ids += joint
            return PromptUpdateDetails.select_token_ids(
                img_token_ids,
                processor.image_token_ids,
            )

        return [
            PromptReplacement(
                modality=modality,
                target=[target],
                replacement=replacement_fn,
            )
            for modality, target, replacement_fn in zip(
                ["image", "video"],
                [processor.image_placeholder_id, processor.video_placeholder_id],
                [get_image_replacement_molmo2, get_video_replacement_molmo2],
            )
        ]
@MULTIMODAL_REGISTRY.register_processor(
    Molmo2MultiModalProcessor,
    info=Molmo2ProcessingInfo,
    dummy_inputs=Molmo2DummyInputsBuilder,
)
class Molmo2ForConditionalGeneration(
    nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA, SupportsQuant
):
    """Molmo2 vision-language model for conditional generation.

    Combines a ViT-based ``vision_backbone`` with a Molmo2 text decoder:
    pooled visual patch features are scattered into the text embedding
    sequence at image-patch placeholder positions before the language model
    runs. Supports image and video inputs, pipeline parallelism, LoRA, and
    quantization.
    """

    # Translate HF checkpoint names to the vLLM module layout.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_substr={
            # vision backbone mapping
            "image_pooling_2d.wq": "image_pooling_2d.q_proj",
            "image_pooling_2d.wk": "image_pooling_2d.k_proj",
            "image_pooling_2d.wv": "image_pooling_2d.v_proj",
            "image_pooling_2d.wo": "image_pooling_2d.o_proj",
            "image_projector.w1": "image_projector.gate_proj",
            "image_projector.w3": "image_projector.up_proj",
            "image_projector.w2": "image_projector.down_proj",
            # language backbone mapping
            "att_proj": "qkv_proj",
            "attn_out": "o_proj",
            "q_norm": "q_norm",
            "k_norm": "k_norm",
            "ff_proj": "up_gate_proj",
            "ff_out": "down_proj",
            "attn_norm": "input_layernorm",
            "ff_norm": "post_attention_layernorm",
        },
        orig_to_new_prefix={
            # vision backbone mapping
            "model.vision_backbone.": "vision_backbone.",
            # language backbone mapping
            "model.transformer.blocks.": "model.layers.",
            "model.transformer.ln_f.": "model.norm.",
        },
    )

    # Fused-parameter layout, consumed by quantization / LoRA tooling.
    packed_modules_mapping = {
        "qkv_proj": ["qkv_proj"],
        "up_gate_proj": ["up_gate_proj"],  # language model
        "merged_qkv": ["wq", "wk", "wv"],  # vision backbone
        "merged_kv": ["k_proj", "v_proj"],  # image_pooling_2d
        "merged_linear": ["gate_proj", "up_proj"],  # image_projector
    }

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        """Return the prompt placeholder string for the given modality."""
        if modality.startswith("image"):
            return IMAGE_PROMPT
        if modality.startswith("video"):
            return VIDEO_PROMPT
        raise ValueError("Only image or video modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config
        self.config = config
        self.multimodal_config = multimodal_config
        # Re-materialize the vision/adapter dataclass configs field by field
        # from the (possibly foreign) HF config objects.
        kwargs = {}
        for field in fields(VitConfig):
            kwargs[field.name] = getattr(config.vit_config, field.name)
        vit_config = VitConfig(**kwargs)
        kwargs = {}
        for field in fields(AdapterConfig):
            kwargs[field.name] = getattr(config.adapter_config, field.name)
        adapter_config = AdapterConfig(**kwargs)
        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.vision_backbone = Molmo2VisionBackbone(
                vit_config,
                adapter_config,
                quant_config,
                prefix=maybe_prefix(prefix, "vision_backbone"),
            )
        with self._mark_language_model(vllm_config):
            self.model = Molmo2TextModel(
                vllm_config=vllm_config,
                prefix=maybe_prefix(prefix, "model"),
            )
        self.img_patch_id = config.image_patch_id
        # Newer configs expose text_config; older ones use llm_config.
        if hasattr(config, "text_config"):
            hf_text_config = config.text_config
        else:
            hf_text_config = config.llm_config
        self.lm_head = ParallelLMHead(
            hf_text_config.vocab_size,
            hf_text_config.hidden_size,
            quant_config=quant_config,
        )
        self.logits_processor = LogitsProcessor(hf_text_config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors
        )

    @property
    def dtype(self):
        """Dtype of the model parameters (taken from the first parameter)."""
        return next(self.parameters()).dtype

    def _parse_and_validate_image_input(
        self,
        **kwargs: object,
    ) -> Molmo2ImageInputs | None:
        """Collect image keyword inputs into a ``Molmo2ImageInputs`` dict.

        Rewrites per-item ``token_pooling`` indices into global indices over
        the concatenated patch sequence by adding each item's accumulated
        patch offset; negative (padding) entries are left untouched.
        Returns ``None`` when no image inputs are present.
        """
        pixel_values = kwargs.pop("pixel_values", None)
        if pixel_values is None:
            return None
        token_pooling = kwargs.pop("image_token_pooling", None)
        num_pooled_patches = kwargs.pop("image_num_pooled_patches", None)
        num_patches = kwargs.pop("image_num_patches", None)
        image_tokens = kwargs.pop("image_tokens", None)
        num_image_tokens = kwargs.pop("num_image_tokens", None)
        # Exclusive prefix sum of patches per item: offset of item i's patches.
        accum_patches = [0] + num_patches.cumsum(dim=0)[:-1].tolist()
        patch_offset = 0
        new_token_pooling = token_pooling.clone()
        for i, n in enumerate(num_pooled_patches):
            cur_slice = token_pooling[patch_offset : patch_offset + n]
            index_offset = int(accum_patches[i])
            new_token_pooling[patch_offset : patch_offset + n] = torch.where(
                cur_slice >= 0,
                cur_slice + index_offset,
                cur_slice,
            )
            patch_offset += n
        return Molmo2ImageInputs(
            pixel_values=pixel_values,
            token_pooling=new_token_pooling,
            num_pooled_patches=num_pooled_patches,
            image_tokens=image_tokens,
            num_image_tokens=num_image_tokens,
        )

    def _parse_and_validate_video_input(
        self,
        **kwargs: object,
    ) -> Molmo2VideoInputs | None:
        """Video counterpart of ``_parse_and_validate_image_input``.

        Same pooling-index globalization, keyed by the ``video_*`` kwargs.
        Returns ``None`` when no video inputs are present.
        """
        pixel_values_videos = kwargs.pop("pixel_values_videos", None)
        if pixel_values_videos is None:
            return None
        token_pooling = kwargs.pop("video_token_pooling", None)
        num_pooled_patches = kwargs.pop("video_num_pooled_patches", None)
        num_patches = kwargs.pop("video_num_patches", None)
        video_tokens = kwargs.pop("video_tokens", None)
        num_video_tokens = kwargs.pop("num_video_tokens", None)
        accum_patches = [0] + num_patches.cumsum(dim=0)[:-1].tolist()
        patch_offset = 0
        new_token_pooling = token_pooling.clone()
        for i, n in enumerate(num_pooled_patches):
            cur_slice = token_pooling[patch_offset : patch_offset + n]
            index_offset = int(accum_patches[i])
            new_token_pooling[patch_offset : patch_offset + n] = torch.where(
                cur_slice >= 0,
                cur_slice + index_offset,
                cur_slice,
            )
            patch_offset += n
        return Molmo2VideoInputs(
            pixel_values_videos=pixel_values_videos,
            token_pooling=new_token_pooling,
            num_pooled_patches=num_pooled_patches,
            video_tokens=video_tokens,
            num_video_tokens=num_video_tokens,
        )

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        """Dispatch kwargs to the per-modality parsers, at most once each."""
        modalities = {}
        for input_key in kwargs:
            if input_key in ("pixel_values",) and "images" not in modalities:
                modalities["images"] = self._parse_and_validate_image_input(**kwargs)
            if input_key in ("pixel_values_videos",) and "videos" not in modalities:
                modalities["videos"] = self._parse_and_validate_video_input(**kwargs)
        return modalities

    def _process_image_input(
        self,
        image_input: Molmo2ImageInputs,
    ) -> tuple[torch.Tensor, ...]:
        """Run the vision backbone and splice features into token embeddings.

        Returns one embedding tensor per image: the text embeddings of the
        image's token span, with patch-token positions overwritten by the
        backbone's pooled features.
        """
        pixel_values = image_input["pixel_values"]
        token_pooling = image_input["token_pooling"]
        num_pooled_patches = image_input["num_pooled_patches"]
        image_tokens = image_input["image_tokens"]
        num_image_tokens = image_input["num_image_tokens"]
        image_features_flat = self.vision_backbone(
            images=pixel_values.unsqueeze(0),
            token_pooling=token_pooling.unsqueeze(0),
        )
        assert len(image_features_flat) == num_pooled_patches.sum()
        image_features_list = image_features_flat.split(
            num_pooled_patches.tolist(), dim=0
        )
        image_tokens_list = image_tokens.split(num_image_tokens.tolist(), dim=0)
        out = []
        for image_features_i, image_tokens_i in zip(
            image_features_list, image_tokens_list
        ):
            out_features = self.get_language_model().embed_input_ids(image_tokens_i)
            is_image_patch = image_tokens_i == self.img_patch_id
            out_features[is_image_patch] = image_features_i
            out.append(out_features)
        return tuple(out)

    def _process_video_input(
        self,
        video_input: Molmo2VideoInputs,
    ) -> tuple[torch.Tensor, ...]:
        """Video counterpart of ``_process_image_input``."""
        pixel_values_videos = video_input["pixel_values_videos"]
        token_pooling = video_input["token_pooling"]
        num_pooled_patches = video_input["num_pooled_patches"]
        video_tokens = video_input["video_tokens"]
        num_video_tokens = video_input["num_video_tokens"]
        image_features_flat = self.vision_backbone(
            images=pixel_values_videos.unsqueeze(0),
            token_pooling=token_pooling.unsqueeze(0),
        )
        assert len(image_features_flat) == num_pooled_patches.sum()
        image_features_list = image_features_flat.split(
            num_pooled_patches.tolist(), dim=0
        )
        video_tokens_list = video_tokens.split(num_video_tokens.tolist(), dim=0)
        out = []
        for image_features_i, video_tokens_i in zip(
            image_features_list, video_tokens_list
        ):
            out_features = self.get_language_model().embed_input_ids(video_tokens_i)
            is_image_patch = video_tokens_i == self.img_patch_id
            out_features[is_image_patch] = image_features_i
            out.append(out_features)
        return tuple(out)

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings | None:
        """Compute multimodal embeddings for all modalities found in kwargs.

        Returns an empty list when no multimodal inputs are present,
        otherwise a tuple with one embedding tensor per image/video item.
        """
        modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not modalities:
            return []
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()
        for modality in modalities:
            if modality == "images":
                image_input = modalities["images"]
                image_embeddings = self._process_image_input(image_input)
                multimodal_embeddings += image_embeddings
            if modality == "videos":
                video_input = modalities["videos"]
                video_embeddings = self._process_video_input(video_input)
                multimodal_embeddings += video_embeddings
        return multimodal_embeddings

    def embed_input_ids(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: MultiModalEmbeddings | None = None,
        *,
        is_multimodal: torch.Tensor | None = None,
        handle_oov_mm_token: bool = False,
    ) -> torch.Tensor:
        """Embed token ids, merging in multimodal embeddings where present.

        Raises:
            ValueError: if multimodal embeddings are supplied without the
                ``is_multimodal`` mask identifying their positions.
        """
        inputs_embeds = self._embed_text_input_ids(
            input_ids,
            self.get_language_model().embed_input_ids,
            is_multimodal=is_multimodal,
            handle_oov_mm_token=handle_oov_mm_token,
        )
        if multimodal_embeddings is None or len(multimodal_embeddings) == 0:
            return inputs_embeds
        if is_multimodal is None:
            raise ValueError(
                "`embed_input_ids` now requires `is_multimodal` arg, "
                "please update your model runner according to "
                "https://github.com/vllm-project/vllm/pull/16229."
            )
        inputs_embeds = _merge_multimodal_embeddings(
            inputs_embeds=inputs_embeds,
            multimodal_embeddings=multimodal_embeddings,
            is_multimodal=is_multimodal,
        )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.LongTensor,
        positions: torch.LongTensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: object,
    ) -> torch.Tensor:
        """Run the language model.

        When ``intermediate_tensors`` is given (non-first pipeline-parallel
        rank), ``inputs_embeds`` is discarded and the decoder continues from
        the received activations.
        """
        if intermediate_tensors is not None:
            inputs_embeds = None
        hidden_states = self.model(
            input_ids,
            positions,
            intermediate_tensors,
            inputs_embeds=inputs_embeds,
            **kwargs,
        )
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Project hidden states to vocabulary logits via the LM head."""
        logits = self.logits_processor(self.lm_head, hidden_states)
        return logits

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        """Load checkpoint weights, merging the split embedding tables and
        applying the HF->vLLM name mapping."""
        loader = AutoWeightsLoader(self)
        weights = _get_weights_with_merged_embedding(weights)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="model",
            connector="vision_backbone.image_projector",
            tower_model="vision_backbone",
        )
def _get_weights_with_merged_embedding(
    weights: Iterable[tuple[str, torch.Tensor]],
) -> Iterable[tuple[str, torch.Tensor]]:
    """Merge the checkpoint's two embedding tables into one vLLM weight.

    Every weight streams through unchanged, except ``wte.embedding`` and
    ``wte.new_embedding``: those are held back, concatenated along dim 0
    (base table first), and yielded last under the single name
    ``model.embed_tokens.weight``.

    Raises:
        ValueError: if either embedding table is absent from the checkpoint.
    """
    held_back: dict[str, torch.Tensor] = {}
    for name, tensor in weights:
        if "wte.embedding" in name:
            held_back["embedding"] = tensor
        elif "wte.new_embedding" in name:
            held_back["new_embedding"] = tensor
        else:
            yield (name, tensor)
    # Quantization schemes generally leave embed_tokens unquantized, so this
    # raw concatenation stays compatible with most of them.
    if {"embedding", "new_embedding"} - held_back.keys():
        raise ValueError(
            "Checkpoint is missing 'wte.embedding' or "
            "'wte.new_embedding' weights required for Molmo2."
        )
    merged = torch.cat(
        [held_back["embedding"], held_back["new_embedding"]],
        dim=0,
    )
    yield ("model.embed_tokens.weight", merged)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/model_executor/models/molmo2.py",
"license": "Apache License 2.0",
"lines": 2394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/responses/api_router.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from collections.abc import AsyncGenerator
from http import HTTPStatus
from fastapi import APIRouter, Depends, FastAPI, Request
from fastapi.responses import JSONResponse, StreamingResponse
from vllm.entrypoints.openai.engine.protocol import ErrorResponse
from vllm.entrypoints.openai.responses.protocol import (
ResponsesRequest,
ResponsesResponse,
StreamingResponsesResponse,
)
from vllm.entrypoints.openai.responses.serving import OpenAIServingResponses
from vllm.entrypoints.openai.utils import validate_json_request
from vllm.entrypoints.utils import (
load_aware_call,
with_cancellation,
)
from vllm.logger import init_logger
logger = init_logger(__name__)
router = APIRouter()
def responses(request: Request) -> OpenAIServingResponses | None:
    """Look up the Responses-API serving handler stored on the app state.

    Returns ``None`` when the server was started without Responses support.
    """
    app_state = request.app.state
    return app_state.openai_serving_responses
async def _convert_stream_to_sse_events(
    generator: AsyncGenerator[StreamingResponsesResponse, None],
) -> AsyncGenerator[str, None]:
    """Reframe each streamed response event as a Server-Sent-Events message.

    Each yielded string has an ``event:`` line (the event's ``type``
    attribute, or ``"unknown"`` when absent) followed by a ``data:`` line
    holding the event serialized as compact JSON, per the SSE wire format:
    https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#event_stream_format
    """
    async for event in generator:
        name = getattr(event, "type", "unknown")
        payload = event.model_dump_json(indent=None)
        yield f"event: {name}\ndata: {payload}\n\n"
@router.post(
    "/v1/responses",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_responses(request: ResponsesRequest, raw_request: Request):
    """Handle ``POST /v1/responses``.

    Returns a JSON error when Responses support is disabled or the handler
    fails, a JSON body for a completed response, or an SSE stream for a
    streaming request.
    """
    handler = responses(raw_request)
    if handler is None:
        # Responses support is disabled; borrow the tokenization server only
        # to format a standard error payload.
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Responses API"
        )
    try:
        generator = await handler.create_responses(request, raw_request)
    except Exception as e:
        # Surface handler failures as a structured error response.
        generator = handler.create_error_response(e)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )
    elif isinstance(generator, ResponsesResponse):
        return JSONResponse(content=generator.model_dump())
    # Otherwise the handler returned an async generator of stream events.
    return StreamingResponse(
        content=_convert_stream_to_sse_events(generator), media_type="text/event-stream"
    )
@router.get("/v1/responses/{response_id}")
@load_aware_call
async def retrieve_responses(
    response_id: str,
    raw_request: Request,
    starting_after: int | None = None,
    stream: bool | None = False,
):
    """Handle ``GET /v1/responses/{response_id}``.

    Fetches a previously stored response; with ``stream=True`` the stored
    events are replayed over SSE, optionally resuming after the
    ``starting_after`` sequence number.
    """
    handler = responses(raw_request)
    if handler is None:
        # Responses support is disabled; borrow the tokenization server only
        # to format a standard error payload.
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Responses API"
        )
    try:
        response = await handler.retrieve_responses(
            response_id,
            starting_after=starting_after,
            stream=stream,
        )
    except Exception as e:
        # Surface handler failures as a structured error response.
        response = handler.create_error_response(e)
    if isinstance(response, ErrorResponse):
        return JSONResponse(
            content=response.model_dump(), status_code=response.error.code
        )
    elif isinstance(response, ResponsesResponse):
        return JSONResponse(content=response.model_dump())
    # Otherwise the handler returned an async generator of stream events.
    return StreamingResponse(
        content=_convert_stream_to_sse_events(response), media_type="text/event-stream"
    )
@router.post("/v1/responses/{response_id}/cancel")
@load_aware_call
async def cancel_responses(response_id: str, raw_request: Request):
    """Handle ``POST /v1/responses/{response_id}/cancel``.

    Cancels an in-flight (background) response and returns its final state,
    or a structured error if cancellation fails.
    """
    handler = responses(raw_request)
    if handler is None:
        # Responses support is disabled; borrow the tokenization server only
        # to format a standard error payload.
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Responses API"
        )
    try:
        response = await handler.cancel_responses(response_id)
    except Exception as e:
        # Surface handler failures as a structured error response.
        response = handler.create_error_response(e)
    if isinstance(response, ErrorResponse):
        return JSONResponse(
            content=response.model_dump(), status_code=response.error.code
        )
    return JSONResponse(content=response.model_dump())
def attach_router(app: FastAPI):
    """Mount this module's Responses-API routes onto *app*."""
    responses_router = router
    app.include_router(responses_router)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/responses/api_router.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/openai/responses/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# Adapted from
# https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
import time
from typing import Any, Literal, TypeAlias
import torch
from openai.types.responses import (
ResponseCodeInterpreterCallCodeDeltaEvent,
ResponseCodeInterpreterCallCodeDoneEvent,
ResponseCodeInterpreterCallCompletedEvent,
ResponseCodeInterpreterCallInProgressEvent,
ResponseCodeInterpreterCallInterpretingEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseFunctionToolCall,
ResponseInputItemParam,
ResponseMcpCallArgumentsDeltaEvent,
ResponseMcpCallArgumentsDoneEvent,
ResponseMcpCallCompletedEvent,
ResponseMcpCallInProgressEvent,
ResponseOutputItem,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponsePrompt,
ResponseReasoningTextDeltaEvent,
ResponseReasoningTextDoneEvent,
ResponseStatus,
ResponseWebSearchCallCompletedEvent,
ResponseWebSearchCallInProgressEvent,
ResponseWebSearchCallSearchingEvent,
)
from openai.types.responses import (
ResponseCompletedEvent as OpenAIResponseCompletedEvent,
)
from openai.types.responses import ResponseCreatedEvent as OpenAIResponseCreatedEvent
from openai.types.responses import (
ResponseInProgressEvent as OpenAIResponseInProgressEvent,
)
from openai.types.responses.tool import Tool
from openai_harmony import Message as OpenAIHarmonyMessage
# Backward compatibility for OpenAI client versions
try: # For older openai versions (< 1.100.0)
from openai.types.responses import ResponseTextConfig
except ImportError: # For newer openai versions (>= 1.100.0)
from openai.types.responses import ResponseFormatTextConfig as ResponseTextConfig
from openai.types.responses.response import IncompleteDetails, ToolChoice
from openai.types.responses.response_reasoning_item import (
Content as ResponseReasoningTextContent,
)
from openai.types.shared import Metadata, Reasoning
from pydantic import (
Field,
ValidationError,
field_serializer,
model_validator,
)
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import (
ChatCompletionMessageParam,
ChatTemplateContentFormatOption,
)
from vllm.entrypoints.openai.engine.protocol import OpenAIBaseModel
from vllm.exceptions import VLLMValidationError
from vllm.logger import init_logger
from vllm.renderers import ChatParams, TokenizeParams, merge_kwargs
from vllm.sampling_params import (
RequestOutputKind,
SamplingParams,
StructuredOutputsParams,
)
from vllm.utils import random_uuid
logger = init_logger(__name__)
_LONG_INFO = torch.iinfo(torch.long)
class InputTokensDetails(OpenAIBaseModel):
    """Breakdown of prompt-side token usage for a response."""

    # Prompt tokens served from the prefix cache rather than recomputed.
    cached_tokens: int
    # Per-turn breakdowns for multi-turn (e.g. tool-calling) requests.
    input_tokens_per_turn: list[int] = Field(default_factory=list)
    cached_tokens_per_turn: list[int] = Field(default_factory=list)
class OutputTokensDetails(OpenAIBaseModel):
    """Breakdown of generation-side token usage for a response."""

    # Tokens spent in reasoning (hidden chain-of-thought) output.
    reasoning_tokens: int = 0
    # Tokens produced by tool invocations.
    tool_output_tokens: int = 0
    # Per-turn breakdowns for multi-turn (e.g. tool-calling) requests.
    output_tokens_per_turn: list[int] = Field(default_factory=list)
    tool_output_tokens_per_turn: list[int] = Field(default_factory=list)
class ResponseUsage(OpenAIBaseModel):
    """Aggregate token-usage accounting attached to a completed response."""

    input_tokens: int
    input_tokens_details: InputTokensDetails
    output_tokens: int
    output_tokens_details: OutputTokensDetails
    # Expected to be input_tokens + output_tokens.
    total_tokens: int
def serialize_message(msg):
    """Serialize a single message to a plain structure.

    Dicts pass through untouched; objects exposing ``to_dict`` are converted
    with it; anything else falls back to pydantic's JSON dump (a string).
    """
    if isinstance(msg, dict):
        return msg
    if hasattr(msg, "to_dict"):
        return msg.to_dict()
    # fallback to pydantic dump
    return msg.model_dump_json()


def serialize_messages(msgs):
    """Serialize a list of messages; ``None`` or an empty list yields ``None``."""
    if not msgs:
        return None
    return [serialize_message(m) for m in msgs]
class ResponseRawMessageAndToken(OpenAIBaseModel):
    """Raw message text paired with its token ids.

    If message / tokens diverge, tokens is the source of truth.
    """

    message: str
    tokens: list[int]
    # Discriminator used when this appears in a union of message types.
    type: Literal["raw_message_tokens"] = "raw_message_tokens"
# Raw input/output message lists: either chat-style messages or raw
# message+token records (used when enable_response_messages is set).
ResponseInputOutputMessage: TypeAlias = (
    list[ChatCompletionMessageParam] | list[ResponseRawMessageAndToken]
)
# Items accepted in a request's `input` list or produced in `output`.
ResponseInputOutputItem: TypeAlias = ResponseInputItemParam | ResponseOutputItem
class ResponsesRequest(OpenAIBaseModel):
    """Request body for the OpenAI-compatible Responses API.

    Field order mirrors the official API reference; vLLM-specific extensions
    live between the ``responses-extra-params`` markers.
    """

    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/responses/create
    background: bool | None = False
    include: (
        list[
            Literal[
                "code_interpreter_call.outputs",
                "computer_call_output.output.image_url",
                "file_search_call.results",
                "message.input_image.image_url",
                "message.output_text.logprobs",
                "reasoning.encrypted_content",
            ],
        ]
        | None
    ) = None
    input: str | list[ResponseInputOutputItem]
    instructions: str | None = None
    max_output_tokens: int | None = None
    max_tool_calls: int | None = None
    metadata: Metadata | None = None
    model: str | None = None
    logit_bias: dict[str, float] | None = None
    parallel_tool_calls: bool | None = True
    previous_response_id: str | None = None
    prompt: ResponsePrompt | None = None
    reasoning: Reasoning | None = None
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] = "auto"
    store: bool | None = True
    stream: bool | None = False
    temperature: float | None = None
    text: ResponseTextConfig | None = None
    tool_choice: ToolChoice = "auto"
    tools: list[Tool] = Field(default_factory=list)
    top_logprobs: int | None = 0
    top_p: float | None = None
    top_k: int | None = None
    truncation: Literal["auto", "disabled"] | None = "disabled"
    user: str | None = None
    skip_special_tokens: bool = True
    include_stop_str_in_output: bool = False
    prompt_cache_key: str | None = Field(
        default=None,
        description=(
            "A key that was used to read from or write to the prompt cache."
            "Note: This field has not been implemented yet "
            "and vLLM will ignore it."
        ),
    )

    # --8<-- [start:responses-extra-params]
    request_id: str = Field(
        default_factory=lambda: f"resp_{random_uuid()}",
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )
    enable_response_messages: bool = Field(
        default=False,
        description=(
            "Dictates whether or not to return messages as part of the "
            "response object. Currently only supported for non-background."
        ),
    )
    # similar to input_messages / output_messages in ResponsesResponse
    # we take in previous_input_messages (ie in harmony format)
    # this cannot be used in conjunction with previous_response_id
    # TODO: consider supporting non harmony messages as well
    previous_input_messages: list[OpenAIHarmonyMessage | dict] | None = None
    structured_outputs: StructuredOutputsParams | None = Field(
        default=None,
        description="Additional kwargs for structured outputs",
    )
    repetition_penalty: float | None = None
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    stop: str | list[str] | None = []
    ignore_eos: bool = False
    vllm_xargs: dict[str, str | int | float | list[str | int | float]] | None = Field(
        default=None,
        description=(
            "Additional request parameters with (list of) string or "
            "numeric values, used by custom extensions."
        ),
    )
    # --8<-- [end:responses-extra-params]

    def build_chat_params(
        self,
        default_template: str | None,
        default_template_content_format: ChatTemplateContentFormatOption,
    ) -> ChatParams:
        """Derive chat-templating parameters for this request."""
        # Local import to avoid a circular dependency with .utils.
        from .utils import should_continue_final_message

        # Check if we should continue the final message (partial completion)
        # This enables Anthropic-style partial message completion where the
        # user provides an incomplete assistant message to continue from.
        continue_final = should_continue_final_message(self.input)
        reasoning = self.reasoning
        return ChatParams(
            chat_template=default_template,
            chat_template_content_format=default_template_content_format,
            chat_template_kwargs=merge_kwargs(  # To remove unset values
                {},
                dict(
                    add_generation_prompt=not continue_final,
                    continue_final_message=continue_final,
                    reasoning_effort=None if reasoning is None else reasoning.effort,
                ),
            ),
        )

    def build_tok_params(self, model_config: ModelConfig) -> TokenizeParams:
        """Derive tokenization limits from this request and the model config."""
        return TokenizeParams(
            max_total_tokens=model_config.max_model_len,
            max_output_tokens=self.max_output_tokens or 0,
            # truncation="auto" maps to truncate-from-the-left (-1).
            truncate_prompt_tokens=-1 if self.truncation != "disabled" else None,
            max_total_tokens_param="max_model_len",
            max_output_tokens_param="max_output_tokens",
        )

    # Fallbacks used when neither the request nor the server defaults
    # specify a sampling value.
    _DEFAULT_SAMPLING_PARAMS = {
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
    }

    def to_sampling_params(
        self,
        default_max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> SamplingParams:
        """Convert this request into engine ``SamplingParams``.

        Resolution order for each knob: request value, then server default,
        then the class-level fallback.

        Raises:
            VLLMValidationError: if both ``structured_outputs`` and a
                ``text.format`` are specified.
        """
        if self.max_output_tokens is None:
            max_tokens = default_max_tokens
        else:
            max_tokens = min(self.max_output_tokens, default_max_tokens)

        default_sampling_params = default_sampling_params or {}
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get("repetition_penalty", 1.0)
        stop_token_ids = default_sampling_params.get("stop_token_ids")

        # Structured output
        structured_outputs = self.structured_outputs
        # Also check text.format for OpenAI-style json_schema
        if self.text is not None and self.text.format is not None:
            if structured_outputs is not None:
                raise VLLMValidationError(
                    "Cannot specify both structured_outputs and text.format",
                    parameter="structured_outputs",
                )
            response_format = self.text.format
            if (
                response_format.type == "json_schema"
                and response_format.schema_ is not None
            ):
                structured_outputs = StructuredOutputsParams(
                    json=response_format.schema_  # type: ignore[call-arg]
                    # --follow-imports skip hides the class definition but also hides
                    # multiple third party conflicts, so best of both evils
                )
        stop = self.stop if self.stop else []
        if isinstance(stop, str):
            stop = [stop]
        return SamplingParams.from_optional(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_tokens=max_tokens,
            logprobs=self.top_logprobs if self.is_include_output_logprobs() else None,
            stop_token_ids=stop_token_ids,
            stop=stop,
            repetition_penalty=repetition_penalty,
            seed=self.seed,
            ignore_eos=self.ignore_eos,
            output_kind=(
                RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY
            ),
            structured_outputs=structured_outputs,
            logit_bias=self.logit_bias,
            extra_args=self.vllm_xargs or {},
            skip_clone=True,  # Created fresh per request, safe to skip clone
            skip_special_tokens=self.skip_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
        )

    def is_include_output_logprobs(self) -> bool:
        """Check if the request includes output logprobs."""
        if self.include is None:
            return False
        return (
            isinstance(self.include, list)
            and "message.output_text.logprobs" in self.include
        )

    @model_validator(mode="before")
    @classmethod
    def validate_background(cls, data):
        """Reject background requests that disable storage."""
        if not data.get("background"):
            return data
        if not data.get("store", True):
            raise VLLMValidationError(
                "background can only be used when `store` is true",
                parameter="background",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def validate_prompt(cls, data):
        """Reject the (unsupported) prompt-template field."""
        if data.get("prompt") is not None:
            raise VLLMValidationError(
                "prompt template is not supported", parameter="prompt"
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_cache_salt_support(cls, data):
        """Require cache_salt, when present, to be a non-empty string."""
        if data.get("cache_salt") is not None and (
            not isinstance(data["cache_salt"], str) or not data["cache_salt"]
        ):
            raise VLLMValidationError(
                "Parameter 'cache_salt' must be a non-empty string if provided.",
                parameter="cache_salt",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def function_call_parsing(cls, data):
        """Parse function_call dictionaries into ResponseFunctionToolCall objects.

        This ensures Pydantic can properly resolve union types in the input field.
        Function calls provided as dicts are converted to ResponseFunctionToolCall
        objects before validation, while invalid structures are left for Pydantic
        to reject with appropriate error messages.
        """
        input_data = data.get("input")

        # Early return for None, strings, or bytes
        # (strings are iterable but shouldn't be processed)
        if input_data is None or isinstance(input_data, (str, bytes)):
            return data

        # Convert iterators (like ValidatorIterator) to list
        if not isinstance(input_data, list):
            try:
                input_data = list(input_data)
            except TypeError:
                # Not iterable, leave as-is for Pydantic to handle
                return data

        processed_input = []
        for item in input_data:
            if isinstance(item, dict) and item.get("type") == "function_call":
                try:
                    processed_input.append(ResponseFunctionToolCall(**item))
                except ValidationError:
                    # Let Pydantic handle validation for malformed function calls
                    logger.debug(
                        "Failed to parse function_call to ResponseFunctionToolCall, "
                        "leaving for Pydantic validation"
                    )
                    processed_input.append(item)
            else:
                processed_input.append(item)
        data["input"] = processed_input
        return data
class ResponsesResponse(OpenAIBaseModel):
    """Response body returned by the OpenAI-compatible Responses API."""

    id: str = Field(default_factory=lambda: f"resp_{random_uuid()}")
    created_at: int = Field(default_factory=lambda: int(time.time()))
    # error: Optional[ResponseError] = None
    incomplete_details: IncompleteDetails | None = None
    instructions: str | None = None
    metadata: Metadata | None = None
    model: str
    object: Literal["response"] = "response"
    output: list[ResponseOutputItem]
    parallel_tool_calls: bool
    temperature: float
    tool_choice: ToolChoice
    tools: list[Tool]
    top_p: float
    background: bool
    max_output_tokens: int
    max_tool_calls: int | None = None
    previous_response_id: str | None = None
    prompt: ResponsePrompt | None = None
    reasoning: Reasoning | None = None
    service_tier: Literal["auto", "default", "flex", "scale", "priority"]
    status: ResponseStatus
    text: ResponseTextConfig | None = None
    top_logprobs: int | None = None
    truncation: Literal["auto", "disabled"]
    usage: ResponseUsage | None = None
    user: str | None = None

    # --8<-- [start:responses-response-extra-params]
    # These are populated when enable_response_messages is set to True
    # NOTE: custom serialization is needed
    # see serialize_input_messages and serialize_output_messages
    input_messages: ResponseInputOutputMessage | None = Field(
        default=None,
        description=(
            "If enable_response_messages, we can show raw token input to model."
        ),
    )
    output_messages: ResponseInputOutputMessage | None = Field(
        default=None,
        description=(
            "If enable_response_messages, we can show raw token output of model."
        ),
    )
    # --8<-- [end:responses-response-extra-params]

    # NOTE: openAI harmony doesn't serialize TextContent properly,
    # TODO: this fixes for TextContent, but need to verify for tools etc
    # https://github.com/openai/harmony/issues/78
    @field_serializer("output_messages", when_used="json")
    def serialize_output_messages(self, msgs, _info):
        """Serialize output_messages via the module-level helper."""
        return serialize_messages(msgs)

    # NOTE: openAI harmony doesn't serialize TextContent properly, this fixes it
    # https://github.com/openai/harmony/issues/78
    @field_serializer("input_messages", when_used="json")
    def serialize_input_messages(self, msgs, _info):
        """Serialize input_messages via the module-level helper."""
        return serialize_messages(msgs)

    @classmethod
    def from_request(
        cls,
        request: ResponsesRequest,
        sampling_params: SamplingParams,
        model_name: str,
        created_time: int,
        output: list[ResponseOutputItem],
        status: ResponseStatus,
        usage: ResponseUsage | None = None,
        input_messages: ResponseInputOutputMessage | None = None,
        output_messages: ResponseInputOutputMessage | None = None,
    ) -> "ResponsesResponse":
        """Build a response object from the originating request plus the
        resolved sampling parameters and generation results."""
        incomplete_details: IncompleteDetails | None = None
        if status == "incomplete":
            # The only incomplete reason produced today is hitting the
            # output-token budget.
            incomplete_details = IncompleteDetails(reason="max_output_tokens")
        # TODO: implement the other reason for incomplete_details,
        # which is content_filter
        # incomplete_details = IncompleteDetails(reason='content_filter')
        return cls(
            id=request.request_id,
            created_at=created_time,
            incomplete_details=incomplete_details,
            instructions=request.instructions,
            metadata=request.metadata,
            model=model_name,
            output=output,
            input_messages=input_messages,
            output_messages=output_messages,
            parallel_tool_calls=request.parallel_tool_calls,
            temperature=sampling_params.temperature,
            tool_choice=request.tool_choice,
            tools=request.tools,
            top_p=sampling_params.top_p,
            background=request.background,
            max_output_tokens=sampling_params.max_tokens,
            max_tool_calls=request.max_tool_calls,
            previous_response_id=request.previous_response_id,
            prompt=request.prompt,
            reasoning=request.reasoning,
            service_tier=request.service_tier,
            status=status,
            text=request.text,
            top_logprobs=sampling_params.logprobs,
            truncation=request.truncation,
            user=request.user,
            usage=usage,
        )
# TODO: this code can be removed once
# https://github.com/openai/openai-python/issues/2634 has been resolved
class ResponseReasoningPartDoneEvent(OpenAIBaseModel):
    """Streaming event emitted when a reasoning content part is completed.

    Local stand-in for an event type missing from openai-python (see TODO).
    """

    content_index: int
    """The index of the content part that is done."""
    item_id: str
    """The ID of the output item that the content part was added to."""
    output_index: int
    """The index of the output item that the content part was added to."""
    part: ResponseReasoningTextContent
    """The content part that is done."""
    sequence_number: int
    """The sequence number of this event."""
    type: Literal["response.reasoning_part.done"]
    """The type of the event. Always `response.reasoning_part.done`."""
# TODO: this code can be removed once
# https://github.com/openai/openai-python/issues/2634 has been resolved
class ResponseReasoningPartAddedEvent(OpenAIBaseModel):
    """Streaming event emitted when a new reasoning content part is added.

    Local stand-in for an event type missing from openai-python (see TODO).
    """

    content_index: int
    """The index of the content part that was added."""
    item_id: str
    """The ID of the output item that the content part was added to."""
    output_index: int
    """The index of the output item that the content part was added to."""
    part: ResponseReasoningTextContent
    """The content part that was added."""
    sequence_number: int
    """The sequence number of this event."""
    type: Literal["response.reasoning_part.added"]
    """The type of the event. Always `response.reasoning_part.added`."""
# vLLM Streaming Events
# Note: we override the response type with the vLLM ResponsesResponse type
class ResponseCompletedEvent(OpenAIResponseCompletedEvent):
    """``response.completed`` event carrying vLLM's ``ResponsesResponse``."""

    response: ResponsesResponse  # type: ignore[override]
class ResponseCreatedEvent(OpenAIResponseCreatedEvent):
    """``response.created`` event carrying vLLM's ``ResponsesResponse``."""

    response: ResponsesResponse  # type: ignore[override]
class ResponseInProgressEvent(OpenAIResponseInProgressEvent):
    """``response.in_progress`` event carrying vLLM's ``ResponsesResponse``."""

    response: ResponsesResponse  # type: ignore[override]
# Union of every server-sent event type the Responses streaming endpoint can
# emit: lifecycle events (created / in_progress / completed), output-item and
# content-part events, reasoning events, plus code-interpreter, web-search,
# and MCP tool-call events.
StreamingResponsesResponse: TypeAlias = (
    ResponseCreatedEvent
    | ResponseInProgressEvent
    | ResponseCompletedEvent
    | ResponseOutputItemAddedEvent
    | ResponseOutputItemDoneEvent
    | ResponseContentPartAddedEvent
    | ResponseContentPartDoneEvent
    | ResponseReasoningTextDeltaEvent
    | ResponseReasoningTextDoneEvent
    | ResponseReasoningPartAddedEvent
    | ResponseReasoningPartDoneEvent
    | ResponseCodeInterpreterCallInProgressEvent
    | ResponseCodeInterpreterCallCodeDeltaEvent
    | ResponseWebSearchCallInProgressEvent
    | ResponseWebSearchCallSearchingEvent
    | ResponseWebSearchCallCompletedEvent
    | ResponseCodeInterpreterCallCodeDoneEvent
    | ResponseCodeInterpreterCallInterpretingEvent
    | ResponseCodeInterpreterCallCompletedEvent
    | ResponseMcpCallArgumentsDeltaEvent
    | ResponseMcpCallArgumentsDoneEvent
    | ResponseMcpCallInProgressEvent
    | ResponseMcpCallCompletedEvent
)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/openai/responses/protocol.py",
"license": "Apache License 2.0",
"lines": 571,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/v1/worker/gpu/sample/states.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import numpy as np
import torch
from vllm.sampling_params import SamplingParams
from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
from vllm.v1.worker.gpu.buffer_utils import UvaBackedTensor
from vllm.v1.worker.gpu.sample.gumbel import apply_temperature
from vllm.v1.worker.gpu.sample.min_p import apply_min_p
# Sentinel stored in SamplingStates.num_logprobs for requests that did not
# ask for logprobs.
NO_LOGPROBS = -1

# Full int64 range, used when drawing a random seed for unseeded requests.
_NP_INT64_MIN = np.iinfo(np.int64).min
_NP_INT64_MAX = np.iinfo(np.int64).max
class SamplingStates:
    """Per-request-slot sampling parameters kept in UVA-backed buffers.

    Writes from ``add_request`` are staged in the numpy mirrors and published
    to the GPU-visible side by ``apply_staged_writes``. The ``apply_*``
    methods first inspect the CPU mirrors (via ``idx_mapping_np``) so that
    kernel launches are skipped entirely when no request in the batch uses
    the corresponding feature.
    """

    def __init__(self, max_num_reqs: int, vocab_size: int):
        """Allocate buffers for up to ``max_num_reqs`` concurrent requests."""
        self.max_num_reqs = max_num_reqs
        self.vocab_size = vocab_size

        self.temperature = UvaBackedTensor(max_num_reqs, dtype=torch.float32)
        self.top_k = UvaBackedTensor(max_num_reqs, dtype=torch.int32)
        self.top_p = UvaBackedTensor(max_num_reqs, dtype=torch.float32)
        self.min_p = UvaBackedTensor(max_num_reqs, dtype=torch.float32)
        self.seeds = UvaBackedTensor(max_num_reqs, dtype=torch.int64)

        # Initialize top_k and top_p manually because 0 is an invalid value
        # for them (the "disabled" values are vocab_size and 1.0).
        self.top_k.np.fill(self.vocab_size)
        self.top_k.copy_to_uva()
        self.top_p.np.fill(1.0)
        self.top_p.copy_to_uva()

        # CPU-only array; NO_LOGPROBS (-1) means no logprobs are requested.
        self.num_logprobs = np.empty(self.max_num_reqs, dtype=np.int32)
        self.num_logprobs.fill(NO_LOGPROBS)

    def add_request(self, req_idx: int, sampling_params: SamplingParams) -> None:
        """Stage the sampling parameters of a new request at slot ``req_idx``.

        Only the numpy mirrors are written; call ``apply_staged_writes`` to
        make the values visible to the GPU.
        """
        self.temperature.np[req_idx] = sampling_params.temperature
        self.top_p.np[req_idx] = sampling_params.top_p
        top_k = sampling_params.top_k
        if top_k <= 0 or top_k > self.vocab_size:
            # Non-positive or oversized top_k means "consider the full vocab".
            top_k = self.vocab_size
        self.top_k.np[req_idx] = top_k
        self.min_p.np[req_idx] = sampling_params.min_p

        seed = sampling_params.seed
        if seed is None:
            # Draw a random seed for unseeded requests. dtype=np.int64 is
            # required: without it numpy uses the platform-default integer
            # dtype, which is 32-bit on some platforms (e.g. Windows), and
            # these int64 bounds would raise "low is out of bounds".
            # Note randint's high bound is exclusive.
            seed = np.random.randint(_NP_INT64_MIN, _NP_INT64_MAX, dtype=np.int64)
        self.seeds.np[req_idx] = seed

        num_logprobs = sampling_params.logprobs
        if num_logprobs is None:
            num_logprobs = NO_LOGPROBS
        self.num_logprobs[req_idx] = num_logprobs

    def apply_staged_writes(self) -> None:
        """Publish all staged numpy-side writes to the UVA (GPU-visible) side."""
        self.temperature.copy_to_uva()
        self.top_p.copy_to_uva()
        self.top_k.copy_to_uva()
        self.min_p.copy_to_uva()
        self.seeds.copy_to_uva()

    def apply_temperature(
        self,
        logits: torch.Tensor,
        idx_mapping: torch.Tensor,
        idx_mapping_np: np.ndarray,
    ) -> None:
        """Scale ``logits`` in place by per-request temperature.

        Skips the kernel launch when every batched request uses temperature
        0.0 (greedy; handled elsewhere) or 1.0 (no-op).
        """
        temp_np = self.temperature.np[idx_mapping_np]
        if np.all((temp_np == 0.0) | (temp_np == 1.0)):
            # No request requires temperature. Skip the kernel launch.
            return
        apply_temperature(logits, idx_mapping, self.temperature.gpu)

    def apply_min_p(
        self,
        logits: torch.Tensor,
        idx_mapping: torch.Tensor,
        idx_mapping_np: np.ndarray,
    ) -> None:
        """Apply per-request min_p filtering to ``logits`` in place."""
        if np.all(self.min_p.np[idx_mapping_np] == 0.0):
            # No request uses min_p. Skip the kernel launch.
            return
        apply_min_p(logits, idx_mapping, self.min_p.gpu)

    def apply_top_k_top_p(
        self,
        logits: torch.Tensor,
        idx_mapping: torch.Tensor,
        idx_mapping_np: np.ndarray,
    ) -> torch.Tensor:
        """Apply top-k/top-p filtering and return the (possibly new) logits.

        Returns ``logits`` unchanged when no batched request enables either
        filter (top_k == vocab_size and top_p == 1.0 are the disabled values).
        """
        do_top_k = np.any(self.top_k.np[idx_mapping_np] != self.vocab_size)
        do_top_p = np.any(self.top_p.np[idx_mapping_np] != 1.0)
        if not (do_top_k or do_top_p):
            return logits
        top_k = self.top_k.gpu[idx_mapping] if do_top_k else None
        top_p = self.top_p.gpu[idx_mapping] if do_top_p else None
        return apply_top_k_top_p(logits, top_k, top_p)

    def max_num_logprobs(self, idx_mapping_np: np.ndarray) -> int:
        """Return the largest logprobs count among the batched requests.

        Returns NO_LOGPROBS (-1) when none of them requested logprobs.
        Assumes ``idx_mapping_np`` is non-empty (np.max raises otherwise).
        """
        return int(np.max(self.num_logprobs[idx_mapping_np]))
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/v1/worker/gpu/sample/states.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
vllm-project/vllm:vllm/entrypoints/serve/lora/protocol.py | # SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
from pydantic import BaseModel, Field
class LoadLoRAAdapterRequest(BaseModel):
    """Request body for dynamically loading a LoRA adapter."""

    # Name under which the adapter is registered and later referenced.
    lora_name: str
    # Location of the adapter weights to load.
    lora_path: str
    # NOTE(review): presumably replaces an already-loaded adapter with the
    # same name when True — confirm against the serving handler.
    load_inplace: bool = False
class UnloadLoRAAdapterRequest(BaseModel):
    """Request body for unloading a previously loaded LoRA adapter."""

    # Name of the adapter to unload.
    lora_name: str
    # Optional numeric adapter id; None when the adapter is addressed by
    # name only. NOTE(review): semantics not visible here — verify caller.
    lora_int_id: int | None = Field(default=None)
| {
"repo_id": "vllm-project/vllm",
"file_path": "vllm/entrypoints/serve/lora/protocol.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.